Mirror of https://github.com/GreptimeTeam/greptimedb.git (synced 2025-12-23 06:30:05 +00:00)

Compare commits: `transform-...` → `release/v0` (81 commits)
Commits (SHA1): 21bcd0fc81, 49db50af81, 8c804f6eeb, e060280ddc, 0787c5da66, 1cd6abb61f, e3927ea6f7, a6571d3392, 1255638e84, 1578c004b0, 5f8d849981, 3029b47a89, 14d997e2d1, 0aab68c23b, 027284ed1b, 6a958e2c36, db345c92df, 55ced9aa71, 3633f25d0c, 63bbfd04c7, 2f260d8b27, 4d8fe29ea8, dbb3f2d98d, 9926e3bc78, 0dd02e93cf, 73e6bf399d, 4402f638cd, c199604ece, 2b72e66536, 7c135c0ef9, 9289265f54, 485782af51, 4b263ef1cc, 08f59008cc, a2852affeb, cdba7b442f, 42bf7e9965, a70b4d7eba, 408013c22b, 22c8a7656b, 35898f0b2e, 1101e98651, 0089cf1b4f, d7c3c8e124, f4b9eac465, aa6c2de42a, 175fddb3b5, 6afc4e778a, 3bbcde8e58, 3bf9981aab, c47ad548a4, 0b6d78a527, d616bd92ef, 84aa5b7b22, cbf21e53a9, 6248a6ccf5, 0e0c4faf0d, 1a02fc31c2, 8efbafa538, fcd0ceea94, 22f31f5929, 5d20acca44, e3733344fe, 305767e226, 22a662f6bc, 1431393fc8, dfe8cf25f9, cccd25ddbb, ac387bd2af, 2e9737c01d, a8b426aebe, f3509fa312, 3dcd6b8e51, f221ee30fd, fb822987a9, 4ab6dc2825, 191755fc42, 1676d02149, edc49623de, 9405d1c578, 7a4276c24a
`.github/scripts/check-install-script.sh` (new executable file, +14)

```diff
@@ -0,0 +1,14 @@
+#!/bin/sh
+
+set -e
+
+# Get the latest version of github.com/GreptimeTeam/greptimedb
+VERSION=$(curl -s https://api.github.com/repos/GreptimeTeam/greptimedb/releases/latest | jq -r '.tag_name')
+
+echo "Downloading the latest version: $VERSION"
+
+# Download the install script
+curl -fsSL https://raw.githubusercontent.com/greptimeteam/greptimedb/main/scripts/install.sh | sh -s $VERSION
+
+# Execute the `greptime` command
+./greptime --version
```
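The check boils down to two HTTP calls: ask the GitHub releases API for the latest tag, then pipe the install script to `sh` with that tag. For illustration, a minimal sketch of the same tag lookup in Rust, using the workspace's `reqwest` and assuming `serde_json` is also available; the `tag_name` field follows the GitHub REST API the script queries:

```rust
// Sketch only: mirrors the `curl | jq -r '.tag_name'` step of the script.
async fn latest_greptimedb_tag() -> Result<String, reqwest::Error> {
    let release: serde_json::Value = reqwest::Client::new()
        .get("https://api.github.com/repos/GreptimeTeam/greptimedb/releases/latest")
        // The GitHub API rejects requests without a User-Agent.
        .header("User-Agent", "check-install-script")
        .send()
        .await?
        .json()
        .await?;
    Ok(release["tag_name"].as_str().unwrap_or_default().to_string())
}
```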
`.github/workflows/nightly-ci.yml` (+4)

```diff
@@ -22,6 +22,10 @@ jobs:
         uses: actions/checkout@v4
         with:
           fetch-depth: 0
+
+      - name: Check install.sh
+        run: ./.github/scripts/check-install-script.sh
+
       - name: Run sqlness test
         uses: ./.github/actions/sqlness-test
         with:
```
`.github/workflows/release.yml` (±2)

```diff
@@ -91,7 +91,7 @@ env:
   # The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nigthly-20230313;
   NIGHTLY_RELEASE_PREFIX: nightly
   # Note: The NEXT_RELEASE_VERSION should be modified manually by every formal release.
-  NEXT_RELEASE_VERSION: v0.10.0
+  NEXT_RELEASE_VERSION: v0.11.0

 # Permission reference: https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs
 permissions:
```
Pre-commit config (filename not preserved in the mirror):

```diff
@@ -17,6 +17,6 @@ repos:
       - id: fmt
       - id: clippy
         args: ["--workspace", "--all-targets", "--all-features", "--", "-D", "warnings"]
-        stages: [push]
+        stages: [pre-push]
       - id: cargo-check
         args: ["--workspace", "--all-targets", "--all-features"]
```
`Cargo.lock` (generated, 405 lines changed): diff suppressed because it is too large.
`Cargo.toml` (±27)

```diff
@@ -2,23 +2,25 @@
 members = [
     "src/api",
     "src/auth",
+    "src/cache",
     "src/catalog",
     "src/client",
     "src/cmd",
     "src/common/base",
     "src/common/catalog",
     "src/common/config",
     "src/common/datasource",
+    "src/common/decimal",
     "src/common/error",
     "src/common/frontend",
     "src/common/function",
-    "src/common/macro",
     "src/common/greptimedb-telemetry",
     "src/common/grpc",
     "src/common/grpc-expr",
+    "src/common/macro",
     "src/common/mem-prof",
     "src/common/meta",
+    "src/common/options",
     "src/common/plugins",
     "src/common/pprof",
     "src/common/procedure",
@@ -30,7 +32,6 @@ members = [
     "src/common/telemetry",
     "src/common/test-util",
     "src/common/time",
-    "src/common/decimal",
     "src/common/version",
     "src/common/wal",
     "src/datanode",
@@ -38,6 +39,7 @@ members = [
     "src/file-engine",
     "src/flow",
     "src/frontend",
+    "src/index",
     "src/log-store",
     "src/meta-client",
     "src/meta-srv",
@@ -57,7 +59,6 @@ members = [
     "src/sql",
     "src/store-api",
     "src/table",
-    "src/index",
     "tests-fuzz",
     "tests-integration",
     "tests/runner",
@@ -65,7 +66,7 @@ members = [
 resolver = "2"

 [workspace.package]
-version = "0.9.5"
+version = "0.10.2"
 edition = "2021"
 license = "Apache-2.0"
@@ -117,15 +118,16 @@ datafusion-sql = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
 datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
 derive_builder = "0.12"
 dotenv = "0.15"
-etcd-client = { version = "0.13" }
+etcd-client = "0.13"
 fst = "0.4.7"
 futures = "0.3"
 futures-util = "0.3"
-greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "255f87a3318ace3f88a67f76995a0e14910983f4" }
+greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "a875e976441188028353f7274a46a7e6e065c5d4" }
 hex = "0.4"
 humantime = "2.1"
 humantime-serde = "1.1"
 itertools = "0.10"
-jsonb = { git = "https://github.com/databendlabs/jsonb.git", rev = "46ad50fc71cf75afbf98eec455f7892a6387c1fc", default-features = false }
+jsonb = { git = "https://github.com/databendlabs/jsonb.git", rev = "8c8d2fc294a39f3ff08909d60f718639cfba3875", default-features = false }
 lazy_static = "1.4"
 meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "a10facb353b41460eeb98578868ebf19c2084fac" }
 mockall = "0.11.4"
@@ -151,7 +153,7 @@ raft-engine = { version = "0.4.1", default-features = false }
 rand = "0.8"
 ratelimit = "0.9"
 regex = "1.8"
-regex-automata = { version = "0.4" }
+regex-automata = "0.4"
 reqwest = { version = "0.12", default-features = false, features = [
     "json",
     "rustls-tls-native-roots",
@@ -182,11 +184,11 @@ strum = { version = "0.25", features = ["derive"] }
 tempfile = "3"
 tokio = { version = "1.40", features = ["full"] }
 tokio-postgres = "0.7"
-tokio-stream = { version = "0.1" }
+tokio-stream = "0.1"
 tokio-util = { version = "0.7", features = ["io-util", "compat"] }
 toml = "0.8.8"
 tonic = { version = "0.11", features = ["tls", "gzip", "zstd"] }
-tower = { version = "0.4" }
+tower = "0.4"
 tracing-appender = "0.2"
 tracing-subscriber = { version = "0.3", features = ["env-filter", "json", "fmt"] }
 typetag = "0.2"
@@ -214,6 +216,7 @@ common-grpc-expr = { path = "src/common/grpc-expr" }
 common-macro = { path = "src/common/macro" }
 common-mem-prof = { path = "src/common/mem-prof" }
 common-meta = { path = "src/common/meta" }
+common-options = { path = "src/common/options" }
 common-plugins = { path = "src/common/plugins" }
 common-pprof = { path = "src/common/pprof" }
 common-procedure = { path = "src/common/procedure" }
@@ -261,6 +264,8 @@ tokio-rustls = { git = "https://github.com/GreptimeTeam/tokio-rustls" }
 # This is commented, since we are not using aws-lc-sys, if we need to use it, we need to uncomment this line or use a release after this commit, or it wouldn't compile with gcc < 8.1
 # see https://github.com/aws/aws-lc-rs/pull/526
 # aws-lc-sys = { git ="https://github.com/aws/aws-lc-rs", rev = "556558441e3494af4b156ae95ebc07ebc2fd38aa" }
+# Apply a fix for pprof for unaligned pointer access
+pprof = { git = "https://github.com/GreptimeTeam/pprof-rs", rev = "1bd1e21" }

 [workspace.dependencies.meter-macros]
 git = "https://github.com/GreptimeTeam/greptime-meter.git"
```
`README.md` (±16)

```diff
@@ -6,7 +6,7 @@
 </picture>
 </p>

-<h2 align="center">Unified Time Series Database for Metrics, Logs, and Events</h2>
+<h2 align="center">Unified & Cost-Effective Time Series Database for Metrics, Logs, and Events</h2>

 <div align="center">
 <h3 align="center">
@@ -48,9 +48,21 @@
 </a>
 </div>

+- [Introduction](#introduction)
+- [**Features: Why GreptimeDB**](#why-greptimedb)
+- [Architecture](https://docs.greptime.com/contributor-guide/overview/#architecture)
+- [Try it for free](#try-greptimedb)
+- [Getting Started](#getting-started)
+- [Project Status](#project-status)
+- [Join the community](#community)
+- [Contributing](#contributing)
+- [Extension](#extension )
+- [License](#license)
+- [Acknowledgement](#acknowledgement)
+
 ## Introduction

-**GreptimeDB** is an open-source unified time-series database for **Metrics**, **Logs**, and **Events** (also **Traces** in plan). You can gain real-time insights from Edge to Cloud at any scale.
+**GreptimeDB** is an open-source unified & cost-effective time-series database for **Metrics**, **Logs**, and **Events** (also **Traces** in plan). You can gain real-time insights from Edge to Cloud at Any Scale.

 ## Why GreptimeDB
```
Configuration reference document (datanode section):

```diff
@@ -93,8 +93,8 @@
 | `storage` | -- | -- | The data storage options. |
 | `storage.data_home` | String | `/tmp/greptimedb/` | The working home directory. |
 | `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
-| `storage.cache_path` | String | Unset | Cache configuration for object storage such as 'S3' etc.<br/>The local file cache directory. |
-| `storage.cache_capacity` | String | Unset | The local file cache capacity in bytes. |
+| `storage.cache_path` | String | Unset | Cache configuration for object storage such as 'S3' etc. It is recommended to configure it when using object storage for better performance.<br/>The local file cache directory. |
+| `storage.cache_capacity` | String | Unset | The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger. |
 | `storage.bucket` | String | Unset | The S3 bucket name.<br/>**It's only used when the storage type is `S3`, `Oss` and `Gcs`**. |
 | `storage.root` | String | Unset | The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.<br/>**It's only used when the storage type is `S3`, `Oss` and `Azblob`**. |
 | `storage.access_key_id` | String | Unset | The access key id of the aws account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3` and `Oss`**. |
@@ -109,6 +109,11 @@
 | `storage.sas_token` | String | Unset | The sas token of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
 | `storage.endpoint` | String | Unset | The endpoint of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
 | `storage.region` | String | Unset | The region of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
+| `storage.http_client` | -- | -- | The http client options to the storage.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
+| `storage.http_client.pool_max_idle_per_host` | Integer | `1024` | The maximum idle connection per host allowed in the pool. |
+| `storage.http_client.connect_timeout` | String | `30s` | The timeout for only the connect phase of a http client. |
+| `storage.http_client.timeout` | String | `30s` | The total request timeout, applied from when the request starts connecting until the response body has finished.<br/>Also considered a total deadline. |
+| `storage.http_client.pool_idle_timeout` | String | `90s` | The timeout for idle sockets being kept-alive. |
 | `[[region_engine]]` | -- | -- | The region engine options. You can configure multiple region engines. |
 | `region_engine.mito` | -- | -- | The Mito engine options. |
 | `region_engine.mito.num_workers` | Integer | `8` | Number of region workers. |
@@ -126,9 +131,9 @@
 | `region_engine.mito.vector_cache_size` | String | Auto | Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
 | `region_engine.mito.page_cache_size` | String | Auto | Cache size for pages of SST row groups. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/8 of OS memory. |
 | `region_engine.mito.selector_result_cache_size` | String | Auto | Cache size for time series selector (e.g. `last_value()`). Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
-| `region_engine.mito.enable_experimental_write_cache` | Bool | `false` | Whether to enable the experimental write cache. |
+| `region_engine.mito.enable_experimental_write_cache` | Bool | `false` | Whether to enable the experimental write cache. It is recommended to enable it when using object storage for better performance. |
 | `region_engine.mito.experimental_write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}/write_cache`. |
-| `region_engine.mito.experimental_write_cache_size` | String | `512MB` | Capacity for write cache. |
+| `region_engine.mito.experimental_write_cache_size` | String | `1GiB` | Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger. |
 | `region_engine.mito.experimental_write_cache_ttl` | String | Unset | TTL for write cache. |
 | `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
 | `region_engine.mito.scan_parallelism` | Integer | `0` | Parallelism to scan a region (default: 1/4 of cpu cores).<br/>- `0`: using the default value (1/4 of cpu cores).<br/>- `1`: scan in current thread.<br/>- `n`: scan in parallelism n. |
```
The same row changes are repeated later in the same document (standalone section) at `@@ -416,8 +421,8 @@`, `@@ -432,6 +437,11 @@`, and `@@ -449,9 +459,9 @@`; the content is identical, only the line offsets differ.
Example config file (first of two):

```diff
@@ -294,14 +294,14 @@ data_home = "/tmp/greptimedb/"
 ## - `Oss`: the data is stored in the Aliyun OSS.
 type = "File"

-## Cache configuration for object storage such as 'S3' etc.
+## Cache configuration for object storage such as 'S3' etc. It is recommended to configure it when using object storage for better performance.
 ## The local file cache directory.
 ## @toml2docs:none-default
 cache_path = "/path/local_cache"

-## The local file cache capacity in bytes.
+## The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger.
 ## @toml2docs:none-default
-cache_capacity = "256MB"
+cache_capacity = "1GiB"

 ## The S3 bucket name.
 ## **It's only used when the storage type is `S3`, `Oss` and `Gcs`**.
@@ -375,6 +375,23 @@ endpoint = "https://s3.amazonaws.com"
 ## @toml2docs:none-default
 region = "us-west-2"

+## The http client options to the storage.
+## **It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**.
+[storage.http_client]
+
+## The maximum idle connection per host allowed in the pool.
+pool_max_idle_per_host = 1024
+
+## The timeout for only the connect phase of a http client.
+connect_timeout = "30s"
+
+## The total request timeout, applied from when the request starts connecting until the response body has finished.
+## Also considered a total deadline.
+timeout = "30s"
+
+## The timeout for idle sockets being kept-alive.
+pool_idle_timeout = "90s"
+
 # Custom storage options
 # [[storage.providers]]
 # name = "S3"
@@ -459,14 +476,14 @@ auto_flush_interval = "1h"
 ## @toml2docs:none-default="Auto"
 #+ selector_result_cache_size = "512MB"

-## Whether to enable the experimental write cache.
+## Whether to enable the experimental write cache. It is recommended to enable it when using object storage for better performance.
 enable_experimental_write_cache = false

 ## File system path for write cache, defaults to `{data_home}/write_cache`.
 experimental_write_cache_path = ""

-## Capacity for write cache.
-experimental_write_cache_size = "512MB"
+## Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger.
+experimental_write_cache_size = "1GiB"

 ## TTL for write cache.
 ## @toml2docs:none-default
```
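These four `http_client` option names map one-to-one onto `reqwest`'s `ClientBuilder` (reqwest is already a workspace dependency); whether the storage layer actually constructs its client this way is an assumption here, but the sketch shows the intended semantics of each knob:

```rust
use std::time::Duration;

// A minimal sketch mirroring the [storage.http_client] defaults above.
fn build_http_client() -> reqwest::Result<reqwest::Client> {
    reqwest::Client::builder()
        .pool_max_idle_per_host(1024)              // idle connections kept per host
        .connect_timeout(Duration::from_secs(30))  // connect phase only
        .timeout(Duration::from_secs(30))          // total request deadline
        .pool_idle_timeout(Duration::from_secs(90)) // keep-alive for idle sockets
        .build()
}
```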
The same changes are applied to a second example config file at `@@ -332,14 +332,14 @@`, `@@ -413,6 +413,23 @@`, and `@@ -497,14 +514,14 @@`; the content is identical, only the line offsets differ.
Log-level endpoint documentation (separator changed from `;` to `,`):

````diff
@@ -4,13 +4,13 @@

 example:
 ```bash
-curl --data "trace;flow=debug" 127.0.0.1:4000/debug/log_level
+curl --data "trace,flow=debug" 127.0.0.1:4000/debug/log_level
 ```
 And database will reply with something like:
 ```bash
-Log Level changed from Some("info") to "trace;flow=debug"%
+Log Level changed from Some("info") to "trace,flow=debug"%
 ```

-The data is a string in the format of `global_level;module1=level1;module2=level2;...` that follow the same rule of `RUST_LOG`.
+The data is a string in the format of `global_level,module1=level1,module2=level2,...` that follow the same rule of `RUST_LOG`.

 The module is the module name of the log, and the level is the log level. The log level can be one of the following: `trace`, `debug`, `info`, `warn`, `error`, `off`(case insensitive).
````
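Because the accepted string follows `RUST_LOG` rules, the comma-separated form parses directly as a `tracing-subscriber` filter; a minimal sketch (the workspace already depends on `tracing-subscriber` with the `env-filter` and `fmt` features):

```rust
use tracing_subscriber::{fmt, EnvFilter};

fn main() {
    // "trace,flow=debug": global level `trace`, but `debug` for the `flow` module,
    // exactly the string sent to /debug/log_level above.
    let filter = EnvFilter::try_new("trace,flow=debug").expect("valid RUST_LOG-style filter");
    fmt().with_env_filter(filter).init();
}
```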
Grafana dashboard README:

```diff
@@ -5,6 +5,13 @@ GreptimeDB's official Grafana dashboard.

 Status notify: we are still working on this config. It's expected to change frequently in the recent days. Please feel free to submit your feedback and/or contribution to this dashboard 🤗

+If you use Helm [chart](https://github.com/GreptimeTeam/helm-charts) to deploy GreptimeDB cluster, you can enable self-monitoring by setting the following values in your Helm chart:
+
+- `monitoring.enabled=true`: Deploys a standalone GreptimeDB instance dedicated to monitoring the cluster;
+- `grafana.enabled=true`: Deploys Grafana and automatically imports the monitoring dashboard;
+
+The standalone GreptimeDB instance will collect metrics from your cluster and the dashboard will be available in the Grafana UI. For detailed deployment instructions, please refer to our [Kubernetes deployment guide](https://docs.greptime.com/nightly/user-guide/deployments/deploy-on-kubernetes/getting-started).
+
 # How to use

 ## `greptimedb.json`
```
(One file's diff suppressed because it is too large.)
Install script (referenced above as `scripts/install.sh`), converted from bash to POSIX sh:

```diff
@@ -1,4 +1,4 @@
-#!/usr/bin/env bash
+#!/bin/sh

 set -ue

@@ -15,7 +15,7 @@ GITHUB_ORG=GreptimeTeam
 GITHUB_REPO=greptimedb
 BIN=greptime

-function get_os_type() {
+get_os_type() {
   os_type="$(uname -s)"

   case "$os_type" in
@@ -31,7 +31,7 @@ function get_os_type() {
   esac
 }

-function get_arch_type() {
+get_arch_type() {
   arch_type="$(uname -m)"

   case "$arch_type" in
@@ -53,7 +53,7 @@ function get_arch_type() {
   esac
 }

-function download_artifact() {
+download_artifact() {
   if [ -n "${OS_TYPE}" ] && [ -n "${ARCH_TYPE}" ]; then
     # Use the latest stable released version.
     # GitHub API reference: https://docs.github.com/en/rest/releases/releases?apiVersion=2022-11-28#get-the-latest-release.
```
Vector data type support in the gRPC column helpers:

```diff
@@ -36,15 +36,14 @@ use datatypes::vectors::{
     TimestampMillisecondVector, TimestampNanosecondVector, TimestampSecondVector, UInt32Vector,
     UInt64Vector, VectorRef,
 };
-use greptime_proto::v1;
 use greptime_proto::v1::column_data_type_extension::TypeExt;
 use greptime_proto::v1::ddl_request::Expr;
 use greptime_proto::v1::greptime_request::Request;
 use greptime_proto::v1::query_request::Query;
 use greptime_proto::v1::value::ValueData;
 use greptime_proto::v1::{
-    ColumnDataTypeExtension, DdlRequest, DecimalTypeExtension, JsonTypeExtension, QueryRequest,
-    Row, SemanticType,
+    self, ColumnDataTypeExtension, DdlRequest, DecimalTypeExtension, JsonTypeExtension,
+    QueryRequest, Row, SemanticType, VectorTypeExtension,
 };
 use paste::paste;
 use snafu::prelude::*;
@@ -150,6 +149,17 @@ impl From<ColumnDataTypeWrapper> for ConcreteDataType {
                     ConcreteDataType::decimal128_default_datatype()
                 }
             }
+            ColumnDataType::Vector => {
+                if let Some(TypeExt::VectorType(d)) = datatype_wrapper
+                    .datatype_ext
+                    .as_ref()
+                    .and_then(|datatype_ext| datatype_ext.type_ext.as_ref())
+                {
+                    ConcreteDataType::vector_datatype(d.dim)
+                } else {
+                    ConcreteDataType::vector_default_datatype()
+                }
+            }
         }
     }
 }
@@ -231,6 +241,15 @@ impl ColumnDataTypeWrapper {
             }),
         }
     }
+
+    pub fn vector_datatype(dim: u32) -> Self {
+        ColumnDataTypeWrapper {
+            datatype: ColumnDataType::Vector,
+            datatype_ext: Some(ColumnDataTypeExtension {
+                type_ext: Some(TypeExt::VectorType(VectorTypeExtension { dim })),
+            }),
+        }
+    }
 }

 impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
@@ -249,7 +268,7 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
             ConcreteDataType::UInt64(_) => ColumnDataType::Uint64,
             ConcreteDataType::Float32(_) => ColumnDataType::Float32,
             ConcreteDataType::Float64(_) => ColumnDataType::Float64,
-            ConcreteDataType::Binary(_) | ConcreteDataType::Json(_) => ColumnDataType::Binary,
+            ConcreteDataType::Binary(_) => ColumnDataType::Binary,
             ConcreteDataType::String(_) => ColumnDataType::String,
             ConcreteDataType::Date(_) => ColumnDataType::Date,
             ConcreteDataType::DateTime(_) => ColumnDataType::Datetime,
@@ -271,6 +290,8 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
                 IntervalType::MonthDayNano(_) => ColumnDataType::IntervalMonthDayNano,
             },
             ConcreteDataType::Decimal128(_) => ColumnDataType::Decimal128,
+            ConcreteDataType::Json(_) => ColumnDataType::Json,
+            ConcreteDataType::Vector(_) => ColumnDataType::Vector,
             ConcreteDataType::Null(_)
             | ConcreteDataType::List(_)
             | ConcreteDataType::Dictionary(_)
@@ -289,15 +310,17 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
                 })),
             })
             }
-            ColumnDataType::Binary => {
-                if datatype == ConcreteDataType::json_datatype() {
-                    // Json is the same as binary in proto. The extension marks the binary in proto is actually a json.
-                    Some(ColumnDataTypeExtension {
-                        type_ext: Some(TypeExt::JsonType(JsonTypeExtension::JsonBinary.into())),
-                    })
-                } else {
-                    None
-                }
-            }
+            ColumnDataType::Json => datatype.as_json().map(|_| ColumnDataTypeExtension {
+                type_ext: Some(TypeExt::JsonType(JsonTypeExtension::JsonBinary.into())),
+            }),
+            ColumnDataType::Vector => {
+                datatype
+                    .as_vector()
+                    .map(|vector_type| ColumnDataTypeExtension {
+                        type_ext: Some(TypeExt::VectorType(VectorTypeExtension {
+                            dim: vector_type.dim as _,
+                        })),
+                    })
+            }
             _ => None,
         };
@@ -422,6 +445,10 @@ pub fn values_with_capacity(datatype: ColumnDataType, capacity: usize) -> Values
             string_values: Vec::with_capacity(capacity),
             ..Default::default()
         },
+        ColumnDataType::Vector => Values {
+            binary_values: Vec::with_capacity(capacity),
+            ..Default::default()
+        },
     }
 }
@@ -500,13 +527,14 @@ fn ddl_request_type(request: &DdlRequest) -> &'static str {
     match request.expr {
         Some(Expr::CreateDatabase(_)) => "ddl.create_database",
         Some(Expr::CreateTable(_)) => "ddl.create_table",
-        Some(Expr::Alter(_)) => "ddl.alter",
+        Some(Expr::AlterTable(_)) => "ddl.alter_table",
         Some(Expr::DropTable(_)) => "ddl.drop_table",
         Some(Expr::TruncateTable(_)) => "ddl.truncate_table",
         Some(Expr::CreateFlow(_)) => "ddl.create_flow",
         Some(Expr::DropFlow(_)) => "ddl.drop_flow",
         Some(Expr::CreateView(_)) => "ddl.create_view",
         Some(Expr::DropView(_)) => "ddl.drop_view",
+        Some(Expr::AlterDatabase(_)) => "ddl.alter_database",
         None => "ddl.empty",
     }
 }
@@ -673,6 +701,7 @@ pub fn pb_values_to_vector_ref(data_type: &ConcreteDataType, values: Values) ->
             Decimal128::from_value_precision_scale(x.hi, x.lo, d.precision(), d.scale()).into()
         }),
     )),
+    ConcreteDataType::Vector(_) => Arc::new(BinaryVector::from_vec(values.binary_values)),
     ConcreteDataType::Null(_)
     | ConcreteDataType::List(_)
     | ConcreteDataType::Dictionary(_)
@@ -838,6 +867,7 @@ pub fn pb_values_to_values(data_type: &ConcreteDataType, values: Values) -> Vec<
                 ))
             })
             .collect(),
+        ConcreteDataType::Vector(_) => values.binary_values.into_iter().map(|v| v.into()).collect(),
         ConcreteDataType::Null(_)
         | ConcreteDataType::List(_)
         | ConcreteDataType::Dictionary(_)
@@ -862,10 +892,7 @@ pub fn is_column_type_value_eq(
     ColumnDataTypeWrapper::try_new(type_value, type_extension)
         .map(|wrapper| {
             let datatype = ConcreteDataType::from(wrapper);
-            (datatype == *expect_type)
-                // Json type leverage binary type in pb, so this is valid.
-                || (datatype == ConcreteDataType::binary_datatype()
-                    && *expect_type == ConcreteDataType::json_datatype())
+            expect_type == &datatype
         })
         .unwrap_or(false)
 }
@@ -1152,6 +1179,10 @@ mod tests {
         let values = values_with_capacity(ColumnDataType::Decimal128, 2);
         let values = values.decimal128_values;
         assert_eq!(2, values.capacity());
+
+        let values = values_with_capacity(ColumnDataType::Vector, 2);
+        let values = values.binary_values;
+        assert_eq!(2, values.capacity());
     }

     #[test]
@@ -1239,7 +1270,11 @@ mod tests {
         assert_eq!(
             ConcreteDataType::decimal128_datatype(10, 2),
             ColumnDataTypeWrapper::decimal128_datatype(10, 2).into()
-        )
+        );
+        assert_eq!(
+            ConcreteDataType::vector_datatype(3),
+            ColumnDataTypeWrapper::vector_datatype(3).into()
+        );
     }

     #[test]
@@ -1335,6 +1370,10 @@ mod tests {
             .try_into()
             .unwrap()
         );
+        assert_eq!(
+            ColumnDataTypeWrapper::vector_datatype(3),
+            ConcreteDataType::vector_datatype(3).try_into().unwrap()
+        );

         let result: Result<ColumnDataTypeWrapper> = ConcreteDataType::null_datatype().try_into();
         assert!(result.is_err());
```
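A hypothetical round-trip mirroring the new test assertions; the module paths below (`api::helper`, `datatypes::prelude`) are assumptions, not verified against the full source tree:

```rust
use api::helper::ColumnDataTypeWrapper;
use datatypes::prelude::ConcreteDataType;

fn vector_roundtrip() {
    // gRPC wrapper -> concrete type: the dimension rides in VectorTypeExtension.
    let concrete: ConcreteDataType = ColumnDataTypeWrapper::vector_datatype(3).into();
    assert_eq!(concrete, ConcreteDataType::vector_datatype(3));

    // Concrete type -> gRPC wrapper, via the TryFrom impl shown in the diff.
    let wrapper: ColumnDataTypeWrapper =
        ConcreteDataType::vector_datatype(3).try_into().unwrap();
    assert_eq!(wrapper, ColumnDataTypeWrapper::vector_datatype(3));
}
```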
Inverted-index options in gRPC column definitions:

```diff
@@ -15,8 +15,10 @@
 use std::collections::HashMap;

 use datatypes::schema::{
-    ColumnDefaultConstraint, ColumnSchema, FulltextOptions, COMMENT_KEY, FULLTEXT_KEY,
+    ColumnDefaultConstraint, ColumnSchema, FulltextAnalyzer, FulltextOptions, COMMENT_KEY,
+    FULLTEXT_KEY, INVERTED_INDEX_KEY,
 };
+use greptime_proto::v1::Analyzer;
 use snafu::ResultExt;

 use crate::error::{self, Result};
@@ -25,6 +27,8 @@ use crate::v1::{ColumnDef, ColumnOptions, SemanticType};

 /// Key used to store fulltext options in gRPC column options.
 const FULLTEXT_GRPC_KEY: &str = "fulltext";
+/// Key used to store inverted index options in gRPC column options.
+const INVERTED_INDEX_GRPC_KEY: &str = "inverted_index";

 /// Tries to construct a `ColumnSchema` from the given `ColumnDef`.
 pub fn try_as_column_schema(column_def: &ColumnDef) -> Result<ColumnSchema> {
@@ -49,10 +53,13 @@ pub fn try_as_column_schema(column_def: &ColumnDef) -> Result<ColumnSchema> {
     if !column_def.comment.is_empty() {
         metadata.insert(COMMENT_KEY.to_string(), column_def.comment.clone());
     }
-    if let Some(options) = column_def.options.as_ref()
-        && let Some(fulltext) = options.options.get(FULLTEXT_GRPC_KEY)
-    {
-        metadata.insert(FULLTEXT_KEY.to_string(), fulltext.to_string());
+    if let Some(options) = column_def.options.as_ref() {
+        if let Some(fulltext) = options.options.get(FULLTEXT_GRPC_KEY) {
+            metadata.insert(FULLTEXT_KEY.to_string(), fulltext.clone());
+        }
+        if let Some(inverted_index) = options.options.get(INVERTED_INDEX_GRPC_KEY) {
+            metadata.insert(INVERTED_INDEX_KEY.to_string(), inverted_index.clone());
+        }
     }

     ColumnSchema::new(&column_def.name, data_type.into(), column_def.is_nullable)
@@ -70,7 +77,12 @@ pub fn options_from_column_schema(column_schema: &ColumnSchema) -> Option<Column
     if let Some(fulltext) = column_schema.metadata().get(FULLTEXT_KEY) {
         options
             .options
-            .insert(FULLTEXT_GRPC_KEY.to_string(), fulltext.to_string());
+            .insert(FULLTEXT_GRPC_KEY.to_string(), fulltext.clone());
+    }
+    if let Some(inverted_index) = column_schema.metadata().get(INVERTED_INDEX_KEY) {
+        options
+            .options
+            .insert(INVERTED_INDEX_GRPC_KEY.to_string(), inverted_index.clone());
     }

     (!options.options.is_empty()).then_some(options)
@@ -93,6 +105,14 @@ pub fn options_from_fulltext(fulltext: &FulltextOptions) -> Result<Option<Column
     Ok((!options.options.is_empty()).then_some(options))
 }

+/// Tries to construct a `FulltextAnalyzer` from the given analyzer.
+pub fn as_fulltext_option(analyzer: Analyzer) -> FulltextAnalyzer {
+    match analyzer {
+        Analyzer::English => FulltextAnalyzer::English,
+        Analyzer::Chinese => FulltextAnalyzer::Chinese,
+    }
+}
+
 #[cfg(test)]
 mod tests {
@@ -115,10 +135,13 @@ mod tests {
             comment: "test_comment".to_string(),
             datatype_extension: None,
             options: Some(ColumnOptions {
-                options: HashMap::from([(
-                    FULLTEXT_GRPC_KEY.to_string(),
-                    "{\"enable\":true}".to_string(),
-                )]),
+                options: HashMap::from([
+                    (
+                        FULLTEXT_GRPC_KEY.to_string(),
+                        "{\"enable\":true}".to_string(),
+                    ),
+                    (INVERTED_INDEX_GRPC_KEY.to_string(), "true".to_string()),
+                ]),
             }),
         };
@@ -139,6 +162,7 @@ mod tests {
                 ..Default::default()
             }
         );
+        assert!(schema.is_inverted_indexed());
     }

     #[test]
@@ -153,12 +177,17 @@ mod tests {
             analyzer: FulltextAnalyzer::English,
             case_sensitive: false,
         })
-        .unwrap();
+        .unwrap()
+        .set_inverted_index(true);
         let options = options_from_column_schema(&schema).unwrap();
         assert_eq!(
             options.options.get(FULLTEXT_GRPC_KEY).unwrap(),
             "{\"enable\":true,\"analyzer\":\"English\",\"case-sensitive\":false}"
         );
+        assert_eq!(
+            options.options.get(INVERTED_INDEX_GRPC_KEY).unwrap(),
+            "true"
+        );
     }

     #[test]
```
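A sketch of the new metadata round-trip using names from this diff; the import paths are assumptions, and `set_inverted_index` is taken to be a chainable setter as the test above suggests:

```rust
use api::v1::column_def::options_from_column_schema; // assumed path
use datatypes::prelude::ConcreteDataType;            // assumed path
use datatypes::schema::ColumnSchema;

fn inverted_index_options() {
    // Marking a column as inverted-indexed should surface an
    // "inverted_index" => "true" entry in the gRPC ColumnOptions.
    let schema = ColumnSchema::new("title", ConcreteDataType::string_datatype(), true)
        .set_inverted_index(true);
    let options = options_from_column_schema(&schema).unwrap();
    assert_eq!(options.options.get("inverted_index").unwrap(), "true");
}
```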
New catalog error variant:

```diff
@@ -178,6 +178,12 @@ pub enum Error {
         location: Location,
     },

+    #[snafu(display("Partition manager not found, it's not expected."))]
+    PartitionManagerNotFound {
+        #[snafu(implicit)]
+        location: Location,
+    },
+
     #[snafu(display("Failed to find table partitions"))]
     FindPartitions { source: partition::error::Error },

@@ -301,6 +307,7 @@ impl ErrorExt for Error {
             | Error::CastManager { .. }
             | Error::Json { .. }
             | Error::GetInformationExtension { .. }
+            | Error::PartitionManagerNotFound { .. }
             | Error::ProcedureIdNotFound { .. } => StatusCode::Unexpected,

             Error::ViewPlanColumnsChanged { .. } => StatusCode::InvalidArguments,
```
`CachedMetaKvBackend` renamed to `CachedKvBackend` and decoupled from `MetaClient`:

```diff
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-pub use client::{CachedMetaKvBackend, CachedMetaKvBackendBuilder, MetaKvBackend};
+pub use client::{CachedKvBackend, CachedKvBackendBuilder, MetaKvBackend};

 mod client;
 mod manager;
```
```diff
@@ -22,6 +22,7 @@ use common_error::ext::BoxedError;
 use common_meta::cache_invalidator::KvCacheInvalidator;
 use common_meta::error::Error::CacheNotGet;
 use common_meta::error::{CacheNotGetSnafu, Error, ExternalSnafu, GetKvCacheSnafu, Result};
+use common_meta::kv_backend::txn::{Txn, TxnResponse};
 use common_meta::kv_backend::{KvBackend, KvBackendRef, TxnService};
 use common_meta::rpc::store::{
     BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse, BatchPutRequest,
@@ -42,20 +43,20 @@ const DEFAULT_CACHE_MAX_CAPACITY: u64 = 10000;
 const DEFAULT_CACHE_TTL: Duration = Duration::from_secs(10 * 60);
 const DEFAULT_CACHE_TTI: Duration = Duration::from_secs(5 * 60);

-pub struct CachedMetaKvBackendBuilder {
+pub struct CachedKvBackendBuilder {
     cache_max_capacity: Option<u64>,
     cache_ttl: Option<Duration>,
     cache_tti: Option<Duration>,
-    meta_client: Arc<MetaClient>,
+    inner: KvBackendRef,
 }

-impl CachedMetaKvBackendBuilder {
-    pub fn new(meta_client: Arc<MetaClient>) -> Self {
+impl CachedKvBackendBuilder {
+    pub fn new(inner: KvBackendRef) -> Self {
         Self {
             cache_max_capacity: None,
             cache_ttl: None,
             cache_tti: None,
-            meta_client,
+            inner,
         }
     }
@@ -74,7 +75,7 @@ impl CachedKvBackendBuilder {
         self
     }

-    pub fn build(self) -> CachedMetaKvBackend {
+    pub fn build(self) -> CachedKvBackend {
         let cache_max_capacity = self
             .cache_max_capacity
             .unwrap_or(DEFAULT_CACHE_MAX_CAPACITY);
@@ -85,14 +86,11 @@ impl CachedKvBackendBuilder {
             .time_to_live(cache_ttl)
             .time_to_idle(cache_tti)
             .build();

-        let kv_backend = Arc::new(MetaKvBackend {
-            client: self.meta_client,
-        });
+        let kv_backend = self.inner;
         let name = format!("CachedKvBackend({})", kv_backend.name());
         let version = AtomicUsize::new(0);

-        CachedMetaKvBackend {
+        CachedKvBackend {
             kv_backend,
             cache,
             name,
@@ -112,19 +110,29 @@ pub type CacheBackend = Cache<Vec<u8>, KeyValue>;
 /// Therefore, it is recommended to use CachedMetaKvBackend to only read metadata related
 /// information. Note: If you read other information, you may read expired data, which depends on
 /// TTL and TTI for cache.
-pub struct CachedMetaKvBackend {
+pub struct CachedKvBackend {
     kv_backend: KvBackendRef,
     cache: CacheBackend,
     name: String,
     version: AtomicUsize,
 }

-impl TxnService for CachedMetaKvBackend {
+#[async_trait::async_trait]
+impl TxnService for CachedKvBackend {
     type Error = Error;
+
+    async fn txn(&self, txn: Txn) -> std::result::Result<TxnResponse, Self::Error> {
+        // TODO(hl): txn of CachedKvBackend simply pass through to inner backend without invalidating caches.
+        self.kv_backend.txn(txn).await
+    }
+
+    fn max_txn_ops(&self) -> usize {
+        self.kv_backend.max_txn_ops()
+    }
 }

 #[async_trait::async_trait]
-impl KvBackend for CachedMetaKvBackend {
+impl KvBackend for CachedKvBackend {
     fn name(&self) -> &str {
         &self.name
     }
@@ -305,7 +313,7 @@ impl KvBackend for CachedKvBackend {
 }

 #[async_trait::async_trait]
-impl KvCacheInvalidator for CachedMetaKvBackend {
+impl KvCacheInvalidator for CachedKvBackend {
     async fn invalidate_key(&self, key: &[u8]) {
         self.create_new_version();
         self.cache.invalidate(key).await;
@@ -313,7 +321,7 @@ impl KvCacheInvalidator for CachedKvBackend {
     }
 }

-impl CachedMetaKvBackend {
+impl CachedKvBackend {
     // only for test
     #[cfg(test)]
     fn wrap(kv_backend: KvBackendRef) -> Self {
@@ -466,7 +474,7 @@ mod tests {
     use common_meta::rpc::KeyValue;
     use dashmap::DashMap;

-    use super::CachedMetaKvBackend;
+    use super::CachedKvBackend;

     #[derive(Default)]
     pub struct SimpleKvBackend {
@@ -540,7 +548,7 @@ mod tests {
     async fn test_cached_kv_backend() {
         let simple_kv = Arc::new(SimpleKvBackend::default());
         let get_execute_times = simple_kv.get_execute_times.clone();
-        let cached_kv = CachedMetaKvBackend::wrap(simple_kv);
+        let cached_kv = CachedKvBackend::wrap(simple_kv);

         add_some_vals(&cached_kv).await;
```
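The net effect of the rename is that the cache layer no longer owns a `MetaClient`; callers build the inner backend themselves and hand it in. A sketch of the new wiring under that assumption (type and field names are taken from this diff, not verified against the full source):

```rust
use std::sync::Arc;

fn build_cached_backend(meta_client: Arc<MetaClient>) -> CachedKvBackend {
    // The meta-client-backed store is now constructed explicitly by the caller...
    let inner: KvBackendRef = Arc::new(MetaKvBackend { client: meta_client });
    // ...and the cache wrapper is generic over whatever backend it is given,
    // which is also what lets tests substitute SimpleKvBackend via `wrap`.
    CachedKvBackendBuilder::new(inner).build()
}
```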
`information_schema.partitions` now requires the partition manager (the standalone single-partition fallback is removed):

```diff
@@ -34,15 +34,14 @@ use datatypes::vectors::{
 };
 use futures::{StreamExt, TryStreamExt};
 use partition::manager::PartitionInfo;
-use partition::partition::PartitionDef;
 use snafu::{OptionExt, ResultExt};
-use store_api::storage::{RegionId, ScanRequest, TableId};
+use store_api::storage::{ScanRequest, TableId};
 use table::metadata::{TableInfo, TableType};

 use super::PARTITIONS;
 use crate::error::{
-    CreateRecordBatchSnafu, FindPartitionsSnafu, InternalSnafu, Result,
-    UpgradeWeakCatalogManagerRefSnafu,
+    CreateRecordBatchSnafu, FindPartitionsSnafu, InternalSnafu, PartitionManagerNotFoundSnafu,
+    Result, UpgradeWeakCatalogManagerRefSnafu,
 };
 use crate::kvbackend::KvBackendCatalogManager;
 use crate::system_schema::information_schema::{InformationTable, Predicates};
@@ -236,7 +235,8 @@ impl InformationSchemaPartitionsBuilder {
         let partition_manager = catalog_manager
             .as_any()
             .downcast_ref::<KvBackendCatalogManager>()
-            .map(|catalog_manager| catalog_manager.partition_manager());
+            .map(|catalog_manager| catalog_manager.partition_manager())
+            .context(PartitionManagerNotFoundSnafu)?;

         let predicates = Predicates::from_scan_request(&request);
@@ -262,27 +262,10 @@ impl InformationSchemaPartitionsBuilder {
         let table_ids: Vec<TableId> =
             table_infos.iter().map(|info| info.ident.table_id).collect();

-        let mut table_partitions = if let Some(partition_manager) = &partition_manager {
-            partition_manager
-                .batch_find_table_partitions(&table_ids)
-                .await
-                .context(FindPartitionsSnafu)?
-        } else {
-            // Current node must be a standalone instance, contains only one partition by default.
-            // TODO(dennis): change it when we support multi-regions for standalone.
-            table_ids
-                .into_iter()
-                .map(|table_id| {
-                    (
-                        table_id,
-                        vec![PartitionInfo {
-                            id: RegionId::new(table_id, 0),
-                            partition: PartitionDef::new(vec![], vec![]),
-                        }],
-                    )
-                })
-                .collect()
-        };
+        let mut table_partitions = partition_manager
+            .batch_find_table_partitions(&table_ids)
+            .await
+            .context(FindPartitionsSnafu)?;

         for table_info in table_infos {
             let partitions = table_partitions
```
`information_schema.schemata` options formatting fix:

```diff
@@ -180,7 +180,7 @@ impl InformationSchemaSchemataBuilder {
                 .context(TableMetadataManagerSnafu)?
                 // information_schema is not available from this
                 // table_metadata_manager and we return None
-                .map(|schema_opts| format!("{schema_opts}"))
+                .map(|schema_opts| format!("{}", schema_opts.into_inner()))
         } else {
             None
         };
```
`information_schema.tables` now reports real region statistics:

```diff
@@ -12,13 +12,16 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

+use std::collections::HashSet;
 use std::sync::{Arc, Weak};

 use arrow_schema::SchemaRef as ArrowSchemaRef;
-use common_catalog::consts::INFORMATION_SCHEMA_TABLES_TABLE_ID;
+use common_catalog::consts::{INFORMATION_SCHEMA_TABLES_TABLE_ID, MITO_ENGINE};
 use common_error::ext::BoxedError;
+use common_meta::datanode::RegionStat;
 use common_recordbatch::adapter::RecordBatchStreamAdapter;
 use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
+use common_telemetry::error;
 use datafusion::execution::TaskContext;
 use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
 use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
@@ -31,7 +34,7 @@ use datatypes::vectors::{
 };
 use futures::TryStreamExt;
 use snafu::{OptionExt, ResultExt};
-use store_api::storage::{ScanRequest, TableId};
+use store_api::storage::{RegionId, ScanRequest, TableId};
 use table::metadata::{TableInfo, TableType};

 use super::TABLES;
@@ -39,6 +42,7 @@ use crate::error::{
     CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
 };
 use crate::system_schema::information_schema::{InformationTable, Predicates};
+use crate::system_schema::utils;
 use crate::CatalogManager;

 pub const TABLE_CATALOG: &str = "table_catalog";
@@ -234,17 +238,51 @@ impl InformationSchemaTablesBuilder {
             .context(UpgradeWeakCatalogManagerRefSnafu)?;
         let predicates = Predicates::from_scan_request(&request);

+        let information_extension = utils::information_extension(&self.catalog_manager)?;
+
+        // TODO(dennis): `region_stats` API is not stable in distributed cluster because of network issue etc.
+        // But we don't want the statements such as `show tables` fail,
+        // so using `unwrap_or_else` here instead of `?` operator.
+        let region_stats = information_extension
+            .region_stats()
+            .await
+            .map_err(|e| {
+                error!(e; "Failed to call region_stats");
+                e
+            })
+            .unwrap_or_else(|_| vec![]);
+
         for schema_name in catalog_manager.schema_names(&catalog_name, None).await? {
             let mut stream = catalog_manager.tables(&catalog_name, &schema_name, None);

             while let Some(table) = stream.try_next().await? {
                 let table_info = table.table_info();

+                // TODO(dennis): make it working for metric engine
+                let table_region_stats =
+                    if table_info.meta.engine == MITO_ENGINE || table_info.is_physical_table() {
+                        let region_ids = table_info
+                            .meta
+                            .region_numbers
+                            .iter()
+                            .map(|n| RegionId::new(table_info.ident.table_id, *n))
+                            .collect::<HashSet<_>>();
+
+                        region_stats
+                            .iter()
+                            .filter(|stat| region_ids.contains(&stat.id))
+                            .collect::<Vec<_>>()
+                    } else {
+                        vec![]
+                    };
+
                 self.add_table(
                     &predicates,
                     &catalog_name,
                     &schema_name,
                     table_info,
                     table.table_type(),
+                    &table_region_stats,
                 );
             }
         }
@@ -260,6 +298,7 @@ impl InformationSchemaTablesBuilder {
         schema_name: &str,
         table_info: Arc<TableInfo>,
         table_type: TableType,
+        region_stats: &[&RegionStat],
     ) {
         let table_name = table_info.name.as_ref();
         let table_id = table_info.table_id();
@@ -273,7 +312,9 @@ impl InformationSchemaTablesBuilder {

         let row = [
             (TABLE_CATALOG, &Value::from(catalog_name)),
+            (TABLE_ID, &Value::from(table_id)),
             (TABLE_SCHEMA, &Value::from(schema_name)),
+            (ENGINE, &Value::from(engine)),
             (TABLE_NAME, &Value::from(table_name)),
             (TABLE_TYPE, &Value::from(table_type_text)),
         ];
@@ -287,21 +328,39 @@ impl InformationSchemaTablesBuilder {
         self.table_names.push(Some(table_name));
         self.table_types.push(Some(table_type_text));
         self.table_ids.push(Some(table_id));

+        let data_length = region_stats.iter().map(|stat| stat.sst_size).sum();
+        let table_rows = region_stats.iter().map(|stat| stat.num_rows).sum();
+        let index_length = region_stats.iter().map(|stat| stat.index_size).sum();
+
+        // It's not precise, but it is acceptable for long-term data storage.
+        let avg_row_length = if table_rows > 0 {
+            let total_data_length = data_length
+                + region_stats
+                    .iter()
+                    .map(|stat| stat.memtable_size)
+                    .sum::<u64>();
+
+            total_data_length / table_rows
+        } else {
+            0
+        };
+
+        self.data_length.push(Some(data_length));
+        self.index_length.push(Some(index_length));
+        self.table_rows.push(Some(table_rows));
+        self.avg_row_length.push(Some(avg_row_length));
+
         // TODO(sunng87): use real data for these fields
-        self.data_length.push(Some(0));
         self.max_data_length.push(Some(0));
-        self.index_length.push(Some(0));
-        self.avg_row_length.push(Some(0));
-        self.max_index_length.push(Some(0));
         self.checksum.push(Some(0));
-        self.table_rows.push(Some(0));
+        self.max_index_length.push(Some(0));
         self.data_free.push(Some(0));
         self.auto_increment.push(Some(0));
         self.row_format.push(Some("Fixed"));
         self.table_collation.push(Some("utf8_bin"));
         self.update_time.push(None);
         self.check_time.push(None);

         // use mariadb default table version number here
         self.version.push(Some(11));
         self.table_comment.push(table_info.desc.as_deref());
```
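The average-row-length estimate divides on-disk SST bytes plus in-memory memtable bytes by the total row count. A self-contained sketch of the same arithmetic, using a local stand-in for `common_meta::datanode::RegionStat`:

```rust
// Local stand-in, not the real common_meta type.
struct RegionStat {
    sst_size: u64,
    memtable_size: u64,
    num_rows: u64,
}

/// avg_row_length = (sum(sst_size) + sum(memtable_size)) / sum(num_rows), 0 if empty.
fn avg_row_length(stats: &[RegionStat]) -> u64 {
    let data_length: u64 = stats.iter().map(|s| s.sst_size).sum();
    let table_rows: u64 = stats.iter().map(|s| s.num_rows).sum();
    if table_rows > 0 {
        let total = data_length + stats.iter().map(|s| s.memtable_size).sum::<u64>();
        total / table_rows
    } else {
        0
    }
}

fn main() {
    // Two regions: 1 MiB + 2 MiB of SSTs, 512 KiB in memtables, 30_000 rows
    // in total: (3 MiB + 512 KiB) / 30_000 = 122 bytes per row (integer division).
    let stats = [
        RegionStat { sst_size: 1 << 20, memtable_size: 512 << 10, num_rows: 10_000 },
        RegionStat { sst_size: 2 << 20, memtable_size: 0, num_rows: 20_000 },
    ];
    assert_eq!(avg_row_length(&stats), 122);
}
```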
`AlterExpr` renamed to `AlterTableExpr` in the client:

```diff
@@ -18,7 +18,7 @@ use api::v1::greptime_database_client::GreptimeDatabaseClient;
 use api::v1::greptime_request::Request;
 use api::v1::query_request::Query;
 use api::v1::{
-    AlterExpr, AuthHeader, CreateTableExpr, DdlRequest, GreptimeRequest, InsertRequests,
+    AlterTableExpr, AuthHeader, CreateTableExpr, DdlRequest, GreptimeRequest, InsertRequests,
     QueryRequest, RequestHeader,
 };
 use arrow_flight::Ticket;
@@ -211,9 +211,9 @@ impl Database {
         .await
     }

-    pub async fn alter(&self, expr: AlterExpr) -> Result<Output> {
+    pub async fn alter(&self, expr: AlterTableExpr) -> Result<Output> {
         self.do_get(Request::Ddl(DdlRequest {
-            expr: Some(DdlExpr::Alter(expr)),
+            expr: Some(DdlExpr::AlterTable(expr)),
         }))
         .await
     }
```
`cmd` crate dependencies:

```diff
@@ -33,6 +33,7 @@ common-error.workspace = true
 common-grpc.workspace = true
 common-macro.workspace = true
 common-meta.workspace = true
+common-options.workspace = true
 common-procedure.workspace = true
 common-query.workspace = true
 common-recordbatch.workspace = true
@@ -52,6 +53,7 @@ flow.workspace = true
 frontend = { workspace = true, default-features = false }
 futures.workspace = true
 human-panic = "2.0"
+humantime.workspace = true
 lazy_static.workspace = true
 meta-client.workspace = true
 meta-srv.workspace = true
```
@@ -12,11 +12,15 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::time::Duration;

use base64::engine::general_purpose;
use base64::Engine;
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use humantime::format_duration;
use serde_json::Value;
use servers::http::greptime_result_v1::GreptimedbV1Response;
use servers::http::header::constants::GREPTIME_DB_HEADER_TIMEOUT;
use servers::http::result::greptime_result_v1::GreptimedbV1Response;
use servers::http::GreptimeQueryOutput;
use snafu::ResultExt;

@@ -26,10 +30,16 @@ pub(crate) struct DatabaseClient {
    addr: String,
    catalog: String,
    auth_header: Option<String>,
    timeout: Duration,
}

impl DatabaseClient {
    pub fn new(addr: String, catalog: String, auth_basic: Option<String>) -> Self {
    pub fn new(
        addr: String,
        catalog: String,
        auth_basic: Option<String>,
        timeout: Duration,
    ) -> Self {
        let auth_header = if let Some(basic) = auth_basic {
            let encoded = general_purpose::STANDARD.encode(basic);
            Some(format!("basic {}", encoded))
@@ -41,6 +51,7 @@ impl DatabaseClient {
            addr,
            catalog,
            auth_header,
            timeout,
        }
    }

@@ -63,6 +74,11 @@ impl DatabaseClient {
            request = request.header("Authorization", auth);
        }

        request = request.header(
            GREPTIME_DB_HEADER_TIMEOUT,
            format_duration(self.timeout).to_string(),
        );

        let response = request.send().await.with_context(|_| HttpQuerySqlSnafu {
            reason: format!("bad url: {}", url),
        })?;
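A small sketch of the header value being sent above, assuming only the `humantime` crate: the CLI's `None` default becomes `Duration::default()` (zero), which renders as `0s` and tells the server to disable its default timeout.

use std::time::Duration;
use humantime::{format_duration, parse_duration};

fn main() {
    // `None` is mapped to the zero duration, rendered as "0s".
    assert_eq!(format_duration(Duration::default()).to_string(), "0s");

    // A user-supplied `--timeout 90s` round-trips through the same representation.
    let timeout = parse_duration("90s").unwrap();
    assert_eq!(format_duration(timeout).to_string(), "1m 30s");
}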
@@ -15,6 +15,7 @@
use std::collections::HashSet;
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Duration;

use async_trait::async_trait;
use clap::{Parser, ValueEnum};
@@ -83,14 +84,26 @@ pub struct ExportCommand {
    /// The basic authentication for connecting to the server
    #[clap(long)]
    auth_basic: Option<String>,

    /// The timeout of invoking the database.
    ///
    /// It is used to override the server-side timeout setting.
    /// The default behavior disables the server-side default timeout (i.e. `0s`).
    #[clap(long, value_parser = humantime::parse_duration)]
    timeout: Option<Duration>,
}

impl ExportCommand {
    pub async fn build(&self, guard: Vec<WorkerGuard>) -> Result<Instance> {
        let (catalog, schema) = database::split_database(&self.database)?;

        let database_client =
            DatabaseClient::new(self.addr.clone(), catalog.clone(), self.auth_basic.clone());
        let database_client = DatabaseClient::new(
            self.addr.clone(),
            catalog.clone(),
            self.auth_basic.clone(),
            // Treats `None` as `0s` to disable the server-side default timeout.
            self.timeout.unwrap_or_default(),
        );

        Ok(Instance::new(
            Box::new(Export {
@@ -14,6 +14,7 @@

use std::path::PathBuf;
use std::sync::Arc;
use std::time::Duration;

use async_trait::async_trait;
use clap::{Parser, ValueEnum};
@@ -68,13 +69,25 @@ pub struct ImportCommand {
    /// The basic authentication for connecting to the server
    #[clap(long)]
    auth_basic: Option<String>,

    /// The timeout of invoking the database.
    ///
    /// It is used to override the server-side timeout setting.
    /// The default behavior disables the server-side default timeout (i.e. `0s`).
    #[clap(long, value_parser = humantime::parse_duration)]
    timeout: Option<Duration>,
}

impl ImportCommand {
    pub async fn build(&self, guard: Vec<WorkerGuard>) -> Result<Instance> {
        let (catalog, schema) = database::split_database(&self.database)?;
        let database_client =
            DatabaseClient::new(self.addr.clone(), catalog.clone(), self.auth_basic.clone());
        let database_client = DatabaseClient::new(
            self.addr.clone(),
            catalog.clone(),
            self.auth_basic.clone(),
            // Treats `None` as `0s` to disable the server-side default timeout.
            self.timeout.unwrap_or_default(),
        );

        Ok(Instance::new(
            Box::new(Import {
@@ -21,13 +21,14 @@ use cache::{
    TABLE_ROUTE_CACHE_NAME,
};
use catalog::kvbackend::{
    CachedMetaKvBackend, CachedMetaKvBackendBuilder, KvBackendCatalogManager, MetaKvBackend,
    CachedKvBackend, CachedKvBackendBuilder, KvBackendCatalogManager, MetaKvBackend,
};
use client::{Client, Database, OutputData, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_base::Plugins;
use common_config::Mode;
use common_error::ext::ErrorExt;
use common_meta::cache::{CacheRegistryBuilder, LayeredCacheRegistryBuilder};
use common_meta::kv_backend::KvBackendRef;
use common_query::Output;
use common_recordbatch::RecordBatches;
use common_telemetry::debug;
@@ -258,8 +259,9 @@ async fn create_query_engine(meta_addr: &str) -> Result<DatafusionQueryEngine> {
        .context(StartMetaClientSnafu)?;
    let meta_client = Arc::new(meta_client);

    let cached_meta_backend =
        Arc::new(CachedMetaKvBackendBuilder::new(meta_client.clone()).build());
    let cached_meta_backend = Arc::new(
        CachedKvBackendBuilder::new(Arc::new(MetaKvBackend::new(meta_client.clone()))).build(),
    );
    let layered_cache_builder = LayeredCacheRegistryBuilder::default().add_cache_registry(
        CacheRegistryBuilder::default()
            .add_cache(cached_meta_backend.clone())
@@ -15,7 +15,7 @@
use std::sync::Arc;

use cache::{build_fundamental_cache_registry, with_default_composite_cache_registry};
use catalog::kvbackend::{CachedMetaKvBackendBuilder, KvBackendCatalogManager, MetaKvBackend};
use catalog::kvbackend::{CachedKvBackendBuilder, KvBackendCatalogManager, MetaKvBackend};
use clap::Parser;
use client::client_manager::NodeClients;
use common_base::Plugins;
@@ -246,11 +246,12 @@ impl StartCommand {
        let cache_tti = meta_config.metadata_cache_tti;

        // TODO(discord9): add helper function to ease the creation of cache registry&such
        let cached_meta_backend = CachedMetaKvBackendBuilder::new(meta_client.clone())
            .cache_max_capacity(cache_max_capacity)
            .cache_ttl(cache_ttl)
            .cache_tti(cache_tti)
            .build();
        let cached_meta_backend =
            CachedKvBackendBuilder::new(Arc::new(MetaKvBackend::new(meta_client.clone())))
                .cache_max_capacity(cache_max_capacity)
                .cache_ttl(cache_ttl)
                .cache_tti(cache_tti)
                .build();
        let cached_meta_backend = Arc::new(cached_meta_backend);

        // Builds cache registry
@@ -17,7 +17,7 @@ use std::time::Duration;

use async_trait::async_trait;
use cache::{build_fundamental_cache_registry, with_default_composite_cache_registry};
use catalog::kvbackend::{CachedMetaKvBackendBuilder, KvBackendCatalogManager, MetaKvBackend};
use catalog::kvbackend::{CachedKvBackendBuilder, KvBackendCatalogManager, MetaKvBackend};
use clap::Parser;
use client::client_manager::NodeClients;
use common_base::Plugins;
@@ -293,11 +293,12 @@ impl StartCommand {
        .context(MetaClientInitSnafu)?;

        // TODO(discord9): add helper function to ease the creation of cache registry&such
        let cached_meta_backend = CachedMetaKvBackendBuilder::new(meta_client.clone())
            .cache_max_capacity(cache_max_capacity)
            .cache_ttl(cache_ttl)
            .cache_tti(cache_tti)
            .build();
        let cached_meta_backend =
            CachedKvBackendBuilder::new(Arc::new(MetaKvBackend::new(meta_client.clone())))
                .cache_max_capacity(cache_max_capacity)
                .cache_ttl(cache_ttl)
                .cache_tti(cache_tti)
                .build();
        let cached_meta_backend = Arc::new(cached_meta_backend);

        // Builds cache registry
@@ -43,6 +43,31 @@ lazy_static::lazy_static! {
    prometheus::register_int_gauge_vec!("greptime_app_version", "app version", &["version", "short_version", "app"]).unwrap();
}

/// Waits for the close signal; on Unix platforms that is SIGINT or SIGTERM.
#[cfg(unix)]
async fn start_wait_for_close_signal() -> std::io::Result<()> {
    use tokio::signal::unix::{signal, SignalKind};
    let mut sigint = signal(SignalKind::interrupt())?;
    let mut sigterm = signal(SignalKind::terminate())?;

    tokio::select! {
        _ = sigint.recv() => {
            info!("Received SIGINT, shutting down");
        }
        _ = sigterm.recv() => {
            info!("Received SIGTERM, shutting down");
        }
    }

    Ok(())
}

/// Waits for the close signal; on non-Unix platforms that is Ctrl-C.
#[cfg(not(unix))]
async fn start_wait_for_close_signal() -> std::io::Result<()> {
    tokio::signal::ctrl_c().await
}

#[async_trait]
pub trait App: Send {
    fn name(&self) -> &str;
@@ -69,9 +94,9 @@ pub trait App: Send {
        self.start().await?;

        if self.wait_signal() {
            if let Err(e) = tokio::signal::ctrl_c().await {
                error!(e; "Failed to listen for ctrl-c signal");
                // It's unusual to fail to listen for the ctrl-c signal; maybe there's something unexpected in
            if let Err(e) = start_wait_for_close_signal().await {
                error!(e; "Failed to listen for close signal");
                // It's unusual to fail to listen for the close signal; maybe there's something unexpected in
                // the underlying system. So we stop the app instead of running nonetheless to let people
                // investigate the issue.
            }
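A hedged sketch of how such a close-signal helper is typically consumed: race the application's main future against the signal future. `run_server` is hypothetical; `tokio::signal::ctrl_c` is used so the sketch stays portable.

async fn run_server() -> std::io::Result<()> {
    // Stand-in for the app's long-running work; never completes on its own.
    std::future::pending::<std::io::Result<()>>().await
}

#[tokio::main]
async fn main() -> std::io::Result<()> {
    tokio::select! {
        res = run_server() => res,
        res = tokio::signal::ctrl_c() => {
            println!("Received close signal, shutting down");
            res
        }
    }
}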
@@ -20,13 +20,13 @@ use common_config::Configurable;
use common_grpc::channel_manager::{
    DEFAULT_MAX_GRPC_RECV_MESSAGE_SIZE, DEFAULT_MAX_GRPC_SEND_MESSAGE_SIZE,
};
use common_options::datanode::{ClientOptions, DatanodeClientOptions};
use common_telemetry::logging::{LoggingOptions, SlowQueryOptions, DEFAULT_OTLP_ENDPOINT};
use common_wal::config::raft_engine::RaftEngineConfig;
use common_wal::config::DatanodeWalConfig;
use datanode::config::{DatanodeOptions, RegionEngineConfig, StorageConfig};
use file_engine::config::EngineConfig;
use frontend::frontend::FrontendOptions;
use frontend::service_config::datanode::DatanodeClientOptions;
use meta_client::MetaClientOptions;
use meta_srv::metasrv::MetasrvOptions;
use meta_srv::selector::SelectorType;
@@ -126,10 +126,11 @@ fn test_load_frontend_example_config() {
            tracing_sample_ratio: Some(Default::default()),
            ..Default::default()
        },
        datanode: frontend::service_config::DatanodeOptions {
            client: DatanodeClientOptions {
        datanode: DatanodeClientOptions {
            client: ClientOptions {
                connect_timeout: Duration::from_secs(10),
                tcp_nodelay: true,
                ..Default::default()
            },
        },
        export_metrics: ExportMetricsOption {
@@ -166,8 +167,8 @@ fn test_load_metasrv_example_config() {
        },
        ..Default::default()
        },
        datanode: meta_srv::metasrv::DatanodeOptions {
            client: meta_srv::metasrv::DatanodeClientOptions {
        datanode: DatanodeClientOptions {
            client: ClientOptions {
                timeout: Duration::from_secs(10),
                connect_timeout: Duration::from_secs(10),
                tcp_nodelay: true,
@@ -16,9 +16,12 @@ common-error.workspace = true
common-macro.workspace = true
futures.workspace = true
paste = "1.0"
pin-project.workspace = true
serde = { version = "1.0", features = ["derive"] }
snafu.workspace = true
tokio.workspace = true
zeroize = { version = "1.6", default-features = false, features = ["alloc"] }

[dev-dependencies]
common-test-util.workspace = true
toml.workspace = true
@@ -12,12 +12,20 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::future::Future;
use std::io;
use std::ops::Range;
use std::path::Path;
use std::pin::Pin;
use std::sync::Arc;
use std::task::{Context, Poll};

use async_trait::async_trait;
use bytes::{BufMut, Bytes};
use futures::{AsyncReadExt, AsyncSeekExt};
use futures::AsyncRead;
use pin_project::pin_project;
use tokio::io::{AsyncReadExt as _, AsyncSeekExt as _};
use tokio::sync::Mutex;

/// `Metadata` contains the metadata of a source.
pub struct Metadata {
@@ -61,7 +69,7 @@ pub trait RangeReader: Send + Unpin {
}

#[async_trait]
impl<R: RangeReader + Send + Unpin> RangeReader for &mut R {
impl<R: ?Sized + RangeReader> RangeReader for &mut R {
    async fn metadata(&mut self) -> io::Result<Metadata> {
        (*self).metadata().await
    }
@@ -80,26 +88,212 @@ impl<R: RangeReader + Send + Unpin> RangeReader for &mut R {
    }
}
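An editor's sketch of what the `?Sized` relaxation above buys: with it, `&mut dyn RangeReader` itself satisfies `RangeReader`, so helpers can accept trait objects. This assumes only the trait as defined in this file; `read_all` is a hypothetical helper, not part of the diff.

use bytes::Bytes;

// Hypothetical helper: `reader` is a trait object, accepted because
// `&mut R` implements `RangeReader` for any `R: ?Sized + RangeReader`.
async fn read_all(reader: &mut dyn RangeReader) -> std::io::Result<Bytes> {
    let metadata = reader.metadata().await?;
    reader.read(0..metadata.content_length).await
}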
/// `RangeReaderAdapter` bridges `RangeReader` and `AsyncRead + AsyncSeek`.
pub struct RangeReaderAdapter<R>(pub R);
/// `AsyncReadAdapter` adapts a `RangeReader` to an `AsyncRead`.
#[pin_project]
pub struct AsyncReadAdapter<R> {
    /// The inner `RangeReader`.
    /// Uses a `Mutex` to work around the borrow checker.
    inner: Arc<Mutex<R>>,

    /// The current position from the view of the reader.
    position: u64,

    /// The buffer for the read bytes.
    buffer: Vec<u8>,

    /// The length of the content.
    content_length: u64,

    /// The future for reading the next bytes.
    #[pin]
    read_fut: Option<Pin<Box<dyn Future<Output = io::Result<Bytes>> + Send>>>,
}

impl<R: RangeReader + 'static> AsyncReadAdapter<R> {
    pub async fn new(inner: R) -> io::Result<Self> {
        let mut inner = inner;
        let metadata = inner.metadata().await?;
        Ok(AsyncReadAdapter {
            inner: Arc::new(Mutex::new(inner)),
            position: 0,
            buffer: Vec::new(),
            content_length: metadata.content_length,
            read_fut: None,
        })
    }
}

/// The maximum size per read for the inner reader in `AsyncReadAdapter`.
const MAX_SIZE_PER_READ: usize = 8 * 1024 * 1024; // 8MB

impl<R: RangeReader + 'static> AsyncRead for AsyncReadAdapter<R> {
    fn poll_read(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &mut [u8],
    ) -> Poll<io::Result<usize>> {
        let mut this = self.as_mut().project();

        if *this.position >= *this.content_length {
            return Poll::Ready(Ok(0));
        }

        if !this.buffer.is_empty() {
            let to_read = this.buffer.len().min(buf.len());
            buf[..to_read].copy_from_slice(&this.buffer[..to_read]);
            this.buffer.drain(..to_read);
            *this.position += to_read as u64;
            return Poll::Ready(Ok(to_read));
        }

        if this.read_fut.is_none() {
            let size = (*this.content_length - *this.position).min(MAX_SIZE_PER_READ as u64);
            let range = *this.position..(*this.position + size);
            let inner = this.inner.clone();
            let fut = async move {
                let mut inner = inner.lock().await;
                inner.read(range).await
            };

            *this.read_fut = Some(Box::pin(fut));
        }

        match this
            .read_fut
            .as_mut()
            .as_pin_mut()
            .expect("checked above")
            .poll(cx)
        {
            Poll::Pending => Poll::Pending,
            Poll::Ready(Ok(bytes)) => {
                *this.read_fut = None;

                if !bytes.is_empty() {
                    this.buffer.extend_from_slice(&bytes);
                    self.poll_read(cx, buf)
                } else {
                    Poll::Ready(Ok(0))
                }
            }
            Poll::Ready(Err(e)) => {
                *this.read_fut = None;
                Poll::Ready(Err(e))
            }
        }
    }
}
/// Implements `RangeReader` for a type that implements `AsyncRead + AsyncSeek`.
///
/// TODO(zhongzc): It's a temporary solution for porting the codebase from `AsyncRead + AsyncSeek` to `RangeReader`.
/// Once the codebase is fully ported to `RangeReader`, remove this implementation.
#[async_trait]
impl<R: futures::AsyncRead + futures::AsyncSeek + Send + Unpin> RangeReader
    for RangeReaderAdapter<R>
{
impl RangeReader for Vec<u8> {
    async fn metadata(&mut self) -> io::Result<Metadata> {
        let content_length = self.0.seek(io::SeekFrom::End(0)).await?;
        Ok(Metadata { content_length })
        Ok(Metadata {
            content_length: self.len() as u64,
        })
    }

    async fn read(&mut self, range: Range<u64>) -> io::Result<Bytes> {
    async fn read(&mut self, mut range: Range<u64>) -> io::Result<Bytes> {
        range.end = range.end.min(self.len() as u64);

        let bytes = Bytes::copy_from_slice(&self[range.start as usize..range.end as usize]);
        Ok(bytes)
    }
}
/// `FileReader` is a `RangeReader` for reading a file.
pub struct FileReader {
    content_length: u64,
    position: u64,
    file: tokio::fs::File,
}

impl FileReader {
    /// Creates a new `FileReader` for the file at the given path.
    pub async fn new(path: impl AsRef<Path>) -> io::Result<Self> {
        let file = tokio::fs::File::open(path).await?;
        let metadata = file.metadata().await?;
        Ok(FileReader {
            content_length: metadata.len(),
            position: 0,
            file,
        })
    }
}

#[async_trait]
impl RangeReader for FileReader {
    async fn metadata(&mut self) -> io::Result<Metadata> {
        Ok(Metadata {
            content_length: self.content_length,
        })
    }

    async fn read(&mut self, mut range: Range<u64>) -> io::Result<Bytes> {
        if range.start != self.position {
            self.file.seek(io::SeekFrom::Start(range.start)).await?;
            self.position = range.start;
        }

        range.end = range.end.min(self.content_length);
        if range.end <= self.position {
            return Err(io::Error::new(
                io::ErrorKind::UnexpectedEof,
                "Start of range is out of bounds",
            ));
        }

        let mut buf = vec![0; (range.end - range.start) as usize];
        self.0.seek(io::SeekFrom::Start(range.start)).await?;
        self.0.read_exact(&mut buf).await?;

        self.file.read_exact(&mut buf).await?;
        self.position = range.end;

        Ok(Bytes::from(buf))
    }
}
#[cfg(test)]
mod tests {
    use common_test_util::temp_dir::create_named_temp_file;
    use futures::io::AsyncReadExt as _;

    use super::*;

    #[tokio::test]
    async fn test_async_read_adapter() {
        let data = b"hello world";
        let reader = Vec::from(data);
        let mut adapter = AsyncReadAdapter::new(reader).await.unwrap();

        let mut buf = Vec::new();
        adapter.read_to_end(&mut buf).await.unwrap();
        assert_eq!(buf, data);
    }

    #[tokio::test]
    async fn test_async_read_adapter_large() {
        let data = (0..20 * 1024 * 1024).map(|i| i as u8).collect::<Vec<u8>>();
        let mut adapter = AsyncReadAdapter::new(data.clone()).await.unwrap();

        let mut buf = Vec::new();
        adapter.read_to_end(&mut buf).await.unwrap();
        assert_eq!(buf, data);
    }

    #[tokio::test]
    async fn test_file_reader() {
        let file = create_named_temp_file();
        let path = file.path();
        let data = b"hello world";
        tokio::fs::write(path, data).await.unwrap();

        let mut reader = FileReader::new(path).await.unwrap();
        let metadata = reader.metadata().await.unwrap();
        assert_eq!(metadata.content_length, data.len() as u64);

        let bytes = reader.read(0..metadata.content_length).await.unwrap();
        assert_eq!(&*bytes, data);

        let bytes = reader.read(0..5).await.unwrap();
        assert_eq!(&*bytes, &data[..5]);
    }
}
@@ -9,7 +9,7 @@ workspace = true

[features]
default = ["geo"]
geo = ["geohash", "h3o", "s2"]
geo = ["geohash", "h3o", "s2", "wkt", "geo-types", "dep:geo"]

[dependencies]
api.workspace = true
@@ -28,9 +28,12 @@ common-version.workspace = true
datafusion.workspace = true
datatypes.workspace = true
derive_more = { version = "1", default-features = false, features = ["display"] }
geo = { version = "0.29", optional = true }
geo-types = { version = "0.7", optional = true }
geohash = { version = "0.13", optional = true }
h3o = { version = "0.6", optional = true }
jsonb.workspace = true
nalgebra = "0.33"
num = "0.4"
num-traits = "0.2"
once_cell.workspace = true
@@ -44,8 +47,10 @@ sql.workspace = true
statrs = "0.16"
store-api.workspace = true
table.workspace = true
wkt = { version = "0.11", optional = true }

[dev-dependencies]
approx = "0.5"
ron = "0.7"
serde = { version = "1.0", features = ["derive"] }
tokio.workspace = true
@@ -27,6 +27,7 @@ use crate::scalars::matches::MatchesFunction;
use crate::scalars::math::MathFunction;
use crate::scalars::numpy::NumpyFunction;
use crate::scalars::timestamp::TimestampFunction;
use crate::scalars::vector::VectorFunction;
use crate::system::SystemFunction;
use crate::table::TableFunction;

@@ -120,6 +121,9 @@ pub static FUNCTION_REGISTRY: Lazy<Arc<FunctionRegistry>> = Lazy::new(|| {
    // Json related functions
    JsonFunction::register(&function_registry);

    // Vector related functions
    VectorFunction::register(&function_registry);

    // Geo functions
    #[cfg(feature = "geo")]
    crate::scalars::geo::GeoFunctions::register(&function_registry);
@@ -21,6 +21,7 @@ pub mod json;
pub mod matches;
pub mod math;
pub mod numpy;
pub mod vector;

#[cfg(test)]
pub(crate) mod test;
@@ -17,7 +17,10 @@ pub(crate) mod encoding;
mod geohash;
mod h3;
mod helpers;
mod measure;
mod relation;
mod s2;
mod wkt;

use crate::function_registry::FunctionRegistry;

@@ -48,6 +51,7 @@ impl GeoFunctions {
        registry.register(Arc::new(h3::H3CellToChildrenSize));
        registry.register(Arc::new(h3::H3CellToChildPos));
        registry.register(Arc::new(h3::H3ChildPosToCell));
        registry.register(Arc::new(h3::H3CellContains));

        // h3 grid traversal
        registry.register(Arc::new(h3::H3GridDisk));
@@ -55,10 +59,27 @@ impl GeoFunctions {
        registry.register(Arc::new(h3::H3GridDistance));
        registry.register(Arc::new(h3::H3GridPathCells));

        // h3 measurement
        registry.register(Arc::new(h3::H3CellDistanceSphereKm));
        registry.register(Arc::new(h3::H3CellDistanceEuclideanDegree));

        // s2
        registry.register(Arc::new(s2::S2LatLngToCell));
        registry.register(Arc::new(s2::S2CellLevel));
        registry.register(Arc::new(s2::S2CellToToken));
        registry.register(Arc::new(s2::S2CellParent));

        // spatial data type
        registry.register(Arc::new(wkt::LatLngToPointWkt));

        // spatial relation
        registry.register(Arc::new(relation::STContains));
        registry.register(Arc::new(relation::STWithin));
        registry.register(Arc::new(relation::STIntersects));

        // spatial measure
        registry.register(Arc::new(measure::STDistance));
        registry.register(Arc::new(measure::STDistanceSphere));
        registry.register(Arc::new(measure::STArea));
    }
}
@@ -23,8 +23,8 @@ use datatypes::prelude::ConcreteDataType;
use datatypes::scalars::{Scalar, ScalarVectorBuilder};
use datatypes::value::{ListValue, Value};
use datatypes::vectors::{
    BooleanVectorBuilder, Int32VectorBuilder, ListVectorBuilder, MutableVector,
    StringVectorBuilder, UInt64VectorBuilder, UInt8VectorBuilder, VectorRef,
    BooleanVectorBuilder, Float64VectorBuilder, Int32VectorBuilder, ListVectorBuilder,
    MutableVector, StringVectorBuilder, UInt64VectorBuilder, UInt8VectorBuilder, VectorRef,
};
use derive_more::Display;
use h3o::{CellIndex, LatLng, Resolution};
@@ -38,6 +38,7 @@ static CELL_TYPES: Lazy<Vec<ConcreteDataType>> = Lazy::new(|| {
    vec![
        ConcreteDataType::int64_datatype(),
        ConcreteDataType::uint64_datatype(),
        ConcreteDataType::string_datatype(),
    ]
});
@@ -952,6 +953,181 @@ impl Function for H3GridPathCells {
    }
}

/// Tests whether the given set of cells contains the cell, by parent relationship
#[derive(Clone, Debug, Default, Display)]
#[display("{}", self.name())]
pub struct H3CellContains;

impl Function for H3CellContains {
    fn name(&self) -> &str {
        "h3_cells_contains"
    }

    fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
        Ok(ConcreteDataType::boolean_datatype())
    }

    fn signature(&self) -> Signature {
        let multi_cell_types = vec![
            ConcreteDataType::list_datatype(ConcreteDataType::int64_datatype()),
            ConcreteDataType::list_datatype(ConcreteDataType::uint64_datatype()),
            ConcreteDataType::list_datatype(ConcreteDataType::string_datatype()),
            ConcreteDataType::string_datatype(),
        ];

        let mut signatures = Vec::with_capacity(multi_cell_types.len() * CELL_TYPES.len());
        for multi_cell_type in &multi_cell_types {
            for cell_type in CELL_TYPES.as_slice() {
                signatures.push(TypeSignature::Exact(vec![
                    multi_cell_type.clone(),
                    cell_type.clone(),
                ]));
            }
        }

        Signature::one_of(signatures, Volatility::Stable)
    }

    fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
        ensure_columns_n!(columns, 2);

        let cells_vec = &columns[0];
        let cell_this_vec = &columns[1];

        let size = cell_this_vec.len();
        let mut results = BooleanVectorBuilder::with_capacity(size);

        for i in 0..size {
            let mut result = None;
            if let (cells, Some(cell_this)) = (
                cells_from_value(cells_vec.get(i))?,
                cell_from_value(cell_this_vec.get(i))?,
            ) {
                result = Some(false);

                for cell_that in cells.iter() {
                    // Get the cell's resolution, find cell_this's parent at that
                    // resolution, and test whether cell_that equals the parent.
                    let resolution = cell_that.resolution();
                    if let Some(cell_this_parent) = cell_this.parent(resolution) {
                        if cell_this_parent == *cell_that {
                            result = Some(true);
                            break;
                        }
                    }
                }
            }

            results.push(result);
        }

        Ok(results.to_vector())
    }
}
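A standalone sketch of the parent-containment test `h3_cells_contains` performs per row, using only the `h3o` crate; the coordinates and resolutions are illustrative.

use h3o::{CellIndex, LatLng, Resolution};

// A set of cells "contains" `cell` when any member equals `cell`'s
// parent at that member's resolution.
fn cells_contain(cells: &[CellIndex], cell: CellIndex) -> bool {
    cells
        .iter()
        .any(|that| cell.parent(that.resolution()) == Some(*that))
}

fn main() {
    let latlng = LatLng::new(37.7749, -122.4194).expect("valid coordinates");
    let fine = latlng.to_cell(Resolution::Nine);
    let coarse = fine.parent(Resolution::Five).expect("parent exists");

    assert!(cells_contain(&[coarse], fine));
    assert!(!cells_contain(&[fine], coarse)); // containment is not symmetric
}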
/// Gets the WGS84 great circle distance between two cell centroids
#[derive(Clone, Debug, Default, Display)]
#[display("{}", self.name())]
pub struct H3CellDistanceSphereKm;

impl Function for H3CellDistanceSphereKm {
    fn name(&self) -> &str {
        "h3_distance_sphere_km"
    }

    fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
        Ok(ConcreteDataType::float64_datatype())
    }

    fn signature(&self) -> Signature {
        signature_of_double_cells()
    }

    fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
        ensure_columns_n!(columns, 2);

        let cell_this_vec = &columns[0];
        let cell_that_vec = &columns[1];
        let size = cell_this_vec.len();

        let mut results = Float64VectorBuilder::with_capacity(size);

        for i in 0..size {
            let result = match (
                cell_from_value(cell_this_vec.get(i))?,
                cell_from_value(cell_that_vec.get(i))?,
            ) {
                (Some(cell_this), Some(cell_that)) => {
                    let centroid_this = LatLng::from(cell_this);
                    let centroid_that = LatLng::from(cell_that);

                    Some(centroid_this.distance_km(centroid_that))
                }
                _ => None,
            };

            results.push(result);
        }

        Ok(results.to_vector())
    }
}
/// Gets the Euclidean distance between two cell centroids, in degrees
#[derive(Clone, Debug, Default, Display)]
#[display("{}", self.name())]
pub struct H3CellDistanceEuclideanDegree;

impl H3CellDistanceEuclideanDegree {
    fn distance(centroid_this: LatLng, centroid_that: LatLng) -> f64 {
        ((centroid_this.lat() - centroid_that.lat()).powi(2)
            + (centroid_this.lng() - centroid_that.lng()).powi(2))
        .sqrt()
    }
}

impl Function for H3CellDistanceEuclideanDegree {
    fn name(&self) -> &str {
        "h3_distance_degree"
    }

    fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
        Ok(ConcreteDataType::float64_datatype())
    }

    fn signature(&self) -> Signature {
        signature_of_double_cells()
    }

    fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
        ensure_columns_n!(columns, 2);

        let cell_this_vec = &columns[0];
        let cell_that_vec = &columns[1];
        let size = cell_this_vec.len();

        let mut results = Float64VectorBuilder::with_capacity(size);

        for i in 0..size {
            let result = match (
                cell_from_value(cell_this_vec.get(i))?,
                cell_from_value(cell_that_vec.get(i))?,
            ) {
                (Some(cell_this), Some(cell_that)) => {
                    let centroid_this = LatLng::from(cell_this);
                    let centroid_that = LatLng::from(cell_that);

                    let dist = Self::distance(centroid_this, centroid_that);
                    Some(dist)
                }
                _ => None,
            };

            results.push(result);
        }

        Ok(results.to_vector())
    }
}
fn value_to_resolution(v: Value) -> Result<Resolution> {
    let r = match v {
        Value::Int8(v) => v as u8,
@@ -1073,7 +1249,126 @@ fn cell_from_value(v: Value) -> Result<Option<CellIndex>> {
            })
            .context(error::ExecuteSnafu)?,
        ),
        Value::String(s) => Some(
            CellIndex::from_str(s.as_utf8())
                .map_err(|e| {
                    BoxedError::new(PlainError::new(
                        format!("H3 error: {}", e),
                        StatusCode::EngineExecuteQuery,
                    ))
                })
                .context(error::ExecuteSnafu)?,
        ),
        _ => None,
    };
    Ok(cell)
}
/// Extracts a cell array from all supported types, including:
/// - int64 list
/// - uint64 list
/// - string list
/// - comma-separated string
fn cells_from_value(v: Value) -> Result<Vec<CellIndex>> {
    match v {
        Value::List(list) => match list.datatype() {
            ConcreteDataType::Int64(_) => list
                .items()
                .iter()
                .map(|v| {
                    if let Value::Int64(v) = v {
                        CellIndex::try_from(*v as u64)
                            .map_err(|e| {
                                BoxedError::new(PlainError::new(
                                    format!("H3 error: {}", e),
                                    StatusCode::EngineExecuteQuery,
                                ))
                            })
                            .context(error::ExecuteSnafu)
                    } else {
                        Err(BoxedError::new(PlainError::new(
                            "Invalid data type in array".to_string(),
                            StatusCode::EngineExecuteQuery,
                        )))
                        .context(error::ExecuteSnafu)
                    }
                })
                .collect::<Result<Vec<CellIndex>>>(),
            ConcreteDataType::UInt64(_) => list
                .items()
                .iter()
                .map(|v| {
                    if let Value::UInt64(v) = v {
                        CellIndex::try_from(*v)
                            .map_err(|e| {
                                BoxedError::new(PlainError::new(
                                    format!("H3 error: {}", e),
                                    StatusCode::EngineExecuteQuery,
                                ))
                            })
                            .context(error::ExecuteSnafu)
                    } else {
                        Err(BoxedError::new(PlainError::new(
                            "Invalid data type in array".to_string(),
                            StatusCode::EngineExecuteQuery,
                        )))
                        .context(error::ExecuteSnafu)
                    }
                })
                .collect::<Result<Vec<CellIndex>>>(),
            ConcreteDataType::String(_) => list
                .items()
                .iter()
                .map(|v| {
                    if let Value::String(v) = v {
                        CellIndex::from_str(v.as_utf8().trim())
                            .map_err(|e| {
                                BoxedError::new(PlainError::new(
                                    format!("H3 error: {}", e),
                                    StatusCode::EngineExecuteQuery,
                                ))
                            })
                            .context(error::ExecuteSnafu)
                    } else {
                        Err(BoxedError::new(PlainError::new(
                            "Invalid data type in array".to_string(),
                            StatusCode::EngineExecuteQuery,
                        )))
                        .context(error::ExecuteSnafu)
                    }
                })
                .collect::<Result<Vec<CellIndex>>>(),
            _ => Ok(vec![]),
        },
        Value::String(csv) => {
            let str_seq = csv.as_utf8().split(',');
            str_seq
                .map(|v| {
                    CellIndex::from_str(v.trim())
                        .map_err(|e| {
                            BoxedError::new(PlainError::new(
                                format!("H3 error: {}", e),
                                StatusCode::EngineExecuteQuery,
                            ))
                        })
                        .context(error::ExecuteSnafu)
                })
                .collect::<Result<Vec<CellIndex>>>()
        }
        _ => Ok(vec![]),
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_h3_euclidean_distance() {
        let point_this = LatLng::new(42.3521, -72.1235).expect("incorrect lat lng");
        let point_that = LatLng::new(42.45, -72.1260).expect("incorrect lat lng");

        let dist = H3CellDistanceEuclideanDegree::distance(point_this, point_that);
        assert_eq!(dist, 0.09793191512474639);
    }
}
195 src/common/function/src/scalars/geo/measure.rs Normal file
@@ -0,0 +1,195 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use common_error::ext::{BoxedError, PlainError};
use common_error::status_code::StatusCode;
use common_query::error::{self, Result};
use common_query::prelude::{Signature, TypeSignature};
use datafusion::logical_expr::Volatility;
use datatypes::prelude::ConcreteDataType;
use datatypes::scalars::ScalarVectorBuilder;
use datatypes::vectors::{Float64VectorBuilder, MutableVector, VectorRef};
use derive_more::Display;
use geo::algorithm::line_measures::metric_spaces::Euclidean;
use geo::{Area, Distance, Haversine};
use geo_types::Geometry;
use snafu::ResultExt;

use super::helpers::{ensure_columns_len, ensure_columns_n};
use super::wkt::parse_wkt;
use crate::function::{Function, FunctionContext};

/// Returns the WGS84 (SRID: 4326) Euclidean distance between two geometry objects, in degrees
#[derive(Clone, Debug, Default, Display)]
#[display("{}", self.name())]
pub struct STDistance;

impl Function for STDistance {
    fn name(&self) -> &str {
        "st_distance"
    }

    fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
        Ok(ConcreteDataType::float64_datatype())
    }

    fn signature(&self) -> Signature {
        Signature::new(
            TypeSignature::Exact(vec![
                ConcreteDataType::string_datatype(),
                ConcreteDataType::string_datatype(),
            ]),
            Volatility::Stable,
        )
    }

    fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
        ensure_columns_n!(columns, 2);

        let wkt_this_vec = &columns[0];
        let wkt_that_vec = &columns[1];

        let size = wkt_this_vec.len();
        let mut results = Float64VectorBuilder::with_capacity(size);

        for i in 0..size {
            let wkt_this = wkt_this_vec.get(i).as_string();
            let wkt_that = wkt_that_vec.get(i).as_string();

            let result = match (wkt_this, wkt_that) {
                (Some(wkt_this), Some(wkt_that)) => {
                    let geom_this = parse_wkt(&wkt_this)?;
                    let geom_that = parse_wkt(&wkt_that)?;

                    Some(Euclidean::distance(&geom_this, &geom_that))
                }
                _ => None,
            };

            results.push(result);
        }

        Ok(results.to_vector())
    }
}

/// Returns the great circle distance between two geometry objects, in meters
#[derive(Clone, Debug, Default, Display)]
#[display("{}", self.name())]
pub struct STDistanceSphere;

impl Function for STDistanceSphere {
    fn name(&self) -> &str {
        "st_distance_sphere_m"
    }

    fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
        Ok(ConcreteDataType::float64_datatype())
    }

    fn signature(&self) -> Signature {
        Signature::new(
            TypeSignature::Exact(vec![
                ConcreteDataType::string_datatype(),
                ConcreteDataType::string_datatype(),
            ]),
            Volatility::Stable,
        )
    }

    fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
        ensure_columns_n!(columns, 2);

        let wkt_this_vec = &columns[0];
        let wkt_that_vec = &columns[1];

        let size = wkt_this_vec.len();
        let mut results = Float64VectorBuilder::with_capacity(size);

        for i in 0..size {
            let wkt_this = wkt_this_vec.get(i).as_string();
            let wkt_that = wkt_that_vec.get(i).as_string();

            let result = match (wkt_this, wkt_that) {
                (Some(wkt_this), Some(wkt_that)) => {
                    let geom_this = parse_wkt(&wkt_this)?;
                    let geom_that = parse_wkt(&wkt_that)?;

                    match (geom_this, geom_that) {
                        (Geometry::Point(this), Geometry::Point(that)) => {
                            Some(Haversine::distance(this, that))
                        }
                        _ => {
                            Err(BoxedError::new(PlainError::new(
                                "Great circle distance between non-point objects are not supported for now.".to_string(),
                                StatusCode::Unsupported,
                            ))).context(error::ExecuteSnafu)?
                        }
                    }
                }
                _ => None,
            };

            results.push(result);
        }

        Ok(results.to_vector())
    }
}
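A hedged sketch of the two distance flavors implemented above, depending only on the `geo` 0.29 metric-space API that this file imports; the coordinates are illustrative.

use geo::algorithm::line_measures::metric_spaces::Euclidean;
use geo::{Distance, Haversine};
use geo_types::Point;

fn main() {
    let sf = Point::new(-122.4194, 37.7749);
    let la = Point::new(-118.2437, 34.0522);

    // Degrees on the lon/lat plane, as `st_distance` computes for WGS84 input.
    let degrees = Euclidean::distance(sf, la);
    // Meters along the great circle, as `st_distance_sphere_m` computes.
    let meters = Haversine::distance(sf, la);

    println!("{degrees:.4} degrees, {meters:.0} meters");
}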
/// Returns the area of the given geometry object
#[derive(Clone, Debug, Default, Display)]
#[display("{}", self.name())]
pub struct STArea;

impl Function for STArea {
    fn name(&self) -> &str {
        "st_area"
    }

    fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
        Ok(ConcreteDataType::float64_datatype())
    }

    fn signature(&self) -> Signature {
        Signature::new(
            TypeSignature::Exact(vec![ConcreteDataType::string_datatype()]),
            Volatility::Stable,
        )
    }

    fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
        ensure_columns_n!(columns, 1);

        let wkt_vec = &columns[0];

        let size = wkt_vec.len();
        let mut results = Float64VectorBuilder::with_capacity(size);

        for i in 0..size {
            let wkt = wkt_vec.get(i).as_string();

            let result = if let Some(wkt) = wkt {
                let geom = parse_wkt(&wkt)?;
                Some(geom.unsigned_area())
            } else {
                None
            };

            results.push(result);
        }

        Ok(results.to_vector())
    }
}
190 src/common/function/src/scalars/geo/relation.rs Normal file
@@ -0,0 +1,190 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use common_query::error::Result;
use common_query::prelude::{Signature, TypeSignature};
use datafusion::logical_expr::Volatility;
use datatypes::prelude::ConcreteDataType;
use datatypes::scalars::ScalarVectorBuilder;
use datatypes::vectors::{BooleanVectorBuilder, MutableVector, VectorRef};
use derive_more::Display;
use geo::algorithm::contains::Contains;
use geo::algorithm::intersects::Intersects;
use geo::algorithm::within::Within;

use super::helpers::{ensure_columns_len, ensure_columns_n};
use super::wkt::parse_wkt;
use crate::function::{Function, FunctionContext};

/// Tests the spatial relationship: contains
#[derive(Clone, Debug, Default, Display)]
#[display("{}", self.name())]
pub struct STContains;

impl Function for STContains {
    fn name(&self) -> &str {
        "st_contains"
    }

    fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
        Ok(ConcreteDataType::boolean_datatype())
    }

    fn signature(&self) -> Signature {
        Signature::new(
            TypeSignature::Exact(vec![
                ConcreteDataType::string_datatype(),
                ConcreteDataType::string_datatype(),
            ]),
            Volatility::Stable,
        )
    }

    fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
        ensure_columns_n!(columns, 2);

        let wkt_this_vec = &columns[0];
        let wkt_that_vec = &columns[1];

        let size = wkt_this_vec.len();
        let mut results = BooleanVectorBuilder::with_capacity(size);

        for i in 0..size {
            let wkt_this = wkt_this_vec.get(i).as_string();
            let wkt_that = wkt_that_vec.get(i).as_string();

            let result = match (wkt_this, wkt_that) {
                (Some(wkt_this), Some(wkt_that)) => {
                    let geom_this = parse_wkt(&wkt_this)?;
                    let geom_that = parse_wkt(&wkt_that)?;

                    Some(geom_this.contains(&geom_that))
                }
                _ => None,
            };

            results.push(result);
        }

        Ok(results.to_vector())
    }
}

/// Tests the spatial relationship: within
#[derive(Clone, Debug, Default, Display)]
#[display("{}", self.name())]
pub struct STWithin;

impl Function for STWithin {
    fn name(&self) -> &str {
        "st_within"
    }

    fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
        Ok(ConcreteDataType::boolean_datatype())
    }

    fn signature(&self) -> Signature {
        Signature::new(
            TypeSignature::Exact(vec![
                ConcreteDataType::string_datatype(),
                ConcreteDataType::string_datatype(),
            ]),
            Volatility::Stable,
        )
    }

    fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
        ensure_columns_n!(columns, 2);

        let wkt_this_vec = &columns[0];
        let wkt_that_vec = &columns[1];

        let size = wkt_this_vec.len();
        let mut results = BooleanVectorBuilder::with_capacity(size);

        for i in 0..size {
            let wkt_this = wkt_this_vec.get(i).as_string();
            let wkt_that = wkt_that_vec.get(i).as_string();

            let result = match (wkt_this, wkt_that) {
                (Some(wkt_this), Some(wkt_that)) => {
                    let geom_this = parse_wkt(&wkt_this)?;
                    let geom_that = parse_wkt(&wkt_that)?;

                    Some(geom_this.is_within(&geom_that))
                }
                _ => None,
            };

            results.push(result);
        }

        Ok(results.to_vector())
    }
}

/// Tests the spatial relationship: intersects
#[derive(Clone, Debug, Default, Display)]
#[display("{}", self.name())]
pub struct STIntersects;

impl Function for STIntersects {
    fn name(&self) -> &str {
        "st_intersects"
    }

    fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
        Ok(ConcreteDataType::boolean_datatype())
    }

    fn signature(&self) -> Signature {
        Signature::new(
            TypeSignature::Exact(vec![
                ConcreteDataType::string_datatype(),
                ConcreteDataType::string_datatype(),
            ]),
            Volatility::Stable,
        )
    }

    fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
        ensure_columns_n!(columns, 2);

        let wkt_this_vec = &columns[0];
        let wkt_that_vec = &columns[1];

        let size = wkt_this_vec.len();
        let mut results = BooleanVectorBuilder::with_capacity(size);

        for i in 0..size {
            let wkt_this = wkt_this_vec.get(i).as_string();
            let wkt_that = wkt_that_vec.get(i).as_string();

            let result = match (wkt_this, wkt_that) {
                (Some(wkt_this), Some(wkt_that)) => {
                    let geom_this = parse_wkt(&wkt_this)?;
                    let geom_that = parse_wkt(&wkt_that)?;

                    Some(geom_this.intersects(&geom_that))
                }
                _ => None,
            };

            results.push(result);
        }

        Ok(results.to_vector())
    }
}
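A small standalone sketch of the three predicates, using the same `geo` traits the new file imports; the polygon and point are illustrative.

use geo::algorithm::contains::Contains;
use geo::algorithm::intersects::Intersects;
use geo::algorithm::within::Within;
use geo_types::{polygon, Point};

fn main() {
    let poly = polygon![(x: 0.0, y: 0.0), (x: 4.0, y: 0.0), (x: 4.0, y: 4.0), (x: 0.0, y: 4.0)];
    let p = Point::new(1.0, 1.0);

    assert!(poly.contains(&p));   // st_contains(poly, p)
    assert!(p.is_within(&poly));  // st_within(p, poly)
    assert!(poly.intersects(&p)); // st_intersects(poly, p)
}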
100 src/common/function/src/scalars/geo/wkt.rs Normal file
@@ -0,0 +1,100 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use common_error::ext::{BoxedError, PlainError};
use common_error::status_code::StatusCode;
use common_query::error::{self, Result};
use common_query::prelude::{Signature, TypeSignature};
use datafusion::logical_expr::Volatility;
use datatypes::prelude::ConcreteDataType;
use datatypes::scalars::ScalarVectorBuilder;
use datatypes::vectors::{MutableVector, StringVectorBuilder, VectorRef};
use derive_more::Display;
use geo_types::{Geometry, Point};
use once_cell::sync::Lazy;
use snafu::ResultExt;
use wkt::{ToWkt, TryFromWkt};

use super::helpers::{ensure_columns_len, ensure_columns_n};
use crate::function::{Function, FunctionContext};

static COORDINATE_TYPES: Lazy<Vec<ConcreteDataType>> = Lazy::new(|| {
    vec![
        ConcreteDataType::float32_datatype(),
        ConcreteDataType::float64_datatype(),
    ]
});

/// Converts a latitude/longitude pair into a WKT point
#[derive(Clone, Debug, Default, Display)]
#[display("{}", self.name())]
pub struct LatLngToPointWkt;

impl Function for LatLngToPointWkt {
    fn name(&self) -> &str {
        "wkt_point_from_latlng"
    }

    fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
        Ok(ConcreteDataType::string_datatype())
    }

    fn signature(&self) -> Signature {
        let mut signatures = Vec::new();
        for coord_type in COORDINATE_TYPES.as_slice() {
            signatures.push(TypeSignature::Exact(vec![
                // latitude
                coord_type.clone(),
                // longitude
                coord_type.clone(),
            ]));
        }
        Signature::one_of(signatures, Volatility::Stable)
    }

    fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
        ensure_columns_n!(columns, 2);

        let lat_vec = &columns[0];
        let lng_vec = &columns[1];

        let size = lat_vec.len();
        let mut results = StringVectorBuilder::with_capacity(size);

        for i in 0..size {
            let lat = lat_vec.get(i).as_f64_lossy();
            let lng = lng_vec.get(i).as_f64_lossy();

            let result = match (lat, lng) {
                (Some(lat), Some(lng)) => Some(Point::new(lng, lat).wkt_string()),
                _ => None,
            };

            results.push(result.as_deref());
        }

        Ok(results.to_vector())
    }
}

pub(super) fn parse_wkt(s: &str) -> Result<Geometry> {
    Geometry::try_from_wkt_str(s)
        .map_err(|e| {
            BoxedError::new(PlainError::new(
                format!("Fail to parse WKT: {}", e),
                StatusCode::EngineExecuteQuery,
            ))
        })
        .context(error::ExecuteSnafu)
}
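A sketch of the WKT round-trip underlying `wkt_point_from_latlng` and `parse_wkt`, using the `wkt` 0.11 traits imported above. Note the longitude-first ordering: `Point::new(lng, lat)`.

use geo_types::{Geometry, Point};
use wkt::{ToWkt, TryFromWkt};

fn main() {
    // Longitude first, matching `Point::new(lng, lat)` in the function above.
    let point = Point::new(-122.4194, 37.7749);
    let wkt_string = point.wkt_string();
    println!("{wkt_string}"); // a POINT literal in WKT syntax

    // `parse_wkt` wraps this same fallible conversion with the engine's error type.
    let geometry = Geometry::try_from_wkt_str(&wkt_string).expect("valid WKT");
    assert_eq!(geometry, Geometry::Point(point));
}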
@@ -15,7 +15,7 @@
|
||||
use std::fmt::{self, Display};
|
||||
|
||||
use common_query::error::{InvalidFuncArgsSnafu, Result, UnsupportedInputDataTypeSnafu};
|
||||
use common_query::prelude::Signature;
|
||||
use common_query::prelude::{Signature, TypeSignature};
|
||||
use datafusion::logical_expr::Volatility;
|
||||
use datatypes::data_type::ConcreteDataType;
|
||||
use datatypes::prelude::VectorRef;
|
||||
@@ -41,10 +41,24 @@ impl Function for JsonPathExistsFunction {
|
||||
}
|
||||
|
||||
fn signature(&self) -> Signature {
|
||||
Signature::exact(
|
||||
Signature::one_of(
|
||||
vec![
|
||||
ConcreteDataType::json_datatype(),
|
||||
ConcreteDataType::string_datatype(),
|
||||
TypeSignature::Exact(vec![
|
||||
ConcreteDataType::json_datatype(),
|
||||
ConcreteDataType::string_datatype(),
|
||||
]),
|
||||
TypeSignature::Exact(vec![
|
||||
ConcreteDataType::null_datatype(),
|
||||
ConcreteDataType::string_datatype(),
|
||||
]),
|
||||
TypeSignature::Exact(vec![
|
||||
ConcreteDataType::json_datatype(),
|
||||
ConcreteDataType::null_datatype(),
|
||||
]),
|
||||
TypeSignature::Exact(vec![
|
||||
ConcreteDataType::null_datatype(),
|
||||
ConcreteDataType::null_datatype(),
|
||||
]),
|
||||
],
|
||||
Volatility::Immutable,
|
||||
)
|
||||
@@ -64,25 +78,26 @@ impl Function for JsonPathExistsFunction {
|
||||
let paths = &columns[1];
|
||||
|
||||
let size = jsons.len();
|
||||
let datatype = jsons.data_type();
|
||||
let mut results = BooleanVectorBuilder::with_capacity(size);
|
||||
|
||||
match datatype {
|
||||
// JSON data type uses binary vector
|
||||
ConcreteDataType::Binary(_) => {
|
||||
match (jsons.data_type(), paths.data_type()) {
|
||||
(ConcreteDataType::Binary(_), ConcreteDataType::String(_)) => {
|
||||
for i in 0..size {
|
||||
let json = jsons.get_ref(i);
|
||||
let path = paths.get_ref(i);
|
||||
|
||||
let json = json.as_binary();
|
||||
let path = path.as_string();
|
||||
let result = match (json, path) {
|
||||
let result = match (jsons.get_ref(i).as_binary(), paths.get_ref(i).as_string())
|
||||
{
|
||||
(Ok(Some(json)), Ok(Some(path))) => {
|
||||
let json_path = jsonb::jsonpath::parse_json_path(path.as_bytes());
|
||||
match json_path {
|
||||
                    Ok(json_path) => jsonb::path_exists(json, json_path).ok(),
                    Err(_) => None,
                }
                // Get `JsonPath`.
                let json_path = match jsonb::jsonpath::parse_json_path(path.as_bytes())
                {
                    Ok(json_path) => json_path,
                    Err(_) => {
                        return InvalidFuncArgsSnafu {
                            err_msg: format!("Illegal json path: {:?}", path),
                        }
                        .fail();
                    }
                };
                jsonb::path_exists(json, json_path).ok()
            }
            _ => None,
        };
@@ -90,6 +105,12 @@ impl Function for JsonPathExistsFunction {
                    results.push(result);
                }
            }

            // Any null args existence causes the result to be NULL.
            (ConcreteDataType::Null(_), ConcreteDataType::String(_)) => results.push_nulls(size),
            (ConcreteDataType::Binary(_), ConcreteDataType::Null(_)) => results.push_nulls(size),
            (ConcreteDataType::Null(_), ConcreteDataType::Null(_)) => results.push_nulls(size),

            _ => {
                return UnsupportedInputDataTypeSnafu {
                    function: NAME,
@@ -114,8 +135,8 @@ mod tests {
    use std::sync::Arc;

    use common_query::prelude::TypeSignature;
    use datatypes::scalars::ScalarVector;
    use datatypes::vectors::{BinaryVector, StringVector};
    use datatypes::prelude::ScalarVector;
    use datatypes::vectors::{BinaryVector, NullVector, StringVector};

    use super::*;

@@ -133,9 +154,27 @@ mod tests {

        assert!(matches!(json_path_exists.signature(),
            Signature {
                type_signature: TypeSignature::Exact(valid_types),
                type_signature: TypeSignature::OneOf(valid_types),
                volatility: Volatility::Immutable
            } if valid_types == vec![ConcreteDataType::json_datatype(), ConcreteDataType::string_datatype()]
            } if valid_types ==
                vec![
                    TypeSignature::Exact(vec![
                        ConcreteDataType::json_datatype(),
                        ConcreteDataType::string_datatype(),
                    ]),
                    TypeSignature::Exact(vec![
                        ConcreteDataType::null_datatype(),
                        ConcreteDataType::string_datatype(),
                    ]),
                    TypeSignature::Exact(vec![
                        ConcreteDataType::json_datatype(),
                        ConcreteDataType::null_datatype(),
                    ]),
                    TypeSignature::Exact(vec![
                        ConcreteDataType::null_datatype(),
                        ConcreteDataType::null_datatype(),
                    ]),
                ],
        ));

        let json_strings = [
@@ -143,9 +182,15 @@ mod tests {
            r#"{"a": 4, "b": {"c": 6}, "c": 6}"#,
            r#"{"a": 7, "b": 8, "c": {"a": 7}}"#,
            r#"{"a": 7, "b": 8, "c": {"a": 7}}"#,
            r#"[1, 2, 3]"#,
            r#"null"#,
            r#"{"a": 7, "b": 8, "c": {"a": 7}}"#,
            r#"null"#,
        ];
        let paths = vec!["$.a.b.c", "$.b", "$.c.a", ".d"];
        let results = [false, true, true, false];
        let paths = vec![
            "$.a.b.c", "$.b", "$.c.a", ".d", "$[0]", "$.a", "null", "null",
        ];
        let expected = [false, true, true, false, true, false, false, false];

        let jsonbs = json_strings
            .iter()
@@ -162,11 +207,44 @@ mod tests {
            .eval(FunctionContext::default(), &args)
            .unwrap();

        assert_eq!(4, vector.len());
        for (i, gt) in results.iter().enumerate() {
        // Test for non-nulls.
        assert_eq!(8, vector.len());
        for (i, real) in expected.iter().enumerate() {
            let result = vector.get_ref(i);
            let result = result.as_boolean().unwrap().unwrap();
            assert_eq!(*gt, result);
            assert!(!result.is_null());
            let val = result.as_boolean().unwrap().unwrap();
            assert_eq!(val, *real);
        }

        // Test for path error.
        let json_bytes = jsonb::parse_value("{}".as_bytes()).unwrap().to_vec();
        let json = BinaryVector::from_vec(vec![json_bytes]);
        let illegal_path = StringVector::from_vec(vec!["$..a"]);

        let args: Vec<VectorRef> = vec![Arc::new(json), Arc::new(illegal_path)];
        let err = json_path_exists.eval(FunctionContext::default(), &args);
        assert!(err.is_err());

        // Test for nulls.
        let json_bytes = jsonb::parse_value("{}".as_bytes()).unwrap().to_vec();
        let json = BinaryVector::from_vec(vec![json_bytes]);
        let null_json = NullVector::new(1);

        let path = StringVector::from_vec(vec!["$.a"]);
        let null_path = NullVector::new(1);

        let args: Vec<VectorRef> = vec![Arc::new(null_json), Arc::new(path)];
        let result1 = json_path_exists
            .eval(FunctionContext::default(), &args)
            .unwrap();
        let args: Vec<VectorRef> = vec![Arc::new(json), Arc::new(null_path)];
        let result2 = json_path_exists
            .eval(FunctionContext::default(), &args)
            .unwrap();

        assert_eq!(result1.len(), 1);
        assert!(result1.get_ref(0).is_null());
        assert_eq!(result2.len(), 1);
        assert!(result2.get_ref(0).is_null());
    }
}

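The hunk above reduces to the jsonb call pattern sketched here (a minimal sketch that collapses path-parse errors into None, whereas the function itself reports them as InvalidFuncArgs; `json` is assumed to hold jsonb-encoded bytes):

fn json_path_exists_once(json: &[u8], path: &str) -> Option<bool> {
    // parse_json_path validates JSONPath syntax such as "$.a.b.c".
    let json_path = jsonb::jsonpath::parse_json_path(path.as_bytes()).ok()?;
    // path_exists probes the jsonb-encoded document for that path.
    jsonb::path_exists(json, json_path).ok()
}
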
@@ -22,8 +22,12 @@ use datafusion::arrow::compute::kernels::cmp::gt;
use datatypes::arrow::array::AsArray;
use datatypes::arrow::compute::cast;
use datatypes::arrow::compute::kernels::zip;
use datatypes::arrow::datatypes::{DataType as ArrowDataType, Date32Type};
use datatypes::arrow::datatypes::{
    DataType as ArrowDataType, Date32Type, Date64Type, TimestampMicrosecondType,
    TimestampMillisecondType, TimestampNanosecondType, TimestampSecondType,
};
use datatypes::prelude::ConcreteDataType;
use datatypes::types::TimestampType;
use datatypes::vectors::{Helper, VectorRef};
use snafu::{ensure, ResultExt};

@@ -34,13 +38,47 @@ pub struct GreatestFunction;

const NAME: &str = "greatest";

macro_rules! gt_time_types {
    ($ty: ident, $columns:expr) => {{
        let column1 = $columns[0].to_arrow_array();
        let column2 = $columns[1].to_arrow_array();

        let column1 = column1.as_primitive::<$ty>();
        let column2 = column2.as_primitive::<$ty>();
        let boolean_array = gt(&column1, &column2).context(ArrowComputeSnafu)?;

        let result = zip::zip(&boolean_array, &column1, &column2).context(ArrowComputeSnafu)?;
        Helper::try_into_vector(&result).context(error::FromArrowArraySnafu)
    }};
}

impl Function for GreatestFunction {
    fn name(&self) -> &str {
        NAME
    }

    fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
        Ok(ConcreteDataType::date_datatype())
    fn return_type(&self, input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
        ensure!(
            input_types.len() == 2,
            InvalidFuncArgsSnafu {
                err_msg: format!(
                    "The length of the args is not correct, expect exactly two, have: {}",
                    input_types.len()
                )
            }
        );

        match &input_types[0] {
            ConcreteDataType::String(_) => Ok(ConcreteDataType::datetime_datatype()),
            ConcreteDataType::Date(_) => Ok(ConcreteDataType::date_datatype()),
            ConcreteDataType::DateTime(_) => Ok(ConcreteDataType::datetime_datatype()),
            ConcreteDataType::Timestamp(ts_type) => Ok(ConcreteDataType::Timestamp(*ts_type)),
            _ => UnsupportedInputDataTypeSnafu {
                function: NAME,
                datatypes: input_types,
            }
            .fail(),
        }
    }

    fn signature(&self) -> Signature {
@@ -49,6 +87,11 @@ impl Function for GreatestFunction {
            vec![
                ConcreteDataType::string_datatype(),
                ConcreteDataType::date_datatype(),
                ConcreteDataType::datetime_datatype(),
                ConcreteDataType::timestamp_nanosecond_datatype(),
                ConcreteDataType::timestamp_microsecond_datatype(),
                ConcreteDataType::timestamp_millisecond_datatype(),
                ConcreteDataType::timestamp_second_datatype(),
            ],
            Volatility::Immutable,
        )
@@ -66,27 +109,32 @@ impl Function for GreatestFunction {
        );
        match columns[0].data_type() {
            ConcreteDataType::String(_) => {
                let column1 = cast(&columns[0].to_arrow_array(), &ArrowDataType::Date32)
                // Treats string as `DateTime` type.
                let column1 = cast(&columns[0].to_arrow_array(), &ArrowDataType::Date64)
                    .context(ArrowComputeSnafu)?;
                let column1 = column1.as_primitive::<Date32Type>();
                let column2 = cast(&columns[1].to_arrow_array(), &ArrowDataType::Date32)
                let column1 = column1.as_primitive::<Date64Type>();
                let column2 = cast(&columns[1].to_arrow_array(), &ArrowDataType::Date64)
                    .context(ArrowComputeSnafu)?;
                let column2 = column2.as_primitive::<Date32Type>();
                let boolean_array = gt(&column1, &column2).context(ArrowComputeSnafu)?;
                let result =
                    zip::zip(&boolean_array, &column1, &column2).context(ArrowComputeSnafu)?;
                Ok(Helper::try_into_vector(&result).context(error::FromArrowArraySnafu)?)
            }
            ConcreteDataType::Date(_) => {
                let column1 = columns[0].to_arrow_array();
                let column1 = column1.as_primitive::<Date32Type>();
                let column2 = columns[1].to_arrow_array();
                let column2 = column2.as_primitive::<Date32Type>();
                let column2 = column2.as_primitive::<Date64Type>();
                let boolean_array = gt(&column1, &column2).context(ArrowComputeSnafu)?;
                let result =
                    zip::zip(&boolean_array, &column1, &column2).context(ArrowComputeSnafu)?;
                Ok(Helper::try_into_vector(&result).context(error::FromArrowArraySnafu)?)
            }
            ConcreteDataType::Date(_) => gt_time_types!(Date32Type, columns),
            ConcreteDataType::DateTime(_) => gt_time_types!(Date64Type, columns),
            ConcreteDataType::Timestamp(ts_type) => match ts_type {
                TimestampType::Second(_) => gt_time_types!(TimestampSecondType, columns),
                TimestampType::Millisecond(_) => {
                    gt_time_types!(TimestampMillisecondType, columns)
                }
                TimestampType::Microsecond(_) => {
                    gt_time_types!(TimestampMicrosecondType, columns)
                }
                TimestampType::Nanosecond(_) => {
                    gt_time_types!(TimestampNanosecondType, columns)
                }
            },
            _ => UnsupportedInputDataTypeSnafu {
                function: NAME,
                datatypes: columns.iter().map(|c| c.data_type()).collect::<Vec<_>>(),
@@ -106,19 +154,31 @@ impl fmt::Display for GreatestFunction {
mod tests {
    use std::sync::Arc;

    use common_time::Date;
    use datatypes::prelude::ConcreteDataType;
    use datatypes::types::DateType;
    use common_time::timestamp::TimeUnit;
    use common_time::{Date, DateTime, Timestamp};
    use datatypes::types::{
        DateTimeType, DateType, TimestampMicrosecondType, TimestampMillisecondType,
        TimestampNanosecondType, TimestampSecondType,
    };
    use datatypes::value::Value;
    use datatypes::vectors::{DateVector, StringVector, Vector};
    use datatypes::vectors::{
        DateTimeVector, DateVector, StringVector, TimestampMicrosecondVector,
        TimestampMillisecondVector, TimestampNanosecondVector, TimestampSecondVector, Vector,
    };
    use paste::paste;

    use super::*;
    #[test]
    fn test_greatest_takes_string_vector() {
        let function = GreatestFunction;
        assert_eq!(
            function.return_type(&[]).unwrap(),
            ConcreteDataType::Date(DateType)
            function
                .return_type(&[
                    ConcreteDataType::string_datatype(),
                    ConcreteDataType::string_datatype()
                ])
                .unwrap(),
            ConcreteDataType::DateTime(DateTimeType)
        );
        let columns = vec![
            Arc::new(StringVector::from(vec![
@@ -132,15 +192,15 @@ mod tests {
        ];

        let result = function.eval(FunctionContext::default(), &columns).unwrap();
        let result = result.as_any().downcast_ref::<DateVector>().unwrap();
        let result = result.as_any().downcast_ref::<DateTimeVector>().unwrap();
        assert_eq!(result.len(), 2);
        assert_eq!(
            result.get(0),
            Value::Date(Date::from_str_utc("2001-02-01").unwrap())
            Value::DateTime(DateTime::from_str("2001-02-01 00:00:00", None).unwrap())
        );
        assert_eq!(
            result.get(1),
            Value::Date(Date::from_str_utc("2012-12-23").unwrap())
            Value::DateTime(DateTime::from_str("2012-12-23 00:00:00", None).unwrap())
        );
    }

@@ -148,9 +208,15 @@ mod tests {
    fn test_greatest_takes_date_vector() {
        let function = GreatestFunction;
        assert_eq!(
            function.return_type(&[]).unwrap(),
            function
                .return_type(&[
                    ConcreteDataType::date_datatype(),
                    ConcreteDataType::date_datatype()
                ])
                .unwrap(),
            ConcreteDataType::Date(DateType)
        );

        let columns = vec![
            Arc::new(DateVector::from_slice(vec![-1, 2])) as _,
            Arc::new(DateVector::from_slice(vec![0, 1])) as _,
@@ -168,4 +234,81 @@ mod tests {
            Value::Date(Date::from_str_utc("1970-01-03").unwrap())
        );
    }

    #[test]
    fn test_greatest_takes_datetime_vector() {
        let function = GreatestFunction;
        assert_eq!(
            function
                .return_type(&[
                    ConcreteDataType::datetime_datatype(),
                    ConcreteDataType::datetime_datatype()
                ])
                .unwrap(),
            ConcreteDataType::DateTime(DateTimeType)
        );

        let columns = vec![
            Arc::new(DateTimeVector::from_slice(vec![-1, 2])) as _,
            Arc::new(DateTimeVector::from_slice(vec![0, 1])) as _,
        ];

        let result = function.eval(FunctionContext::default(), &columns).unwrap();
        let result = result.as_any().downcast_ref::<DateTimeVector>().unwrap();
        assert_eq!(result.len(), 2);
        assert_eq!(
            result.get(0),
            Value::DateTime(DateTime::from_str("1970-01-01 00:00:00", None).unwrap())
        );
        assert_eq!(
            result.get(1),
            Value::DateTime(DateTime::from_str("1970-01-01 00:00:00.002", None).unwrap())
        );
    }

    macro_rules! test_timestamp {
        ($type: expr,$unit: ident) => {
            paste! {
                #[test]
                fn [<test_greatest_takes_ $unit:lower _vector>]() {
                    let function = GreatestFunction;
                    assert_eq!(
                        function.return_type(&[$type, $type]).unwrap(),
                        ConcreteDataType::Timestamp(TimestampType::$unit([<Timestamp $unit Type>]))
                    );

                    let columns = vec![
                        Arc::new([<Timestamp $unit Vector>]::from_slice(vec![-1, 2])) as _,
                        Arc::new([<Timestamp $unit Vector>]::from_slice(vec![0, 1])) as _,
                    ];

                    let result = function.eval(FunctionContext::default(), &columns).unwrap();
                    let result = result.as_any().downcast_ref::<[<Timestamp $unit Vector>]>().unwrap();
                    assert_eq!(result.len(), 2);
                    assert_eq!(
                        result.get(0),
                        Value::Timestamp(Timestamp::new(0, TimeUnit::$unit))
                    );
                    assert_eq!(
                        result.get(1),
                        Value::Timestamp(Timestamp::new(2, TimeUnit::$unit))
                    );
                }
            }
        }
    }

    test_timestamp!(
        ConcreteDataType::timestamp_nanosecond_datatype(),
        Nanosecond
    );
    test_timestamp!(
        ConcreteDataType::timestamp_microsecond_datatype(),
        Microsecond
    );
    test_timestamp!(
        ConcreteDataType::timestamp_millisecond_datatype(),
        Millisecond
    );
    test_timestamp!(ConcreteDataType::timestamp_second_datatype(), Second);
}

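For reference, `gt_time_types!` is the standard arrow compare-then-select idiom. A minimal standalone sketch (assuming the same arrow re-exports used above; `Int64Array` is just an illustrative element type, not one this function accepts):

use datatypes::arrow::array::{ArrayRef, Int64Array};
use datatypes::arrow::compute::kernels::cmp::gt;
use datatypes::arrow::compute::kernels::zip::zip;

fn greatest_i64(a: &Int64Array, b: &Int64Array) -> ArrayRef {
    // Boolean mask: true where a > b.
    let mask = gt(&a, &b).unwrap();
    // Select from `a` where the mask is true, from `b` elsewhere.
    zip(&mask, &a, &b).unwrap()
}
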
35
src/common/function/src/scalars/vector.rs
Normal file
@@ -0,0 +1,35 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

mod convert;
mod distance;

use std::sync::Arc;

use crate::function_registry::FunctionRegistry;

pub(crate) struct VectorFunction;

impl VectorFunction {
    pub fn register(registry: &FunctionRegistry) {
        // conversion
        registry.register(Arc::new(convert::ParseVectorFunction));
        registry.register(Arc::new(convert::VectorToStringFunction));

        // distance
        registry.register(Arc::new(distance::CosDistanceFunction));
        registry.register(Arc::new(distance::DotProductFunction));
        registry.register(Arc::new(distance::L2SqDistanceFunction));
    }
}
@@ -12,17 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Arc;
mod parse_vector;
mod vector_to_string;

use opentelemetry_proto::tonic::collector::trace::v1::ExportTraceServiceRequest;

use super::trace::span::TraceSpans;

/// Transformer helps to transform ExportTraceServiceRequest based on logic, like:
/// - uplift some fields from Attributes (Map type) to column
pub trait TraceParser: Send + Sync {
    fn parse(&self, request: ExportTraceServiceRequest) -> TraceSpans;
    fn table_name(&self) -> String;
}

pub type TraceParserRef = Arc<dyn TraceParser>;
pub use parse_vector::ParseVectorFunction;
pub use vector_to_string::VectorToStringFunction;
160
src/common/function/src/scalars/vector/convert/parse_vector.rs
Normal file
@@ -0,0 +1,160 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::fmt::Display;

use common_query::error::{InvalidFuncArgsSnafu, InvalidVectorStringSnafu, Result};
use common_query::prelude::{Signature, Volatility};
use datatypes::prelude::ConcreteDataType;
use datatypes::scalars::ScalarVectorBuilder;
use datatypes::types::parse_string_to_vector_type_value;
use datatypes::vectors::{BinaryVectorBuilder, MutableVector, VectorRef};
use snafu::{ensure, ResultExt};

use crate::function::{Function, FunctionContext};

const NAME: &str = "parse_vec";

#[derive(Debug, Clone, Default)]
pub struct ParseVectorFunction;

impl Function for ParseVectorFunction {
    fn name(&self) -> &str {
        NAME
    }

    fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
        Ok(ConcreteDataType::binary_datatype())
    }

    fn signature(&self) -> Signature {
        Signature::exact(
            vec![ConcreteDataType::string_datatype()],
            Volatility::Immutable,
        )
    }

    fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
        ensure!(
            columns.len() == 1,
            InvalidFuncArgsSnafu {
                err_msg: format!(
                    "The length of the args is not correct, expect exactly one, have: {}",
                    columns.len()
                ),
            }
        );

        let column = &columns[0];
        let size = column.len();

        let mut result = BinaryVectorBuilder::with_capacity(size);
        for i in 0..size {
            let value = column.get(i).as_string();
            if let Some(value) = value {
                let res = parse_string_to_vector_type_value(&value, None)
                    .context(InvalidVectorStringSnafu { vec_str: &value })?;
                result.push(Some(&res));
            } else {
                result.push_null();
            }
        }

        Ok(result.to_vector())
    }
}

impl Display for ParseVectorFunction {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", NAME.to_ascii_uppercase())
    }
}

#[cfg(test)]
mod tests {
    use std::sync::Arc;

    use common_base::bytes::Bytes;
    use datatypes::value::Value;
    use datatypes::vectors::StringVector;

    use super::*;

    #[test]
    fn test_parse_vector() {
        let func = ParseVectorFunction;

        let input = Arc::new(StringVector::from(vec![
            Some("[1.0,2.0,3.0]".to_string()),
            Some("[4.0,5.0,6.0]".to_string()),
            None,
        ]));

        let result = func.eval(FunctionContext::default(), &[input]).unwrap();

        let result = result.as_ref();
        assert_eq!(result.len(), 3);
        assert_eq!(
            result.get(0),
            Value::Binary(Bytes::from(
                [1.0f32, 2.0, 3.0]
                    .iter()
                    .flat_map(|e| e.to_le_bytes())
                    .collect::<Vec<u8>>()
            ))
        );
        assert_eq!(
            result.get(1),
            Value::Binary(Bytes::from(
                [4.0f32, 5.0, 6.0]
                    .iter()
                    .flat_map(|e| e.to_le_bytes())
                    .collect::<Vec<u8>>()
            ))
        );
        assert!(result.get(2).is_null());
    }

    #[test]
    fn test_parse_vector_error() {
        let func = ParseVectorFunction;

        let input = Arc::new(StringVector::from(vec![
            Some("[1.0,2.0,3.0]".to_string()),
            Some("[4.0,5.0,6.0]".to_string()),
            Some("[7.0,8.0,9.0".to_string()),
        ]));

        let result = func.eval(FunctionContext::default(), &[input]);
        assert!(result.is_err());

        let input = Arc::new(StringVector::from(vec![
            Some("[1.0,2.0,3.0]".to_string()),
            Some("[4.0,5.0,6.0]".to_string()),
            Some("7.0,8.0,9.0]".to_string()),
        ]));

        let result = func.eval(FunctionContext::default(), &[input]);
        assert!(result.is_err());

        let input = Arc::new(StringVector::from(vec![
            Some("[1.0,2.0,3.0]".to_string()),
            Some("[4.0,5.0,6.0]".to_string()),
            Some("[7.0,hello,9.0]".to_string()),
        ]));

        let result = func.eval(FunctionContext::default(), &[input]);
        assert!(result.is_err());
    }
}
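As the assertions above imply, parse_vec encodes each element as a little-endian f32 and concatenates them; a quick sketch of that byte layout:

// "[1.0,2.0,3.0]" is expected to become 3 * 4 = 12 little-endian bytes.
let bytes: Vec<u8> = [1.0f32, 2.0, 3.0].iter().flat_map(|e| e.to_le_bytes()).collect();
assert_eq!(bytes.len(), 12);
assert_eq!(bytes[..4], 1.0f32.to_le_bytes());
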
@@ -0,0 +1,139 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::fmt::Display;

use common_query::error::{InvalidFuncArgsSnafu, Result};
use common_query::prelude::{Signature, Volatility};
use datatypes::prelude::ConcreteDataType;
use datatypes::scalars::ScalarVectorBuilder;
use datatypes::types::vector_type_value_to_string;
use datatypes::value::Value;
use datatypes::vectors::{MutableVector, StringVectorBuilder, VectorRef};
use snafu::ensure;

use crate::function::{Function, FunctionContext};

const NAME: &str = "vec_to_string";

#[derive(Debug, Clone, Default)]
pub struct VectorToStringFunction;

impl Function for VectorToStringFunction {
    fn name(&self) -> &str {
        NAME
    }

    fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
        Ok(ConcreteDataType::string_datatype())
    }

    fn signature(&self) -> Signature {
        Signature::exact(
            vec![ConcreteDataType::binary_datatype()],
            Volatility::Immutable,
        )
    }

    fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
        ensure!(
            columns.len() == 1,
            InvalidFuncArgsSnafu {
                err_msg: format!(
                    "The length of the args is not correct, expect exactly one, have: {}",
                    columns.len()
                ),
            }
        );

        let column = &columns[0];
        let size = column.len();

        let mut result = StringVectorBuilder::with_capacity(size);
        for i in 0..size {
            let value = column.get(i);
            match value {
                Value::Binary(bytes) => {
                    let len = bytes.len();
                    if len % std::mem::size_of::<f32>() != 0 {
                        return InvalidFuncArgsSnafu {
                            err_msg: format!("Invalid binary length of vector: {}", len),
                        }
                        .fail();
                    }

                    let dim = len / std::mem::size_of::<f32>();
                    // Safety: `dim` is calculated from the length of `bytes` and is guaranteed to be valid
                    let res = vector_type_value_to_string(&bytes, dim as _).unwrap();
                    result.push(Some(&res));
                }
                Value::Null => {
                    result.push_null();
                }
                _ => {
                    return InvalidFuncArgsSnafu {
                        err_msg: format!("Invalid value type: {:?}", value.data_type()),
                    }
                    .fail();
                }
            }
        }

        Ok(result.to_vector())
    }
}

impl Display for VectorToStringFunction {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", NAME.to_ascii_uppercase())
    }
}

#[cfg(test)]
mod tests {
    use datatypes::value::Value;
    use datatypes::vectors::BinaryVectorBuilder;

    use super::*;

    #[test]
    fn test_vector_to_string() {
        let func = VectorToStringFunction;

        let mut builder = BinaryVectorBuilder::with_capacity(3);
        builder.push(Some(
            [1.0f32, 2.0, 3.0]
                .iter()
                .flat_map(|e| e.to_le_bytes())
                .collect::<Vec<_>>()
                .as_slice(),
        ));
        builder.push(Some(
            [4.0f32, 5.0, 6.0]
                .iter()
                .flat_map(|e| e.to_le_bytes())
                .collect::<Vec<_>>()
                .as_slice(),
        ));
        builder.push_null();
        let vector = builder.to_vector();

        let result = func.eval(FunctionContext::default(), &[vector]).unwrap();

        assert_eq!(result.len(), 3);
        assert_eq!(result.get(0), Value::String("[1,2,3]".to_string().into()));
        assert_eq!(result.get(1), Value::String("[4,5,6]".to_string().into()));
        assert_eq!(result.get(2), Value::Null);
    }
}
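Worked example for the `dim` arithmetic in eval above: a 12-byte payload splits into 12 / size_of::<f32>() = 3 elements, which is why the 12-byte inputs in the test render as three-element strings like "[1,2,3]"; any length not divisible by 4 is rejected before the conversion.

let len = 12usize;
assert_eq!(len % std::mem::size_of::<f32>(), 0);
assert_eq!(len / std::mem::size_of::<f32>(), 3); // dim
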
482
src/common/function/src/scalars/vector/distance.rs
Normal file
@@ -0,0 +1,482 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

mod cos;
mod dot;
mod l2sq;

use std::borrow::Cow;
use std::fmt::Display;
use std::sync::Arc;

use common_query::error::{InvalidFuncArgsSnafu, Result};
use common_query::prelude::Signature;
use datatypes::prelude::ConcreteDataType;
use datatypes::scalars::ScalarVectorBuilder;
use datatypes::value::ValueRef;
use datatypes::vectors::{Float32VectorBuilder, MutableVector, Vector, VectorRef};
use snafu::ensure;

use crate::function::{Function, FunctionContext};
use crate::helper;

macro_rules! define_distance_function {
    ($StructName:ident, $display_name:expr, $similarity_method:path) => {

        /// A function calculates the distance between two vectors.

        #[derive(Debug, Clone, Default)]
        pub struct $StructName;

        impl Function for $StructName {
            fn name(&self) -> &str {
                $display_name
            }

            fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
                Ok(ConcreteDataType::float32_datatype())
            }

            fn signature(&self) -> Signature {
                helper::one_of_sigs2(
                    vec![
                        ConcreteDataType::string_datatype(),
                        ConcreteDataType::binary_datatype(),
                    ],
                    vec![
                        ConcreteDataType::string_datatype(),
                        ConcreteDataType::binary_datatype(),
                    ],
                )
            }

            fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
                ensure!(
                    columns.len() == 2,
                    InvalidFuncArgsSnafu {
                        err_msg: format!(
                            "The length of the args is not correct, expect exactly two, have: {}",
                            columns.len()
                        ),
                    }
                );
                let arg0 = &columns[0];
                let arg1 = &columns[1];

                let size = arg0.len();
                let mut result = Float32VectorBuilder::with_capacity(size);
                if size == 0 {
                    return Ok(result.to_vector());
                }

                let arg0_const = parse_if_constant_string(arg0)?;
                let arg1_const = parse_if_constant_string(arg1)?;

                for i in 0..size {
                    let vec0 = match arg0_const.as_ref() {
                        Some(a) => Some(Cow::Borrowed(a.as_slice())),
                        None => as_vector(arg0.get_ref(i))?,
                    };
                    let vec1 = match arg1_const.as_ref() {
                        Some(b) => Some(Cow::Borrowed(b.as_slice())),
                        None => as_vector(arg1.get_ref(i))?,
                    };

                    if let (Some(vec0), Some(vec1)) = (vec0, vec1) {
                        ensure!(
                            vec0.len() == vec1.len(),
                            InvalidFuncArgsSnafu {
                                err_msg: format!(
                                    "The length of the vectors must match to calculate distance, have: {} vs {}",
                                    vec0.len(),
                                    vec1.len()
                                ),
                            }
                        );

                        // Checked if the length of the vectors match
                        let d = $similarity_method(vec0.as_ref(), vec1.as_ref());
                        result.push(Some(d));
                    } else {
                        result.push_null();
                    }
                }

                return Ok(result.to_vector());
            }
        }

        impl Display for $StructName {
            fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
                write!(f, "{}", $display_name.to_ascii_uppercase())
            }
        }
    }
}

define_distance_function!(CosDistanceFunction, "vec_cos_distance", cos::cos);
define_distance_function!(L2SqDistanceFunction, "vec_l2sq_distance", l2sq::l2sq);
define_distance_function!(DotProductFunction, "vec_dot_product", dot::dot);

/// Parse a vector value if the value is a constant string.
fn parse_if_constant_string(arg: &Arc<dyn Vector>) -> Result<Option<Vec<f32>>> {
    if !arg.is_const() {
        return Ok(None);
    }
    if arg.data_type() != ConcreteDataType::string_datatype() {
        return Ok(None);
    }
    arg.get_ref(0)
        .as_string()
        .unwrap() // Safe: checked if it is a string
        .map(parse_f32_vector_from_string)
        .transpose()
}

/// Convert a value to a vector value.
/// Supported data types are binary and string.
fn as_vector(arg: ValueRef<'_>) -> Result<Option<Cow<'_, [f32]>>> {
    match arg.data_type() {
        ConcreteDataType::Binary(_) => arg
            .as_binary()
            .unwrap() // Safe: checked if it is a binary
            .map(binary_as_vector)
            .transpose(),
        ConcreteDataType::String(_) => arg
            .as_string()
            .unwrap() // Safe: checked if it is a string
            .map(|s| Ok(Cow::Owned(parse_f32_vector_from_string(s)?)))
            .transpose(),
        ConcreteDataType::Null(_) => Ok(None),
        _ => InvalidFuncArgsSnafu {
            err_msg: format!("Unsupported data type: {:?}", arg.data_type()),
        }
        .fail(),
    }
}

/// Convert a u8 slice to a vector value.
fn binary_as_vector(bytes: &[u8]) -> Result<Cow<'_, [f32]>> {
    if bytes.len() % std::mem::size_of::<f32>() != 0 {
        return InvalidFuncArgsSnafu {
            err_msg: format!("Invalid binary length of vector: {}", bytes.len()),
        }
        .fail();
    }

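    // NB (added note): the zero-copy branch below additionally assumes the byte
    // buffer is 4-byte aligned, a requirement of slice::from_raw_parts with a
    // *const f32 pointer; the chunks_exact branch is the portable per-element
    // decode on any endianness.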
    if cfg!(target_endian = "little") {
        Ok(unsafe {
            let vec = std::slice::from_raw_parts(
                bytes.as_ptr() as *const f32,
                bytes.len() / std::mem::size_of::<f32>(),
            );
            Cow::Borrowed(vec)
        })
    } else {
        let v = bytes
            .chunks_exact(std::mem::size_of::<f32>())
            .map(|chunk| f32::from_le_bytes(chunk.try_into().unwrap()))
            .collect::<Vec<f32>>();
        Ok(Cow::Owned(v))
    }
}

/// Parse a string to a vector value.
/// Valid inputs are strings like "[1.0, 2.0, 3.0]".
fn parse_f32_vector_from_string(s: &str) -> Result<Vec<f32>> {
    let trimmed = s.trim();
    if !trimmed.starts_with('[') || !trimmed.ends_with(']') {
        return InvalidFuncArgsSnafu {
            err_msg: format!(
                "Failed to parse {s} to Vector value: not properly enclosed in brackets"
            ),
        }
        .fail();
    }
    let content = trimmed[1..trimmed.len() - 1].trim();
    if content.is_empty() {
        return Ok(Vec::new());
    }

    content
        .split(',')
        .map(|s| s.trim().parse::<f32>())
        .collect::<std::result::Result<_, _>>()
        .map_err(|e| {
            InvalidFuncArgsSnafu {
                err_msg: format!("Failed to parse {s} to Vector value: {e}"),
            }
            .build()
        })
}

#[cfg(test)]
mod tests {
    use std::sync::Arc;

    use datatypes::vectors::{BinaryVector, ConstantVector, StringVector};

    use super::*;

    #[test]
    fn test_distance_string_string() {
        let funcs = [
            Box::new(CosDistanceFunction {}) as Box<dyn Function>,
            Box::new(L2SqDistanceFunction {}) as Box<dyn Function>,
            Box::new(DotProductFunction {}) as Box<dyn Function>,
        ];

        for func in funcs {
            let vec1 = Arc::new(StringVector::from(vec![
                Some("[0.0, 1.0]"),
                Some("[1.0, 0.0]"),
                None,
                Some("[1.0, 0.0]"),
            ])) as VectorRef;
            let vec2 = Arc::new(StringVector::from(vec![
                Some("[0.0, 1.0]"),
                Some("[0.0, 1.0]"),
                Some("[0.0, 1.0]"),
                None,
            ])) as VectorRef;

            let result = func
                .eval(FunctionContext::default(), &[vec1.clone(), vec2.clone()])
                .unwrap();

            assert!(!result.get(0).is_null());
            assert!(!result.get(1).is_null());
            assert!(result.get(2).is_null());
            assert!(result.get(3).is_null());

            let result = func
                .eval(FunctionContext::default(), &[vec2, vec1])
                .unwrap();

            assert!(!result.get(0).is_null());
            assert!(!result.get(1).is_null());
            assert!(result.get(2).is_null());
            assert!(result.get(3).is_null());
        }
    }

    #[test]
    fn test_distance_binary_binary() {
        let funcs = [
            Box::new(CosDistanceFunction {}) as Box<dyn Function>,
            Box::new(L2SqDistanceFunction {}) as Box<dyn Function>,
            Box::new(DotProductFunction {}) as Box<dyn Function>,
        ];

        for func in funcs {
            let vec1 = Arc::new(BinaryVector::from(vec![
                Some(vec![0, 0, 0, 0, 0, 0, 128, 63]),
                Some(vec![0, 0, 128, 63, 0, 0, 0, 0]),
                None,
                Some(vec![0, 0, 128, 63, 0, 0, 0, 0]),
            ])) as VectorRef;
            let vec2 = Arc::new(BinaryVector::from(vec![
                // [0.0, 1.0]
                Some(vec![0, 0, 0, 0, 0, 0, 128, 63]),
                Some(vec![0, 0, 0, 0, 0, 0, 128, 63]),
                Some(vec![0, 0, 0, 0, 0, 0, 128, 63]),
                None,
            ])) as VectorRef;

            let result = func
                .eval(FunctionContext::default(), &[vec1.clone(), vec2.clone()])
                .unwrap();

            assert!(!result.get(0).is_null());
            assert!(!result.get(1).is_null());
            assert!(result.get(2).is_null());
            assert!(result.get(3).is_null());

            let result = func
                .eval(FunctionContext::default(), &[vec2, vec1])
                .unwrap();

            assert!(!result.get(0).is_null());
            assert!(!result.get(1).is_null());
            assert!(result.get(2).is_null());
            assert!(result.get(3).is_null());
        }
    }

    #[test]
    fn test_distance_string_binary() {
        let funcs = [
            Box::new(CosDistanceFunction {}) as Box<dyn Function>,
            Box::new(L2SqDistanceFunction {}) as Box<dyn Function>,
            Box::new(DotProductFunction {}) as Box<dyn Function>,
        ];

        for func in funcs {
            let vec1 = Arc::new(StringVector::from(vec![
                Some("[0.0, 1.0]"),
                Some("[1.0, 0.0]"),
                None,
                Some("[1.0, 0.0]"),
            ])) as VectorRef;
            let vec2 = Arc::new(BinaryVector::from(vec![
                // [0.0, 1.0]
                Some(vec![0, 0, 0, 0, 0, 0, 128, 63]),
                Some(vec![0, 0, 0, 0, 0, 0, 128, 63]),
                Some(vec![0, 0, 0, 0, 0, 0, 128, 63]),
                None,
            ])) as VectorRef;

            let result = func
                .eval(FunctionContext::default(), &[vec1.clone(), vec2.clone()])
                .unwrap();

            assert!(!result.get(0).is_null());
            assert!(!result.get(1).is_null());
            assert!(result.get(2).is_null());
            assert!(result.get(3).is_null());

            let result = func
                .eval(FunctionContext::default(), &[vec2, vec1])
                .unwrap();

            assert!(!result.get(0).is_null());
            assert!(!result.get(1).is_null());
            assert!(result.get(2).is_null());
            assert!(result.get(3).is_null());
        }
    }

    #[test]
    fn test_distance_const_string() {
        let funcs = [
            Box::new(CosDistanceFunction {}) as Box<dyn Function>,
            Box::new(L2SqDistanceFunction {}) as Box<dyn Function>,
            Box::new(DotProductFunction {}) as Box<dyn Function>,
        ];

        for func in funcs {
            let const_str = Arc::new(ConstantVector::new(
                Arc::new(StringVector::from(vec!["[0.0, 1.0]"])),
                4,
            ));

            let vec1 = Arc::new(StringVector::from(vec![
                Some("[0.0, 1.0]"),
                Some("[1.0, 0.0]"),
                None,
                Some("[1.0, 0.0]"),
            ])) as VectorRef;
            let vec2 = Arc::new(BinaryVector::from(vec![
                // [0.0, 1.0]
                Some(vec![0, 0, 0, 0, 0, 0, 128, 63]),
                Some(vec![0, 0, 0, 0, 0, 0, 128, 63]),
                Some(vec![0, 0, 0, 0, 0, 0, 128, 63]),
                None,
            ])) as VectorRef;

            let result = func
                .eval(
                    FunctionContext::default(),
                    &[const_str.clone(), vec1.clone()],
                )
                .unwrap();

            assert!(!result.get(0).is_null());
            assert!(!result.get(1).is_null());
            assert!(result.get(2).is_null());
            assert!(!result.get(3).is_null());

            let result = func
                .eval(
                    FunctionContext::default(),
                    &[vec1.clone(), const_str.clone()],
                )
                .unwrap();

            assert!(!result.get(0).is_null());
            assert!(!result.get(1).is_null());
            assert!(result.get(2).is_null());
            assert!(!result.get(3).is_null());

            let result = func
                .eval(
                    FunctionContext::default(),
                    &[const_str.clone(), vec2.clone()],
                )
                .unwrap();

            assert!(!result.get(0).is_null());
            assert!(!result.get(1).is_null());
            assert!(!result.get(2).is_null());
            assert!(result.get(3).is_null());

            let result = func
                .eval(
                    FunctionContext::default(),
                    &[vec2.clone(), const_str.clone()],
                )
                .unwrap();

            assert!(!result.get(0).is_null());
            assert!(!result.get(1).is_null());
            assert!(!result.get(2).is_null());
            assert!(result.get(3).is_null());
        }
    }

    #[test]
    fn test_invalid_vector_length() {
        let funcs = [
            Box::new(CosDistanceFunction {}) as Box<dyn Function>,
            Box::new(L2SqDistanceFunction {}) as Box<dyn Function>,
            Box::new(DotProductFunction {}) as Box<dyn Function>,
        ];

        for func in funcs {
            let vec1 = Arc::new(StringVector::from(vec!["[1.0]"])) as VectorRef;
            let vec2 = Arc::new(StringVector::from(vec!["[1.0, 1.0]"])) as VectorRef;
            let result = func.eval(FunctionContext::default(), &[vec1, vec2]);
            assert!(result.is_err());

            let vec1 = Arc::new(BinaryVector::from(vec![vec![0, 0, 128, 63]])) as VectorRef;
            let vec2 =
                Arc::new(BinaryVector::from(vec![vec![0, 0, 128, 63, 0, 0, 0, 64]])) as VectorRef;
            let result = func.eval(FunctionContext::default(), &[vec1, vec2]);
            assert!(result.is_err());
        }
    }

    #[test]
    fn test_parse_vector_from_string() {
        let result = parse_f32_vector_from_string("[1.0, 2.0, 3.0]").unwrap();
        assert_eq!(result, vec![1.0, 2.0, 3.0]);

        let result = parse_f32_vector_from_string("[]").unwrap();
        assert_eq!(result, Vec::<f32>::new());

        let result = parse_f32_vector_from_string("[1.0, a, 3.0]");
        assert!(result.is_err());
    }

    #[test]
    fn test_binary_as_vector() {
        let bytes = [0, 0, 128, 63];
        let result = binary_as_vector(&bytes).unwrap();
        assert_eq!(result.as_ref(), &[1.0]);

        let invalid_bytes = [0, 0, 128];
        let result = binary_as_vector(&invalid_bytes);
        assert!(result.is_err());
    }
}
87
src/common/function/src/scalars/vector/distance/cos.rs
Normal file
@@ -0,0 +1,87 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use nalgebra::DVectorView;

/// Calculates the cos distance between two vectors.
///
/// **Note:** Must ensure that the length of the two vectors are the same.
pub fn cos(lhs: &[f32], rhs: &[f32]) -> f32 {
    let lhs_vec = DVectorView::from_slice(lhs, lhs.len());
    let rhs_vec = DVectorView::from_slice(rhs, rhs.len());

    let dot_product = lhs_vec.dot(&rhs_vec);
    let lhs_norm = lhs_vec.norm();
    let rhs_norm = rhs_vec.norm();
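    // Degenerate-input convention (added note): a zero dot product (orthogonal
    // vectors) or a zero-norm vector short-circuits to the maximum distance of
    // 1.0, since cosine similarity is 0 in the first case and undefined in the
    // second.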
    if dot_product.abs() < f32::EPSILON
        || lhs_norm.abs() < f32::EPSILON
        || rhs_norm.abs() < f32::EPSILON
    {
        return 1.0;
    }

    let cos_similar = dot_product / (lhs_norm * rhs_norm);
    let res = 1.0 - cos_similar;
    if res.abs() < f32::EPSILON {
        0.0
    } else {
        res
    }
}

#[cfg(test)]
mod tests {
    use approx::assert_relative_eq;

    use super::*;

    #[test]
    fn test_cos_scalar() {
        let lhs = vec![1.0, 2.0, 3.0];
        let rhs = vec![1.0, 2.0, 3.0];
        assert_relative_eq!(cos(&lhs, &rhs), 0.0, epsilon = 1e-2);

        let lhs = vec![1.0, 2.0, 3.0];
        let rhs = vec![4.0, 5.0, 6.0];
        assert_relative_eq!(cos(&lhs, &rhs), 0.025, epsilon = 1e-2);

        let lhs = vec![1.0, 2.0, 3.0];
        let rhs = vec![7.0, 8.0, 9.0];
        assert_relative_eq!(cos(&lhs, &rhs), 0.04, epsilon = 1e-2);

        let lhs = vec![0.0, 0.0, 0.0];
        let rhs = vec![1.0, 2.0, 3.0];
        assert_relative_eq!(cos(&lhs, &rhs), 1.0, epsilon = 1e-2);

        let lhs = vec![0.0, 0.0, 0.0];
        let rhs = vec![4.0, 5.0, 6.0];
        assert_relative_eq!(cos(&lhs, &rhs), 1.0, epsilon = 1e-2);

        let lhs = vec![0.0, 0.0, 0.0];
        let rhs = vec![7.0, 8.0, 9.0];
        assert_relative_eq!(cos(&lhs, &rhs), 1.0, epsilon = 1e-2);

        let lhs = vec![7.0, 8.0, 9.0];
        let rhs = vec![1.0, 2.0, 3.0];
        assert_relative_eq!(cos(&lhs, &rhs), 0.04, epsilon = 1e-2);

        let lhs = vec![7.0, 8.0, 9.0];
        let rhs = vec![4.0, 5.0, 6.0];
        assert_relative_eq!(cos(&lhs, &rhs), 0.0, epsilon = 1e-2);

        let lhs = vec![7.0, 8.0, 9.0];
        let rhs = vec![7.0, 8.0, 9.0];
        assert_relative_eq!(cos(&lhs, &rhs), 0.0, epsilon = 1e-2);
    }
}
71
src/common/function/src/scalars/vector/distance/dot.rs
Normal file
@@ -0,0 +1,71 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use nalgebra::DVectorView;

/// Calculates the dot product between two vectors.
///
/// **Note:** Must ensure that the length of the two vectors are the same.
pub fn dot(lhs: &[f32], rhs: &[f32]) -> f32 {
    let lhs = DVectorView::from_slice(lhs, lhs.len());
    let rhs = DVectorView::from_slice(rhs, rhs.len());

    lhs.dot(&rhs)
}

#[cfg(test)]
mod tests {
    use approx::assert_relative_eq;

    use super::*;

    #[test]
    fn test_dot_scalar() {
        let lhs = vec![1.0, 2.0, 3.0];
        let rhs = vec![1.0, 2.0, 3.0];
        assert_relative_eq!(dot(&lhs, &rhs), 14.0, epsilon = 1e-2);

        let lhs = vec![1.0, 2.0, 3.0];
        let rhs = vec![4.0, 5.0, 6.0];
        assert_relative_eq!(dot(&lhs, &rhs), 32.0, epsilon = 1e-2);

        let lhs = vec![1.0, 2.0, 3.0];
        let rhs = vec![7.0, 8.0, 9.0];
        assert_relative_eq!(dot(&lhs, &rhs), 50.0, epsilon = 1e-2);

        let lhs = vec![0.0, 0.0, 0.0];
        let rhs = vec![1.0, 2.0, 3.0];
        assert_relative_eq!(dot(&lhs, &rhs), 0.0, epsilon = 1e-2);

        let lhs = vec![0.0, 0.0, 0.0];
        let rhs = vec![4.0, 5.0, 6.0];
        assert_relative_eq!(dot(&lhs, &rhs), 0.0, epsilon = 1e-2);

        let lhs = vec![0.0, 0.0, 0.0];
        let rhs = vec![7.0, 8.0, 9.0];
        assert_relative_eq!(dot(&lhs, &rhs), 0.0, epsilon = 1e-2);

        let lhs = vec![7.0, 8.0, 9.0];
        let rhs = vec![1.0, 2.0, 3.0];
        assert_relative_eq!(dot(&lhs, &rhs), 50.0, epsilon = 1e-2);

        let lhs = vec![7.0, 8.0, 9.0];
        let rhs = vec![4.0, 5.0, 6.0];
        assert_relative_eq!(dot(&lhs, &rhs), 122.0, epsilon = 1e-2);

        let lhs = vec![7.0, 8.0, 9.0];
        let rhs = vec![7.0, 8.0, 9.0];
        assert_relative_eq!(dot(&lhs, &rhs), 194.0, epsilon = 1e-2);
    }
}
71
src/common/function/src/scalars/vector/distance/l2sq.rs
Normal file
@@ -0,0 +1,71 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use nalgebra::DVectorView;

/// Calculates the squared L2 distance between two vectors.
///
/// **Note:** Must ensure that the length of the two vectors are the same.
pub fn l2sq(lhs: &[f32], rhs: &[f32]) -> f32 {
    let lhs = DVectorView::from_slice(lhs, lhs.len());
    let rhs = DVectorView::from_slice(rhs, rhs.len());

    (lhs - rhs).norm_squared()
}

#[cfg(test)]
mod tests {
    use approx::assert_relative_eq;

    use super::*;

    #[test]
    fn test_l2sq_scalar() {
        let lhs = vec![1.0, 2.0, 3.0];
        let rhs = vec![1.0, 2.0, 3.0];
        assert_relative_eq!(l2sq(&lhs, &rhs), 0.0, epsilon = 1e-2);

        let lhs = vec![1.0, 2.0, 3.0];
        let rhs = vec![4.0, 5.0, 6.0];
        assert_relative_eq!(l2sq(&lhs, &rhs), 27.0, epsilon = 1e-2);

        let lhs = vec![1.0, 2.0, 3.0];
        let rhs = vec![7.0, 8.0, 9.0];
        assert_relative_eq!(l2sq(&lhs, &rhs), 108.0, epsilon = 1e-2);

        let lhs = vec![0.0, 0.0, 0.0];
        let rhs = vec![1.0, 2.0, 3.0];
        assert_relative_eq!(l2sq(&lhs, &rhs), 14.0, epsilon = 1e-2);

        let lhs = vec![0.0, 0.0, 0.0];
        let rhs = vec![4.0, 5.0, 6.0];
        assert_relative_eq!(l2sq(&lhs, &rhs), 77.0, epsilon = 1e-2);

        let lhs = vec![0.0, 0.0, 0.0];
        let rhs = vec![7.0, 8.0, 9.0];
        assert_relative_eq!(l2sq(&lhs, &rhs), 194.0, epsilon = 1e-2);

        let lhs = vec![7.0, 8.0, 9.0];
        let rhs = vec![1.0, 2.0, 3.0];
        assert_relative_eq!(l2sq(&lhs, &rhs), 108.0, epsilon = 1e-2);

        let lhs = vec![7.0, 8.0, 9.0];
        let rhs = vec![4.0, 5.0, 6.0];
        assert_relative_eq!(l2sq(&lhs, &rhs), 27.0, epsilon = 1e-2);

        let lhs = vec![7.0, 8.0, 9.0];
        let rhs = vec![7.0, 8.0, 9.0];
        assert_relative_eq!(l2sq(&lhs, &rhs), 0.0, epsilon = 1e-2);
    }
}
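A quick cross-check tying the three distance modules together: squared L2 distance satisfies l2sq(a, b) = |a|^2 + |b|^2 - 2 * dot(a, b). For the test pair [1,2,3] and [4,5,6] above, |a|^2 = 14, |b|^2 = 77, and dot = 32, so l2sq = 14 + 77 - 64 = 27, matching the expected 27.0:

assert_eq!(14.0 + 77.0 - 2.0 * 32.0, 27.0);
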
@@ -14,28 +14,30 @@

use api::helper::ColumnDataTypeWrapper;
use api::v1::add_column_location::LocationType;
use api::v1::alter_expr::Kind;
use api::v1::alter_table_expr::Kind;
use api::v1::column_def::as_fulltext_option;
use api::v1::{
    column_def, AddColumnLocation as Location, AlterExpr, ChangeColumnTypes, CreateTableExpr,
    DropColumns, RenameTable, SemanticType,
    column_def, AddColumnLocation as Location, AlterTableExpr, Analyzer, CreateTableExpr,
    DropColumns, ModifyColumnTypes, RenameTable, SemanticType,
};
use common_query::AddColumnLocation;
use datatypes::schema::{ColumnSchema, RawSchema};
use datatypes::schema::{ColumnSchema, FulltextOptions, RawSchema};
use snafu::{ensure, OptionExt, ResultExt};
use store_api::region_request::ChangeOption;
use store_api::region_request::{SetRegionOption, UnsetRegionOption};
use table::metadata::TableId;
use table::requests::{AddColumnRequest, AlterKind, AlterTableRequest, ChangeColumnTypeRequest};
use table::requests::{AddColumnRequest, AlterKind, AlterTableRequest, ModifyColumnTypeRequest};

use crate::error::{
    InvalidChangeTableOptionRequestSnafu, InvalidColumnDefSnafu, MissingFieldSnafu,
    MissingTimestampColumnSnafu, Result, UnknownLocationTypeSnafu,
    InvalidColumnDefSnafu, InvalidSetFulltextOptionRequestSnafu, InvalidSetTableOptionRequestSnafu,
    InvalidUnsetTableOptionRequestSnafu, MissingFieldSnafu, MissingTimestampColumnSnafu, Result,
    UnknownLocationTypeSnafu,
};

const LOCATION_TYPE_FIRST: i32 = LocationType::First as i32;
const LOCATION_TYPE_AFTER: i32 = LocationType::After as i32;

/// Convert an [`AlterExpr`] to an [`AlterTableRequest`]
pub fn alter_expr_to_request(table_id: TableId, expr: AlterExpr) -> Result<AlterTableRequest> {
/// Convert an [`AlterTableExpr`] to an [`AlterTableRequest`]
pub fn alter_expr_to_request(table_id: TableId, expr: AlterTableExpr) -> Result<AlterTableRequest> {
    let catalog_name = expr.catalog_name;
    let schema_name = expr.schema_name;
    let kind = expr.kind.context(MissingFieldSnafu { field: "kind" })?;
@@ -66,25 +68,25 @@ pub fn alter_expr_to_request(table_id: TableId, expr: AlterExpr) -> Result<Alter
                columns: add_column_requests,
            }
        }
        Kind::ChangeColumnTypes(ChangeColumnTypes {
            change_column_types,
        Kind::ModifyColumnTypes(ModifyColumnTypes {
            modify_column_types,
        }) => {
            let change_column_type_requests = change_column_types
            let modify_column_type_requests = modify_column_types
                .into_iter()
                .map(|cct| {
                    let target_type =
                        ColumnDataTypeWrapper::new(cct.target_type(), cct.target_type_extension)
                            .into();

                    Ok(ChangeColumnTypeRequest {
                    Ok(ModifyColumnTypeRequest {
                        column_name: cct.column_name,
                        target_type,
                    })
                })
                .collect::<Result<Vec<_>>>()?;

            AlterKind::ChangeColumnTypes {
                columns: change_column_type_requests,
            AlterKind::ModifyColumnTypes {
                columns: modify_column_type_requests,
            }
        }
        Kind::DropColumns(DropColumns { drop_columns }) => AlterKind::DropColumns {
@@ -93,14 +95,36 @@ pub fn alter_expr_to_request(table_id: TableId, expr: AlterExpr) -> Result<Alter
        Kind::RenameTable(RenameTable { new_table_name }) => {
            AlterKind::RenameTable { new_table_name }
        }
        Kind::ChangeTableOptions(api::v1::ChangeTableOptions {
            change_table_options,
        }) => AlterKind::ChangeTableOptions {
            options: change_table_options
                .iter()
                .map(ChangeOption::try_from)
                .collect::<std::result::Result<Vec<_>, _>>()
                .context(InvalidChangeTableOptionRequestSnafu)?,
        Kind::SetTableOptions(api::v1::SetTableOptions { table_options }) => {
            AlterKind::SetTableOptions {
                options: table_options
                    .iter()
                    .map(SetRegionOption::try_from)
                    .collect::<std::result::Result<Vec<_>, _>>()
                    .context(InvalidSetTableOptionRequestSnafu)?,
            }
        }
        Kind::UnsetTableOptions(api::v1::UnsetTableOptions { keys }) => {
            AlterKind::UnsetTableOptions {
                keys: keys
                    .iter()
                    .map(|key| UnsetRegionOption::try_from(key.as_str()))
                    .collect::<std::result::Result<Vec<_>, _>>()
                    .context(InvalidUnsetTableOptionRequestSnafu)?,
            }
        }
        Kind::SetColumnFulltext(c) => AlterKind::SetColumnFulltext {
            column_name: c.column_name,
            options: FulltextOptions {
                enable: c.enable,
                analyzer: as_fulltext_option(
                    Analyzer::try_from(c.analyzer).context(InvalidSetFulltextOptionRequestSnafu)?,
                ),
                case_sensitive: c.case_sensitive,
            },
        },
        Kind::UnsetColumnFulltext(c) => AlterKind::UnsetColumnFulltext {
            column_name: c.column_name,
        },
    };

@@ -170,7 +194,7 @@ fn parse_location(location: Option<Location>) -> Result<Option<AddColumnLocation
#[cfg(test)]
mod tests {
    use api::v1::{
        AddColumn, AddColumns, ChangeColumnType, ColumnDataType, ColumnDef, DropColumn,
        AddColumn, AddColumns, ColumnDataType, ColumnDef, DropColumn, ModifyColumnType,
        SemanticType,
    };
    use datatypes::prelude::ConcreteDataType;
@@ -179,7 +203,7 @@ mod tests {

    #[test]
    fn test_alter_expr_to_request() {
        let expr = AlterExpr {
        let expr = AlterTableExpr {
            catalog_name: String::default(),
            schema_name: String::default(),
            table_name: "monitor".to_string(),
@@ -220,7 +244,7 @@ mod tests {

    #[test]
    fn test_alter_expr_with_location_to_request() {
        let expr = AlterExpr {
        let expr = AlterTableExpr {
            catalog_name: String::default(),
            schema_name: String::default(),
            table_name: "monitor".to_string(),
@@ -296,14 +320,14 @@ mod tests {
    }

    #[test]
    fn test_change_column_type_expr() {
        let expr = AlterExpr {
    fn test_modify_column_type_expr() {
        let expr = AlterTableExpr {
            catalog_name: "test_catalog".to_string(),
            schema_name: "test_schema".to_string(),
            table_name: "monitor".to_string(),

            kind: Some(Kind::ChangeColumnTypes(ChangeColumnTypes {
                change_column_types: vec![ChangeColumnType {
            kind: Some(Kind::ModifyColumnTypes(ModifyColumnTypes {
                modify_column_types: vec![ModifyColumnType {
                    column_name: "mem_usage".to_string(),
                    target_type: ColumnDataType::String as i32,
                    target_type_extension: None,
@@ -316,22 +340,22 @@ mod tests {
        assert_eq!(alter_request.schema_name, "test_schema");
        assert_eq!("monitor".to_string(), alter_request.table_name);

        let mut change_column_types = match alter_request.alter_kind {
            AlterKind::ChangeColumnTypes { columns } => columns,
        let mut modify_column_types = match alter_request.alter_kind {
            AlterKind::ModifyColumnTypes { columns } => columns,
            _ => unreachable!(),
        };

        let change_column_type = change_column_types.pop().unwrap();
        assert_eq!("mem_usage", change_column_type.column_name);
        let modify_column_type = modify_column_types.pop().unwrap();
        assert_eq!("mem_usage", modify_column_type.column_name);
        assert_eq!(
            ConcreteDataType::string_datatype(),
            change_column_type.target_type
            modify_column_type.target_type
        );
    }

    #[test]
    fn test_drop_column_expr() {
        let expr = AlterExpr {
        let expr = AlterTableExpr {
            catalog_name: "test_catalog".to_string(),
            schema_name: "test_schema".to_string(),
            table_name: "monitor".to_string(),

@@ -120,11 +120,25 @@ pub enum Error {
        location: Location,
    },

    #[snafu(display("Invalid change table option request"))]
    InvalidChangeTableOptionRequest {
    #[snafu(display("Invalid set table option request"))]
    InvalidSetTableOptionRequest {
        #[snafu(source)]
        error: MetadataError,
    },

    #[snafu(display("Invalid unset table option request"))]
    InvalidUnsetTableOptionRequest {
        #[snafu(source)]
        error: MetadataError,
    },

    #[snafu(display("Invalid set fulltext option request"))]
    InvalidSetFulltextOptionRequest {
        #[snafu(implicit)]
        location: Location,
        #[snafu(source)]
        error: prost::DecodeError,
    },
}

pub type Result<T> = std::result::Result<T, Error>;
@@ -148,7 +162,9 @@ impl ErrorExt for Error {
            Error::UnknownColumnDataType { .. } | Error::InvalidFulltextColumnType { .. } => {
                StatusCode::InvalidArguments
            }
            Error::InvalidChangeTableOptionRequest { .. } => StatusCode::InvalidArguments,
            Error::InvalidSetTableOptionRequest { .. }
            | Error::InvalidUnsetTableOptionRequest { .. }
            | Error::InvalidSetFulltextOptionRequest { .. } => StatusCode::InvalidArguments,
        }
    }

@@ -14,10 +14,11 @@

use std::collections::HashSet;

use api::v1::column_data_type_extension::TypeExt;
use api::v1::column_def::contains_fulltext;
use api::v1::{
    AddColumn, AddColumns, Column, ColumnDataType, ColumnDataTypeExtension, ColumnDef,
    ColumnOptions, ColumnSchema, CreateTableExpr, SemanticType,
    ColumnOptions, ColumnSchema, CreateTableExpr, JsonTypeExtension, SemanticType,
};
use datatypes::schema::Schema;
use snafu::{ensure, OptionExt, ResultExt};
@@ -25,8 +26,9 @@ use table::metadata::TableId;
use table::table_reference::TableReference;

use crate::error::{
    DuplicatedColumnNameSnafu, DuplicatedTimestampColumnSnafu, InvalidFulltextColumnTypeSnafu,
    MissingTimestampColumnSnafu, Result, UnknownColumnDataTypeSnafu,
    self, DuplicatedColumnNameSnafu, DuplicatedTimestampColumnSnafu,
    InvalidFulltextColumnTypeSnafu, MissingTimestampColumnSnafu, Result,
    UnknownColumnDataTypeSnafu,
};
pub struct ColumnExpr<'a> {
    pub column_name: &'a str,
@@ -72,6 +74,28 @@ impl<'a> From<&'a ColumnSchema> for ColumnExpr<'a> {
    }
}

fn infer_column_datatype(
    datatype: i32,
    datatype_extension: &Option<ColumnDataTypeExtension>,
) -> Result<ColumnDataType> {
    let column_type =
        ColumnDataType::try_from(datatype).context(UnknownColumnDataTypeSnafu { datatype })?;

    if matches!(&column_type, ColumnDataType::Binary) {
        if let Some(ext) = datatype_extension {
            let type_ext = ext
                .type_ext
                .as_ref()
                .context(error::MissingFieldSnafu { field: "type_ext" })?;
            if *type_ext == TypeExt::JsonType(JsonTypeExtension::JsonBinary.into()) {
                return Ok(ColumnDataType::Json);
            }
        }
    }

    Ok(column_type)
}

pub fn build_create_table_expr(
    table_id: Option<TableId>,
    table_name: &TableReference<'_>,
@@ -124,8 +148,7 @@ pub fn build_create_table_expr(
        _ => {}
    }

    let column_type =
        ColumnDataType::try_from(datatype).context(UnknownColumnDataTypeSnafu { datatype })?;
    let column_type = infer_column_datatype(datatype, datatype_extension)?;

    ensure!(
        !contains_fulltext(options) || column_type == ColumnDataType::String,

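The new `infer_column_datatype` helper above encodes a single rule: a BINARY column carrying the JSON type extension is really a JSON column. A minimal standalone sketch of that rule, using toy stand-in types rather than the generated `api::v1` protobuf types:

```rust
#[derive(Debug, PartialEq, Clone, Copy)]
enum DataType { Binary, Json, String }

enum TypeExt { JsonBinary }

// Same decision as infer_column_datatype above: only a Binary column with
// the JsonBinary extension is promoted to Json; everything else passes through.
fn infer(datatype: DataType, ext: Option<TypeExt>) -> DataType {
    match (datatype, ext) {
        (DataType::Binary, Some(TypeExt::JsonBinary)) => DataType::Json,
        (dt, _) => dt,
    }
}

fn main() {
    assert_eq!(infer(DataType::Binary, Some(TypeExt::JsonBinary)), DataType::Json);
    assert_eq!(infer(DataType::Binary, None), DataType::Binary);
    assert_eq!(infer(DataType::String, Some(TypeExt::JsonBinary)), DataType::String);
}
```
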
@@ -218,6 +218,12 @@ pub fn values(arrays: &[VectorRef]) -> Result<Values> {
            Decimal128Vector,
            decimal128_values,
            |x| { convert_to_pb_decimal128(x) }
        ),
        (
            ConcreteDataType::Vector(_),
            BinaryVector,
            binary_values,
            |x| { x.into() }
        )
    )
}

@@ -39,7 +39,7 @@ derive_builder.workspace = true
etcd-client.workspace = true
futures.workspace = true
futures-util.workspace = true
hex = { version = "0.4" }
hex.workspace = true
humantime-serde.workspace = true
itertools.workspace = true
lazy_static.workspace = true

@@ -77,7 +77,7 @@ pub struct RegionStat {
    pub rcus: i64,
    /// The write capacity units during this period
    pub wcus: i64,
    /// Approximate bytes of this region
    /// Approximate disk bytes of this region, including sst, index, manifest and wal
    pub approximate_bytes: u64,
    /// The engine name.
    pub engine: String,

@@ -32,6 +32,7 @@ use crate::rpc::ddl::{SubmitDdlTaskRequest, SubmitDdlTaskResponse};
use crate::rpc::procedure::{MigrateRegionRequest, MigrateRegionResponse, ProcedureStateResponse};
use crate::{ClusterId, DatanodeId};

pub mod alter_database;
pub mod alter_logical_tables;
pub mod alter_table;
pub mod create_database;

248
src/common/meta/src/ddl/alter_database.rs
Normal file
@@ -0,0 +1,248 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use async_trait::async_trait;
use common_procedure::error::{FromJsonSnafu, Result as ProcedureResult, ToJsonSnafu};
use common_procedure::{Context as ProcedureContext, LockKey, Procedure, Status};
use common_telemetry::tracing::info;
use serde::{Deserialize, Serialize};
use snafu::{ensure, ResultExt};
use strum::AsRefStr;

use super::utils::handle_retry_error;
use crate::cache_invalidator::Context;
use crate::ddl::DdlContext;
use crate::error::{Result, SchemaNotFoundSnafu};
use crate::instruction::CacheIdent;
use crate::key::schema_name::{SchemaName, SchemaNameKey, SchemaNameValue};
use crate::key::DeserializedValueWithBytes;
use crate::lock_key::{CatalogLock, SchemaLock};
use crate::rpc::ddl::UnsetDatabaseOption::{self};
use crate::rpc::ddl::{AlterDatabaseKind, AlterDatabaseTask, SetDatabaseOption};
use crate::ClusterId;

pub struct AlterDatabaseProcedure {
    pub context: DdlContext,
    pub data: AlterDatabaseData,
}

fn build_new_schema_value(
    mut value: SchemaNameValue,
    alter_kind: &AlterDatabaseKind,
) -> Result<SchemaNameValue> {
    match alter_kind {
        AlterDatabaseKind::SetDatabaseOptions(options) => {
            for option in options.0.iter() {
                match option {
                    SetDatabaseOption::Ttl(ttl) => {
                        if ttl.is_zero() {
                            value.ttl = None;
                        } else {
                            value.ttl = Some(*ttl);
                        }
                    }
                }
            }
        }
        AlterDatabaseKind::UnsetDatabaseOptions(keys) => {
            for key in keys.0.iter() {
                match key {
                    UnsetDatabaseOption::Ttl => value.ttl = None,
                }
            }
        }
    }
    Ok(value)
}

impl AlterDatabaseProcedure {
    pub const TYPE_NAME: &'static str = "metasrv-procedure::AlterDatabase";

    pub fn new(
        cluster_id: ClusterId,
        task: AlterDatabaseTask,
        context: DdlContext,
    ) -> Result<Self> {
        Ok(Self {
            context,
            data: AlterDatabaseData::new(task, cluster_id)?,
        })
    }

    pub fn from_json(json: &str, context: DdlContext) -> ProcedureResult<Self> {
        let data = serde_json::from_str(json).context(FromJsonSnafu)?;

        Ok(Self { context, data })
    }

    pub async fn on_prepare(&mut self) -> Result<Status> {
        let value = self
            .context
            .table_metadata_manager
            .schema_manager()
            .get(SchemaNameKey::new(self.data.catalog(), self.data.schema()))
            .await?;

        ensure!(
            value.is_some(),
            SchemaNotFoundSnafu {
                table_schema: self.data.schema(),
            }
        );

        self.data.schema_value = value;
        self.data.state = AlterDatabaseState::UpdateMetadata;

        Ok(Status::executing(true))
    }

    pub async fn on_update_metadata(&mut self) -> Result<Status> {
        let schema_name = SchemaNameKey::new(self.data.catalog(), self.data.schema());

        // Safety: schema_value is not None.
        let current_schema_value = self.data.schema_value.as_ref().unwrap();

        let new_schema_value = build_new_schema_value(
            current_schema_value.get_inner_ref().clone(),
            &self.data.kind,
        )?;

        self.context
            .table_metadata_manager
            .schema_manager()
            .update(schema_name, current_schema_value, &new_schema_value)
            .await?;

        info!("Updated database metadata for schema {schema_name}");
        self.data.state = AlterDatabaseState::InvalidateSchemaCache;
        Ok(Status::executing(true))
    }

    pub async fn on_invalidate_schema_cache(&mut self) -> Result<Status> {
        let cache_invalidator = &self.context.cache_invalidator;
        cache_invalidator
            .invalidate(
                &Context::default(),
                &[CacheIdent::SchemaName(SchemaName {
                    catalog_name: self.data.catalog().to_string(),
                    schema_name: self.data.schema().to_string(),
                })],
            )
            .await?;

        Ok(Status::done())
    }
}

#[async_trait]
impl Procedure for AlterDatabaseProcedure {
    fn type_name(&self) -> &str {
        Self::TYPE_NAME
    }

    async fn execute(&mut self, _ctx: &ProcedureContext) -> ProcedureResult<Status> {
        match self.data.state {
            AlterDatabaseState::Prepare => self.on_prepare().await,
            AlterDatabaseState::UpdateMetadata => self.on_update_metadata().await,
            AlterDatabaseState::InvalidateSchemaCache => self.on_invalidate_schema_cache().await,
        }
        .map_err(handle_retry_error)
    }

    fn dump(&self) -> ProcedureResult<String> {
        serde_json::to_string(&self.data).context(ToJsonSnafu)
    }

    fn lock_key(&self) -> LockKey {
        let catalog = self.data.catalog();
        let schema = self.data.schema();

        let lock_key = vec![
            CatalogLock::Read(catalog).into(),
            SchemaLock::write(catalog, schema).into(),
        ];

        LockKey::new(lock_key)
    }
}

#[derive(Debug, Serialize, Deserialize, AsRefStr)]
enum AlterDatabaseState {
    Prepare,
    UpdateMetadata,
    InvalidateSchemaCache,
}

/// The data of alter database procedure.
#[derive(Debug, Serialize, Deserialize)]
pub struct AlterDatabaseData {
    cluster_id: ClusterId,
    state: AlterDatabaseState,
    kind: AlterDatabaseKind,
    catalog_name: String,
    schema_name: String,
    schema_value: Option<DeserializedValueWithBytes<SchemaNameValue>>,
}

impl AlterDatabaseData {
    pub fn new(task: AlterDatabaseTask, cluster_id: ClusterId) -> Result<Self> {
        Ok(Self {
            cluster_id,
            state: AlterDatabaseState::Prepare,
            kind: AlterDatabaseKind::try_from(task.alter_expr.kind.unwrap())?,
            catalog_name: task.alter_expr.catalog_name,
            schema_name: task.alter_expr.schema_name,
            schema_value: None,
        })
    }

    pub fn catalog(&self) -> &str {
        &self.catalog_name
    }

    pub fn schema(&self) -> &str {
        &self.schema_name
    }
}

#[cfg(test)]
mod tests {
    use std::time::Duration;

    use crate::ddl::alter_database::build_new_schema_value;
    use crate::key::schema_name::SchemaNameValue;
    use crate::rpc::ddl::{
        AlterDatabaseKind, SetDatabaseOption, SetDatabaseOptions, UnsetDatabaseOption,
        UnsetDatabaseOptions,
    };

    #[test]
    fn test_build_new_schema_value() {
        let set_ttl = AlterDatabaseKind::SetDatabaseOptions(SetDatabaseOptions(vec![
            SetDatabaseOption::Ttl(Duration::from_secs(10)),
        ]));
        let current_schema_value = SchemaNameValue::default();
        let new_schema_value =
            build_new_schema_value(current_schema_value.clone(), &set_ttl).unwrap();
        assert_eq!(new_schema_value.ttl, Some(Duration::from_secs(10)));

        let unset_ttl_alter_kind =
            AlterDatabaseKind::UnsetDatabaseOptions(UnsetDatabaseOptions(vec![
                UnsetDatabaseOption::Ttl,
            ]));
        let new_schema_value =
            build_new_schema_value(current_schema_value, &unset_ttl_alter_kind).unwrap();
        assert_eq!(new_schema_value.ttl, None);
    }
}
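One subtlety in `build_new_schema_value` above: setting a TTL of zero behaves like unsetting it. A minimal sketch of that rule in isolation (plain `std` types only, no GreptimeDB dependencies):

```rust
use std::time::Duration;

// Zero means "clear the TTL"; any other value replaces it.
fn apply_set_ttl(ttl: Duration) -> Option<Duration> {
    if ttl.is_zero() { None } else { Some(ttl) }
}

fn main() {
    assert_eq!(apply_set_ttl(Duration::ZERO), None);
    assert_eq!(apply_set_ttl(Duration::from_secs(10)), Some(Duration::from_secs(10)));
}
```
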
@@ -14,7 +14,7 @@

use std::collections::HashSet;

use api::v1::alter_expr::Kind;
use api::v1::alter_table_expr::Kind;
use snafu::{ensure, OptionExt};

use crate::ddl::alter_logical_tables::AlterLogicalTablesProcedure;

@@ -13,7 +13,7 @@
// limitations under the License.

use api::v1;
use api::v1::alter_expr::Kind;
use api::v1::alter_table_expr::Kind;
use api::v1::region::{
    alter_request, region_request, AddColumn, AddColumns, AlterRequest, AlterRequests,
    RegionColumnDef, RegionRequest, RegionRequestHeader,

@@ -19,7 +19,7 @@ mod update_metadata;

use std::vec;

use api::v1::alter_expr::Kind;
use api::v1::alter_table_expr::Kind;
use api::v1::RenameTable;
use async_trait::async_trait;
use common_error::ext::ErrorExt;

@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use api::v1::alter_expr::Kind;
use api::v1::alter_table_expr::Kind;
use api::v1::RenameTable;
use common_catalog::format_full_table_name;
use snafu::ensure;

@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use api::v1::alter_expr::Kind;
use api::v1::alter_table_expr::Kind;
use api::v1::region::region_request::Body;
use api::v1::region::{
    alter_request, AddColumn, AddColumns, AlterRequest, DropColumn, DropColumns, RegionColumnDef,
@@ -91,7 +91,7 @@ fn create_proto_alter_kind(
                add_columns,
            })))
        }
        Kind::ChangeColumnTypes(x) => Ok(Some(alter_request::Kind::ChangeColumnTypes(x.clone()))),
        Kind::ModifyColumnTypes(x) => Ok(Some(alter_request::Kind::ModifyColumnTypes(x.clone()))),
        Kind::DropColumns(x) => {
            let drop_columns = x
                .drop_columns
@@ -106,7 +106,12 @@ fn create_proto_alter_kind(
            })))
        }
        Kind::RenameTable(_) => Ok(None),
        Kind::ChangeTableOptions(v) => Ok(Some(alter_request::Kind::ChangeTableOptions(v.clone()))),
        Kind::SetTableOptions(v) => Ok(Some(alter_request::Kind::SetTableOptions(v.clone()))),
        Kind::UnsetTableOptions(v) => Ok(Some(alter_request::Kind::UnsetTableOptions(v.clone()))),
        Kind::SetColumnFulltext(v) => Ok(Some(alter_request::Kind::SetColumnFulltext(v.clone()))),
        Kind::UnsetColumnFulltext(v) => {
            Ok(Some(alter_request::Kind::UnsetColumnFulltext(v.clone())))
        }
    }
}

@@ -116,12 +121,12 @@ mod tests {
    use std::sync::Arc;

    use api::v1::add_column_location::LocationType;
    use api::v1::alter_expr::Kind;
    use api::v1::alter_table_expr::Kind;
    use api::v1::region::region_request::Body;
    use api::v1::region::RegionColumnDef;
    use api::v1::{
        region, AddColumn, AddColumnLocation, AddColumns, AlterExpr, ChangeColumnType,
        ChangeColumnTypes, ColumnDataType, ColumnDef as PbColumnDef, SemanticType,
        region, AddColumn, AddColumnLocation, AddColumns, AlterTableExpr, ColumnDataType,
        ColumnDef as PbColumnDef, ModifyColumnType, ModifyColumnTypes, SemanticType,
    };
    use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
    use store_api::storage::{RegionId, TableId};
@@ -210,7 +215,7 @@ mod tests {
        prepare_ddl_context().await;

        let task = AlterTableTask {
            alter_table: AlterExpr {
            alter_table: AlterTableExpr {
                catalog_name: DEFAULT_CATALOG_NAME.to_string(),
                schema_name: DEFAULT_SCHEMA_NAME.to_string(),
                table_name,
@@ -277,12 +282,12 @@ mod tests {
        prepare_ddl_context().await;

        let task = AlterTableTask {
            alter_table: AlterExpr {
            alter_table: AlterTableExpr {
                catalog_name: DEFAULT_CATALOG_NAME.to_string(),
                schema_name: DEFAULT_SCHEMA_NAME.to_string(),
                table_name,
                kind: Some(Kind::ChangeColumnTypes(ChangeColumnTypes {
                    change_column_types: vec![ChangeColumnType {
                kind: Some(Kind::ModifyColumnTypes(ModifyColumnTypes {
                    modify_column_types: vec![ModifyColumnType {
                        column_name: "cpu".to_string(),
                        target_type: ColumnDataType::String as i32,
                        target_type_extension: None,
@@ -303,9 +308,9 @@ mod tests {
        assert_eq!(alter_region_request.schema_version, 1);
        assert_eq!(
            alter_region_request.kind,
            Some(region::alter_request::Kind::ChangeColumnTypes(
                ChangeColumnTypes {
                    change_column_types: vec![ChangeColumnType {
            Some(region::alter_request::Kind::ModifyColumnTypes(
                ModifyColumnTypes {
                    modify_column_types: vec![ModifyColumnType {
                        column_name: "cpu".to_string(),
                        target_type: ColumnDataType::String as i32,
                        target_type_extension: None,

@@ -52,8 +52,11 @@ impl AlterTableProcedure {
            new_info.name = new_table_name.to_string();
        }
        AlterKind::DropColumns { .. }
        | AlterKind::ChangeColumnTypes { .. }
        | AlterKind::ChangeTableOptions { .. } => {}
        | AlterKind::ModifyColumnTypes { .. }
        | AlterKind::SetTableOptions { .. }
        | AlterKind::UnsetTableOptions { .. }
        | AlterKind::SetColumnFulltext { .. }
        | AlterKind::UnsetColumnFulltext { .. } => {}
    }

    Ok(new_info)

@@ -28,6 +28,7 @@ use common_procedure::{
use common_telemetry::info;
use common_telemetry::tracing_context::TracingContext;
use futures::future::join_all;
use futures::TryStreamExt;
use itertools::Itertools;
use serde::{Deserialize, Serialize};
use snafu::{ensure, ResultExt};
@@ -43,7 +44,7 @@ use crate::instruction::{CacheIdent, CreateFlow};
use crate::key::flow::flow_info::FlowInfoValue;
use crate::key::flow::flow_route::FlowRouteValue;
use crate::key::table_name::TableNameKey;
use crate::key::{FlowId, FlowPartitionId};
use crate::key::{DeserializedValueWithBytes, FlowId, FlowPartitionId};
use crate::lock_key::{CatalogLock, FlowNameLock, TableNameLock};
use crate::peer::Peer;
use crate::rpc::ddl::{CreateFlowTask, QueryContext};
@@ -75,6 +76,7 @@ impl CreateFlowProcedure {
                source_table_ids: vec![],
                query_context,
                state: CreateFlowState::Prepare,
                prev_flow_info_value: None,
            },
        }
    }
@@ -90,6 +92,7 @@ impl CreateFlowProcedure {
        let flow_name = &self.data.task.flow_name;
        let sink_table_name = &self.data.task.sink_table_name;
        let create_if_not_exists = self.data.task.create_if_not_exists;
        let or_replace = self.data.task.or_replace;

        let flow_name_value = self
            .context
@@ -98,16 +101,56 @@ impl CreateFlowProcedure {
            .get(catalog_name, flow_name)
            .await?;

        if create_if_not_exists && or_replace {
            // Forbidden: it is unclear what the combination of the two flags should mean.
            return error::UnsupportedSnafu {
                operation: "Create flow with both `IF NOT EXISTS` and `OR REPLACE`".to_string(),
            }
            .fail();
        }

        if let Some(value) = flow_name_value {
            ensure!(
                create_if_not_exists,
                create_if_not_exists || or_replace,
                error::FlowAlreadyExistsSnafu {
                    flow_name: format_full_flow_name(catalog_name, flow_name),
                }
            );

            let flow_id = value.flow_id();
            return Ok(Status::done_with_output(flow_id));
            if create_if_not_exists {
                info!("Flow already exists, flow_id: {}", flow_id);
                return Ok(Status::done_with_output(flow_id));
            }

            let flow_id = value.flow_id();
            let peers = self
                .context
                .flow_metadata_manager
                .flow_route_manager()
                .routes(flow_id)
                .map_ok(|(_, value)| value.peer)
                .try_collect::<Vec<_>>()
                .await?;
            self.data.flow_id = Some(flow_id);
            self.data.peers = peers;
            info!("Replacing flow, flow_id: {}", flow_id);

            let flow_info_value = self
                .context
                .flow_metadata_manager
                .flow_info_manager()
                .get_raw(flow_id)
                .await?;

            ensure!(
                flow_info_value.is_some(),
                error::FlowNotFoundSnafu {
                    flow_name: format_full_flow_name(catalog_name, flow_name),
                }
            );

            self.data.prev_flow_info_value = flow_info_value;
        }

        // Ensures sink table doesn't exist.
@@ -128,7 +171,9 @@ impl CreateFlowProcedure {
        }

        self.collect_source_tables().await?;
        self.allocate_flow_id().await?;
        if self.data.flow_id.is_none() {
            self.allocate_flow_id().await?;
        }
        self.data.state = CreateFlowState::CreateFlows;

        Ok(Status::executing(true))
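The prepare step above now has four flag combinations to reason about when a flow with the same name already exists. A hypothetical helper condensing those rules (the real code inlines them, as shown above):

```rust
#[derive(Debug, PartialEq)]
enum OnExisting {
    // `IF NOT EXISTS`: keep the existing flow and finish successfully.
    ShortCircuit,
    // `OR REPLACE`: reuse the existing flow id and replace the definition.
    Replace,
}

fn on_existing_flow(create_if_not_exists: bool, or_replace: bool) -> Result<OnExisting, String> {
    match (create_if_not_exists, or_replace) {
        // Rejected up front, even before the name lookup.
        (true, true) => Err("`IF NOT EXISTS` and `OR REPLACE` cannot be combined".to_string()),
        (true, false) => Ok(OnExisting::ShortCircuit),
        (false, true) => Ok(OnExisting::Replace),
        (false, false) => Err("flow already exists".to_string()),
    }
}

fn main() {
    assert_eq!(on_existing_flow(true, false), Ok(OnExisting::ShortCircuit));
    assert_eq!(on_existing_flow(false, true), Ok(OnExisting::Replace));
    assert!(on_existing_flow(false, false).is_err());
}
```
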
@@ -153,7 +198,10 @@ impl CreateFlowProcedure {
                .map_err(add_peer_context_if_needed(peer.clone()))
            });
        }

        info!(
            "Creating flow({:?}) on flownodes with peers={:?}",
            self.data.flow_id, self.data.peers
        );
        join_all(create_flow)
            .await
            .into_iter()
@@ -170,18 +218,29 @@ impl CreateFlowProcedure {
    async fn on_create_metadata(&mut self) -> Result<Status> {
        // Safety: The flow id must be allocated.
        let flow_id = self.data.flow_id.unwrap();
        // TODO(weny): Support `or_replace`.
        let (flow_info, flow_routes) = (&self.data).into();
        self.context
            .flow_metadata_manager
            .create_flow_metadata(flow_id, flow_info, flow_routes)
            .await?;
        info!("Created flow metadata for flow {flow_id}");
        if let Some(prev_flow_value) = self.data.prev_flow_info_value.as_ref()
            && self.data.task.or_replace
        {
            self.context
                .flow_metadata_manager
                .update_flow_metadata(flow_id, prev_flow_value, &flow_info, flow_routes)
                .await?;
            info!("Replaced flow metadata for flow {flow_id}");
        } else {
            self.context
                .flow_metadata_manager
                .create_flow_metadata(flow_id, flow_info, flow_routes)
                .await?;
            info!("Created flow metadata for flow {flow_id}");
        }

        self.data.state = CreateFlowState::InvalidateFlowCache;
        Ok(Status::executing(true))
    }

    async fn on_broadcast(&mut self) -> Result<Status> {
        debug_assert!(self.data.state == CreateFlowState::InvalidateFlowCache);
        // Safety: The flow id must be allocated.
        let flow_id = self.data.flow_id.unwrap();
        let ctx = Context {
@@ -192,10 +251,13 @@ impl CreateFlowProcedure {
            .cache_invalidator
            .invalidate(
                &ctx,
                &[CacheIdent::CreateFlow(CreateFlow {
                    source_table_ids: self.data.source_table_ids.clone(),
                    flownodes: self.data.peers.clone(),
                })],
                &[
                    CacheIdent::CreateFlow(CreateFlow {
                        source_table_ids: self.data.source_table_ids.clone(),
                        flownodes: self.data.peers.clone(),
                    }),
                    CacheIdent::FlowId(flow_id),
                ],
            )
            .await?;

@@ -270,6 +332,9 @@ pub struct CreateFlowData {
    pub(crate) peers: Vec<Peer>,
    pub(crate) source_table_ids: Vec<TableId>,
    pub(crate) query_context: QueryContext,
    /// Used to verify that the previous value is unchanged when the flow metadata
    /// must be updated; only set when `or_replace` is true.
    pub(crate) prev_flow_info_value: Option<DeserializedValueWithBytes<FlowInfoValue>>,
}

impl From<&CreateFlowData> for CreateRequest {
@@ -284,8 +349,9 @@ impl From<&CreateFlowData> for CreateRequest {
                .map(|table_id| api::v1::TableId { id: *table_id })
                .collect_vec(),
            sink_table_name: Some(value.task.sink_table_name.clone().into()),
            // Always be true
            // Always true, to keep the request idempotent in case of retry
            create_if_not_exists: true,
            or_replace: value.task.or_replace,
            expire_after: value.task.expire_after.map(|value| ExpireAfter { value }),
            comment: value.task.comment.clone(),
            sql: value.task.sql.clone(),

@@ -12,8 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use api::v1::alter_expr::Kind;
use api::v1::{AddColumn, AddColumns, AlterExpr, ColumnDef, RenameTable};
use api::v1::alter_table_expr::Kind;
use api::v1::{AddColumn, AddColumns, AlterTableExpr, ColumnDef, RenameTable};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use derive_builder::Builder;

@@ -32,7 +32,7 @@ pub struct TestAlterTableExpr {
    new_table_name: Option<String>,
}

impl From<TestAlterTableExpr> for AlterExpr {
impl From<TestAlterTableExpr> for AlterTableExpr {
    fn from(value: TestAlterTableExpr) -> Self {
        if let Some(new_table_name) = value.new_table_name {
            Self {

@@ -16,11 +16,11 @@ use std::assert_matches::assert_matches;
use std::collections::HashMap;
use std::sync::Arc;

use api::v1::alter_expr::Kind;
use api::v1::alter_table_expr::Kind;
use api::v1::region::{region_request, RegionRequest};
use api::v1::{
    AddColumn, AddColumns, AlterExpr, ChangeTableOption, ChangeTableOptions, ColumnDataType,
    ColumnDef as PbColumnDef, DropColumn, DropColumns, SemanticType,
    AddColumn, AddColumns, AlterTableExpr, ColumnDataType, ColumnDef as PbColumnDef, DropColumn,
    DropColumns, SemanticType, SetTableOptions,
};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_error::ext::ErrorExt;
@@ -133,7 +133,7 @@ async fn test_on_submit_alter_request() {
        .unwrap();

    let alter_table_task = AlterTableTask {
        alter_table: AlterExpr {
        alter_table: AlterTableExpr {
            catalog_name: DEFAULT_CATALOG_NAME.to_string(),
            schema_name: DEFAULT_SCHEMA_NAME.to_string(),
            table_name: table_name.to_string(),
@@ -219,7 +219,7 @@ async fn test_on_submit_alter_request_with_outdated_request() {
        .unwrap();

    let alter_table_task = AlterTableTask {
        alter_table: AlterExpr {
        alter_table: AlterTableExpr {
            catalog_name: DEFAULT_CATALOG_NAME.to_string(),
            schema_name: DEFAULT_SCHEMA_NAME.to_string(),
            table_name: table_name.to_string(),
@@ -316,7 +316,7 @@ async fn test_on_update_metadata_add_columns() {
        .unwrap();

    let task = AlterTableTask {
        alter_table: AlterExpr {
        alter_table: AlterTableExpr {
            catalog_name: DEFAULT_CATALOG_NAME.to_string(),
            schema_name: DEFAULT_SCHEMA_NAME.to_string(),
            table_name: table_name.to_string(),
@@ -385,12 +385,12 @@ async fn test_on_update_table_options() {
        .unwrap();

    let task = AlterTableTask {
        alter_table: AlterExpr {
        alter_table: AlterTableExpr {
            catalog_name: DEFAULT_CATALOG_NAME.to_string(),
            schema_name: DEFAULT_SCHEMA_NAME.to_string(),
            table_name: table_name.to_string(),
            kind: Some(Kind::ChangeTableOptions(ChangeTableOptions {
                change_table_options: vec![ChangeTableOption {
            kind: Some(Kind::SetTableOptions(SetTableOptions {
                table_options: vec![api::v1::Option {
                    key: TTL_KEY.to_string(),
                    value: "1d".to_string(),
                }],

@@ -24,6 +24,7 @@ use derive_builder::Builder;
use snafu::{ensure, OptionExt, ResultExt};
use store_api::storage::TableId;

use crate::ddl::alter_database::AlterDatabaseProcedure;
use crate::ddl::alter_logical_tables::AlterLogicalTablesProcedure;
use crate::ddl::alter_table::AlterTableProcedure;
use crate::ddl::create_database::CreateDatabaseProcedure;
@@ -47,12 +48,13 @@ use crate::key::table_info::TableInfoValue;
use crate::key::table_name::TableNameKey;
use crate::key::{DeserializedValueWithBytes, TableMetadataManagerRef};
use crate::rpc::ddl::DdlTask::{
    AlterLogicalTables, AlterTable, CreateDatabase, CreateFlow, CreateLogicalTables, CreateTable,
    CreateView, DropDatabase, DropFlow, DropLogicalTables, DropTable, DropView, TruncateTable,
    AlterDatabase, AlterLogicalTables, AlterTable, CreateDatabase, CreateFlow, CreateLogicalTables,
    CreateTable, CreateView, DropDatabase, DropFlow, DropLogicalTables, DropTable, DropView,
    TruncateTable,
};
use crate::rpc::ddl::{
    AlterTableTask, CreateDatabaseTask, CreateFlowTask, CreateTableTask, CreateViewTask,
    DropDatabaseTask, DropFlowTask, DropTableTask, DropViewTask, QueryContext,
    AlterDatabaseTask, AlterTableTask, CreateDatabaseTask, CreateFlowTask, CreateTableTask,
    CreateViewTask, DropDatabaseTask, DropFlowTask, DropTableTask, DropViewTask, QueryContext,
    SubmitDdlTaskRequest, SubmitDdlTaskResponse, TruncateTableTask,
};
use crate::rpc::procedure;
@@ -129,6 +131,7 @@ impl DdlManager {
        CreateFlowProcedure,
        AlterTableProcedure,
        AlterLogicalTablesProcedure,
        AlterDatabaseProcedure,
        DropTableProcedure,
        DropFlowProcedure,
        TruncateTableProcedure,
@@ -294,6 +297,18 @@ impl DdlManager {
        self.submit_procedure(procedure_with_id).await
    }

    pub async fn submit_alter_database(
        &self,
        cluster_id: ClusterId,
        alter_database_task: AlterDatabaseTask,
    ) -> Result<(ProcedureId, Option<Output>)> {
        let context = self.create_context();
        let procedure = AlterDatabaseProcedure::new(cluster_id, alter_database_task, context)?;
        let procedure_with_id = ProcedureWithId::with_random_id(Box::new(procedure));

        self.submit_procedure(procedure_with_id).await
    }

    /// Submits and executes a create flow task.
    #[tracing::instrument(skip_all)]
    pub async fn submit_create_flow_task(
@@ -593,6 +608,28 @@ async fn handle_drop_database_task(
    })
}

async fn handle_alter_database_task(
    ddl_manager: &DdlManager,
    cluster_id: ClusterId,
    alter_database_task: AlterDatabaseTask,
) -> Result<SubmitDdlTaskResponse> {
    let (id, _) = ddl_manager
        .submit_alter_database(cluster_id, alter_database_task.clone())
        .await?;

    let procedure_id = id.to_string();
    info!(
        "Database {}.{} is altered via procedure_id {id:?}",
        alter_database_task.catalog(),
        alter_database_task.schema()
    );

    Ok(SubmitDdlTaskResponse {
        key: procedure_id.into(),
        ..Default::default()
    })
}

async fn handle_drop_flow_task(
    ddl_manager: &DdlManager,
    cluster_id: ClusterId,
@@ -655,10 +692,17 @@ async fn handle_create_flow_task(
        procedure_id: &procedure_id,
        err_msg: "downcast to `u32`",
    })?);
    info!(
        "Flow {}.{}({flow_id}) is created via procedure_id {id:?}",
        create_flow_task.catalog_name, create_flow_task.flow_name,
    );
    if !create_flow_task.or_replace {
        info!(
            "Flow {}.{}({flow_id}) is created via procedure_id {id:?}",
            create_flow_task.catalog_name, create_flow_task.flow_name,
        );
    } else {
        info!(
            "Flow {}.{}({flow_id}) is replaced via procedure_id {id:?}",
            create_flow_task.catalog_name, create_flow_task.flow_name,
        );
    }

    Ok(SubmitDdlTaskResponse {
        key: procedure_id.into(),
@@ -772,6 +816,9 @@ impl ProcedureExecutor for DdlManager {
            DropDatabase(drop_database_task) => {
                handle_drop_database_task(self, cluster_id, drop_database_task).await
            }
            AlterDatabase(alter_database_task) => {
                handle_alter_database_task(self, cluster_id, alter_database_task).await
            }
            CreateFlow(create_flow_task) => {
                handle_create_flow_task(
                    self,

@@ -593,6 +593,21 @@ pub enum Error {
        location: Location,
    },

    #[snafu(display("Invalid set database option, key: {}, value: {}", key, value))]
    InvalidSetDatabaseOption {
        key: String,
        value: String,
        #[snafu(implicit)]
        location: Location,
    },

    #[snafu(display("Invalid unset database option, key: {}", key))]
    InvalidUnsetDatabaseOption {
        key: String,
        #[snafu(implicit)]
        location: Location,
    },

    #[snafu(display("Invalid prefix: {}, key: {}", prefix, key))]
    MismatchPrefix {
        prefix: String,
@@ -730,7 +745,9 @@ impl ErrorExt for Error {
            | AlterLogicalTablesInvalidArguments { .. }
            | CreateLogicalTablesInvalidArguments { .. }
            | MismatchPrefix { .. }
            | TlsConfig { .. } => StatusCode::InvalidArguments,
            | TlsConfig { .. }
            | InvalidSetDatabaseOption { .. }
            | InvalidUnsetDatabaseOption { .. } => StatusCode::InvalidArguments,

            FlowNotFound { .. } => StatusCode::FlowNotFound,
            FlowRouteNotFound { .. } => StatusCode::Unexpected,

@@ -90,7 +90,9 @@
pub mod catalog_name;
pub mod datanode_table;
pub mod flow;
pub mod maintenance;
pub mod node_address;
mod schema_metadata_manager;
pub mod schema_name;
pub mod table_info;
pub mod table_name;
@@ -116,6 +118,7 @@ use flow::flow_route::FlowRouteValue;
use flow::table_flow::TableFlowValue;
use lazy_static::lazy_static;
use regex::Regex;
pub use schema_metadata_manager::{SchemaMetadataManager, SchemaMetadataManagerRef};
use serde::de::DeserializeOwned;
use serde::{Deserialize, Serialize};
use snafu::{ensure, OptionExt, ResultExt};
@@ -562,13 +565,13 @@ impl TableMetadataManager {
            let mut set = TxnOpGetResponseSet::from(&mut r.responses);
            let remote_table_info = on_create_table_info_failure(&mut set)?
                .context(error::UnexpectedSnafu {
                    err_msg: "Reads the empty table info during the create table metadata",
                    err_msg: "Reads the empty table info in comparing operation of creating table metadata",
                })?
                .into_inner();

            let remote_view_info = on_create_view_info_failure(&mut set)?
                .context(error::UnexpectedSnafu {
                    err_msg: "Reads the empty view info during the create view info",
                    err_msg: "Reads the empty view info in comparing operation of creating view metadata",
                })?
                .into_inner();

@@ -641,13 +644,13 @@ impl TableMetadataManager {
            let mut set = TxnOpGetResponseSet::from(&mut r.responses);
            let remote_table_info = on_create_table_info_failure(&mut set)?
                .context(error::UnexpectedSnafu {
                    err_msg: "Reads the empty table info during the create table metadata",
                    err_msg: "Reads the empty table info in comparing operation of creating table metadata",
                })?
                .into_inner();

            let remote_table_route = on_create_table_route_failure(&mut set)?
                .context(error::UnexpectedSnafu {
                    err_msg: "Reads the empty table route during the create table metadata",
                    err_msg: "Reads the empty table route in comparing operation of creating table metadata",
                })?
                .into_inner();

@@ -728,13 +731,13 @@ impl TableMetadataManager {
            for on_failure in on_failures {
                let remote_table_info = (on_failure.on_create_table_info_failure)(&mut set)?
                    .context(error::UnexpectedSnafu {
                        err_msg: "Reads the empty table info during the create table metadata",
                        err_msg: "Reads the empty table info in comparing operation of creating table metadata",
                    })?
                    .into_inner();

                let remote_table_route = (on_failure.on_create_table_route_failure)(&mut set)?
                    .context(error::UnexpectedSnafu {
                        err_msg: "Reads the empty table route during the create table metadata",
                        err_msg: "Reads the empty table route in comparing operation of creating table metadata",
                    })?
                    .into_inner();

@@ -912,7 +915,7 @@ impl TableMetadataManager {
            let mut set = TxnOpGetResponseSet::from(&mut r.responses);
            let remote_table_info = on_update_table_info_failure(&mut set)?
                .context(error::UnexpectedSnafu {
                    err_msg: "Reads the empty table info during the rename table metadata",
                    err_msg: "Reads the empty table info in comparing operation of the rename table metadata",
                })?
                .into_inner();

@@ -958,7 +961,7 @@ impl TableMetadataManager {
            let mut set = TxnOpGetResponseSet::from(&mut r.responses);
            let remote_table_info = on_update_table_info_failure(&mut set)?
                .context(error::UnexpectedSnafu {
                    err_msg: "Reads the empty table info during the updating table info",
                    err_msg: "Reads the empty table info in comparing operation of the updating table info",
                })?
                .into_inner();

@@ -1009,7 +1012,7 @@ impl TableMetadataManager {
            let mut set = TxnOpGetResponseSet::from(&mut r.responses);
            let remote_view_info = on_update_view_info_failure(&mut set)?
                .context(error::UnexpectedSnafu {
                    err_msg: "Reads the empty view info during the updating view info",
                    err_msg: "Reads the empty view info in comparing operation of the updating view info",
                })?
                .into_inner();

@@ -1066,7 +1069,7 @@ impl TableMetadataManager {
            for on_failure in on_failures {
                let remote_table_info = (on_failure.on_update_table_info_failure)(&mut set)?
                    .context(error::UnexpectedSnafu {
                        err_msg: "Reads the empty table info during the updating table info",
                        err_msg: "Reads the empty table info in comparing operation of the updating table info",
                    })?
                    .into_inner();

@@ -1118,7 +1121,7 @@ impl TableMetadataManager {
            let mut set = TxnOpGetResponseSet::from(&mut r.responses);
            let remote_table_route = on_update_table_route_failure(&mut set)?
                .context(error::UnexpectedSnafu {
                    err_msg: "Reads the empty table route during the updating table route",
                    err_msg: "Reads the empty table route in comparing operation of the updating table route",
                })?
                .into_inner();

@@ -1170,7 +1173,7 @@ impl TableMetadataManager {
            let mut set = TxnOpGetResponseSet::from(&mut r.responses);
            let remote_table_route = on_update_table_route_failure(&mut set)?
                .context(error::UnexpectedSnafu {
                    err_msg: "Reads the empty table route during the updating leader region status",
                    err_msg: "Reads the empty table route in comparing operation of the updating leader region status",
                })?
                .into_inner();

@@ -1258,7 +1261,8 @@ impl_metadata_value! {
    FlowNameValue,
    FlowRouteValue,
    TableFlowValue,
    NodeAddressValue
    NodeAddressValue,
    SchemaNameValue
}

impl_optional_metadata_value! {

@@ -38,7 +38,7 @@ use crate::key::flow::flow_name::FlowNameManager;
use crate::key::flow::flownode_flow::FlownodeFlowManager;
pub use crate::key::flow::table_flow::{TableFlowManager, TableFlowManagerRef};
use crate::key::txn_helper::TxnOpGetResponseSet;
use crate::key::{FlowId, MetadataKey};
use crate::key::{DeserializedValueWithBytes, FlowId, MetadataKey};
use crate::kv_backend::txn::Txn;
use crate::kv_backend::KvBackendRef;
use crate::rpc::store::BatchDeleteRequest;
@@ -197,7 +197,7 @@ impl FlowMetadataManager {
                on_create_flow_flow_name_failure(&mut set)?.with_context(|| {
                    error::UnexpectedSnafu {
                        err_msg: format!(
                            "Reads the empty flow name during the creating flow, flow_id: {flow_id}"
                            "Reads the empty flow name in comparing operation of the creating flow, flow_id: {flow_id}"
                        ),
                    }
                })?;
@@ -220,7 +220,7 @@ impl FlowMetadataManager {
            let remote_flow =
                on_create_flow_failure(&mut set)?.with_context(|| error::UnexpectedSnafu {
                    err_msg: format!(
                        "Reads the empty flow during the creating flow, flow_id: {flow_id}"
                        "Reads the empty flow in comparing operation of creating flow, flow_id: {flow_id}"
                    ),
                })?;
            let op_name = "creating flow";
@@ -230,6 +230,102 @@ impl FlowMetadataManager {
        Ok(())
    }

    /// Updates the metadata for a flow, returning an error if the old metadata
    /// does not exist.
    pub async fn update_flow_metadata(
        &self,
        flow_id: FlowId,
        current_flow_info: &DeserializedValueWithBytes<FlowInfoValue>,
        new_flow_info: &FlowInfoValue,
        flow_routes: Vec<(FlowPartitionId, FlowRouteValue)>,
    ) -> Result<()> {
        let (create_flow_flow_name_txn, on_create_flow_flow_name_failure) =
            self.flow_name_manager.build_update_txn(
                &new_flow_info.catalog_name,
                &new_flow_info.flow_name,
                flow_id,
            )?;

        let (create_flow_txn, on_create_flow_failure) =
            self.flow_info_manager
                .build_update_txn(flow_id, current_flow_info, new_flow_info)?;

        let create_flow_routes_txn = self
            .flow_route_manager
            .build_create_txn(flow_id, flow_routes.clone())?;

        let create_flownode_flow_txn = self
            .flownode_flow_manager
            .build_create_txn(flow_id, new_flow_info.flownode_ids().clone());

        let create_table_flow_txn = self.table_flow_manager.build_create_txn(
            flow_id,
            flow_routes
                .into_iter()
                .map(|(partition_id, route)| (partition_id, TableFlowValue { peer: route.peer }))
                .collect(),
            new_flow_info.source_table_ids(),
        )?;

        let txn = Txn::merge_all(vec![
            create_flow_flow_name_txn,
            create_flow_txn,
            create_flow_routes_txn,
            create_flownode_flow_txn,
            create_table_flow_txn,
        ]);
        info!(
            "Updating flow {}.{}({}), with {} txn operations",
            new_flow_info.catalog_name,
            new_flow_info.flow_name,
            flow_id,
            txn.max_operations()
        );

        let mut resp = self.kv_backend.txn(txn).await?;
        if !resp.succeeded {
            let mut set = TxnOpGetResponseSet::from(&mut resp.responses);
            let remote_flow_flow_name =
                on_create_flow_flow_name_failure(&mut set)?.with_context(|| {
                    error::UnexpectedSnafu {
                        err_msg: format!(
                            "Reads the empty flow name in comparing operation of the updating flow, flow_id: {flow_id}"
                        ),
                    }
                })?;

            if remote_flow_flow_name.flow_id() != flow_id {
                info!(
                    "Trying to update flow {}.{}({}), but flow({}) already exists with a different flow id",
                    new_flow_info.catalog_name,
                    new_flow_info.flow_name,
                    flow_id,
                    remote_flow_flow_name.flow_id()
                );

                return error::UnexpectedSnafu {
                    err_msg: format!(
                        "Reads different flow id when updating flow({2}.{3}), prev flow id = {0}, updating with flow id = {1}",
                        remote_flow_flow_name.flow_id(),
                        flow_id,
                        new_flow_info.catalog_name,
                        new_flow_info.flow_name,
                    ),
                }.fail();
            }

            let remote_flow =
                on_create_flow_failure(&mut set)?.with_context(|| error::UnexpectedSnafu {
                    err_msg: format!(
                        "Reads the empty flow in comparing operation of the updating flow, flow_id: {flow_id}"
                    ),
                })?;
            let op_name = "updating flow";
            ensure_values!(*remote_flow, new_flow_info.clone(), op_name);
        }

        Ok(())
    }

    fn flow_metadata_keys(&self, flow_id: FlowId, flow_value: &FlowInfoValue) -> Vec<Vec<u8>> {
        let source_table_ids = flow_value.source_table_ids();
        let mut keys =
@@ -560,4 +656,222 @@ mod tests {
        // Ensures all keys are deleted
        assert!(mem_kv.is_empty())
    }

    #[tokio::test]
    async fn test_update_flow_metadata() {
        let mem_kv = Arc::new(MemoryKvBackend::default());
        let flow_metadata_manager = FlowMetadataManager::new(mem_kv.clone());
        let flow_id = 10;
        let flow_value = test_flow_info_value(
            "flow",
            [(0, 1u64), (1, 2u64)].into(),
            vec![1024, 1025, 1026],
        );
        let flow_routes = vec![
            (
                1u32,
                FlowRouteValue {
                    peer: Peer::empty(1),
                },
            ),
            (
                2,
                FlowRouteValue {
                    peer: Peer::empty(2),
                },
            ),
        ];
        flow_metadata_manager
            .create_flow_metadata(flow_id, flow_value.clone(), flow_routes.clone())
            .await
            .unwrap();

        let new_flow_value = {
            let mut tmp = flow_value.clone();
            tmp.raw_sql = "new".to_string();
            tmp
        };

        // Update flow instead
        flow_metadata_manager
            .update_flow_metadata(
                flow_id,
                &DeserializedValueWithBytes::from_inner(flow_value.clone()),
                &new_flow_value,
                flow_routes.clone(),
            )
            .await
            .unwrap();

        let got = flow_metadata_manager
            .flow_info_manager()
            .get(flow_id)
            .await
            .unwrap()
            .unwrap();
        let routes = flow_metadata_manager
            .flow_route_manager()
            .routes(flow_id)
            .try_collect::<Vec<_>>()
            .await
            .unwrap();
        assert_eq!(
            routes,
            vec![
                (
                    FlowRouteKey::new(flow_id, 1),
                    FlowRouteValue {
                        peer: Peer::empty(1),
                    },
                ),
                (
                    FlowRouteKey::new(flow_id, 2),
                    FlowRouteValue {
                        peer: Peer::empty(2),
                    },
                ),
            ]
        );
        assert_eq!(got, new_flow_value);
        let flows = flow_metadata_manager
            .flownode_flow_manager()
            .flows(1)
            .try_collect::<Vec<_>>()
            .await
            .unwrap();
        assert_eq!(flows, vec![(flow_id, 0)]);
        for table_id in [1024, 1025, 1026] {
            let nodes = flow_metadata_manager
                .table_flow_manager()
                .flows(table_id)
                .try_collect::<Vec<_>>()
                .await
                .unwrap();
            assert_eq!(
                nodes,
                vec![
                    (
                        TableFlowKey::new(table_id, 1, flow_id, 1),
                        TableFlowValue {
                            peer: Peer::empty(1)
                        }
                    ),
                    (
                        TableFlowKey::new(table_id, 2, flow_id, 2),
                        TableFlowValue {
                            peer: Peer::empty(2)
                        }
                    )
                ]
            );
        }
    }

    #[tokio::test]
    async fn test_update_flow_metadata_flow_replace_diff_id_err() {
        let mem_kv = Arc::new(MemoryKvBackend::default());
        let flow_metadata_manager = FlowMetadataManager::new(mem_kv);
        let flow_id = 10;
        let flow_value = test_flow_info_value("flow", [(0, 1u64)].into(), vec![1024, 1025, 1026]);
        let flow_routes = vec![
            (
                1u32,
                FlowRouteValue {
                    peer: Peer::empty(1),
                },
            ),
            (
                2,
                FlowRouteValue {
                    peer: Peer::empty(2),
                },
            ),
        ];
        flow_metadata_manager
            .create_flow_metadata(flow_id, flow_value.clone(), flow_routes.clone())
            .await
            .unwrap();
        // update again with same flow id
        flow_metadata_manager
            .update_flow_metadata(
                flow_id,
                &DeserializedValueWithBytes::from_inner(flow_value.clone()),
                &flow_value,
                flow_routes.clone(),
            )
            .await
            .unwrap();
        // update again with wrong flow id, expected error
        let err = flow_metadata_manager
            .update_flow_metadata(
                flow_id + 1,
                &DeserializedValueWithBytes::from_inner(flow_value.clone()),
                &flow_value,
                flow_routes,
            )
            .await
            .unwrap_err();
        assert_matches!(err, error::Error::Unexpected { .. });
        assert!(err
            .to_string()
            .contains("Reads different flow id when updating flow"));
    }

    #[tokio::test]
    async fn test_update_flow_metadata_unexpected_err_prev_value_diff() {
        let mem_kv = Arc::new(MemoryKvBackend::default());
        let flow_metadata_manager = FlowMetadataManager::new(mem_kv);
        let flow_id = 10;
        let catalog_name = "greptime";
        let flow_value = test_flow_info_value("flow", [(0, 1u64)].into(), vec![1024, 1025, 1026]);
        let flow_routes = vec![
            (
                1u32,
                FlowRouteValue {
                    peer: Peer::empty(1),
                },
            ),
            (
                2,
                FlowRouteValue {
                    peer: Peer::empty(2),
                },
            ),
        ];
        flow_metadata_manager
            .create_flow_metadata(flow_id, flow_value.clone(), flow_routes.clone())
            .await
            .unwrap();
        // Creates again.
        let another_sink_table_name = TableName {
            catalog_name: catalog_name.to_string(),
            schema_name: "my_schema".to_string(),
            table_name: "another_sink_table".to_string(),
        };
        let flow_value = FlowInfoValue {
            catalog_name: "greptime".to_string(),
            flow_name: "flow".to_string(),
            source_table_ids: vec![1024, 1025, 1026],
            sink_table_name: another_sink_table_name,
            flownode_ids: [(0, 1u64)].into(),
            raw_sql: "raw".to_string(),
            expire_after: Some(300),
            comment: "hi".to_string(),
            options: Default::default(),
        };
        let err = flow_metadata_manager
            .update_flow_metadata(
                flow_id,
                &DeserializedValueWithBytes::from_inner(flow_value.clone()),
                &flow_value,
                flow_routes.clone(),
            )
            .await
            .unwrap_err();
        assert!(
            err.to_string().contains("Reads the different value"),
            "error: {:?}",
            err
        );
    }
}

@@ -26,7 +26,7 @@ use crate::error::{self, Result};
use crate::key::flow::FlowScoped;
use crate::key::txn_helper::TxnOpGetResponseSet;
use crate::key::{DeserializedValueWithBytes, FlowId, FlowPartitionId, MetadataKey, MetadataValue};
use crate::kv_backend::txn::Txn;
use crate::kv_backend::txn::{Compare, CompareOp, Txn, TxnOp};
use crate::kv_backend::KvBackendRef;
use crate::FlownodeId;

@@ -196,6 +196,19 @@ impl FlowInfoManager {
            .transpose()
    }

    /// Returns the [FlowInfoValue] with the original bytes of the specified `flow_id`.
    pub async fn get_raw(
        &self,
        flow_id: FlowId,
    ) -> Result<Option<DeserializedValueWithBytes<FlowInfoValue>>> {
        let key = FlowInfoKey::new(flow_id).to_bytes();
        self.kv_backend
            .get(&key)
            .await?
            .map(|x| DeserializedValueWithBytes::from_inner_slice(&x.value))
            .transpose()
    }

    /// Builds a create flow transaction.
    /// It is expected that the `__flow/info/{flow_id}` wasn't occupied.
    /// Otherwise, the transaction will retrieve the existing value.
@@ -215,6 +228,36 @@ impl FlowInfoManager {
            TxnOpGetResponseSet::decode_with(TxnOpGetResponseSet::filter(key)),
        ))
    }

    /// Builds an update-flow transaction.
    /// It is expected that the `__flow/info/{flow_id}` key IS ALREADY occupied and equal to
    /// `prev_flow_value`; the new value may be identical, so that a replace operation succeeds
    /// even when nothing changed. Otherwise, the transaction retrieves the existing value and fails.
    pub(crate) fn build_update_txn(
        &self,
        flow_id: FlowId,
        current_flow_value: &DeserializedValueWithBytes<FlowInfoValue>,
        new_flow_value: &FlowInfoValue,
    ) -> Result<(
        Txn,
        impl FnOnce(&mut TxnOpGetResponseSet) -> FlowInfoDecodeResult,
    )> {
        let key = FlowInfoKey::new(flow_id).to_bytes();
        let raw_value = new_flow_value.try_as_raw_value()?;
        let prev_value = current_flow_value.get_raw_bytes();
        let txn = Txn::new()
            .when(vec![
                Compare::new(key.clone(), CompareOp::NotEqual, None),
                Compare::new(key.clone(), CompareOp::Equal, Some(prev_value)),
            ])
            .and_then(vec![TxnOp::Put(key.clone(), raw_value)])
            .or_else(vec![TxnOp::Get(key.clone())]);

        Ok((
            txn,
            TxnOpGetResponseSet::decode_with(TxnOpGetResponseSet::filter(key)),
        ))
    }
}
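The `when` guard above is a compare-and-swap: `NotEqual None` asserts that the key exists at all, and `Equal Some(prev)` asserts that it still holds the exact bytes read earlier; only then is the `Put` applied, otherwise the `or_else` `Get` returns the current value for diagnostics. A toy model of the same semantics over a plain `HashMap` (the names and error shape here are illustrative, not the kv-backend API):

```rust
use std::collections::HashMap;

// Apply `new` only if the key exists AND still holds `prev`.
// On conflict, return whatever is currently stored (or None if missing),
// mirroring the or_else Get used for diagnostics above.
fn cas_put(
    kv: &mut HashMap<Vec<u8>, Vec<u8>>,
    key: &[u8],
    prev: &[u8],
    new: Vec<u8>,
) -> Result<(), Option<Vec<u8>>> {
    let current = kv.get(key).cloned();
    match current {
        Some(ref cur) if cur.as_slice() == prev => {
            kv.insert(key.to_vec(), new);
            Ok(())
        }
        other => Err(other),
    }
}

fn main() {
    let mut kv = HashMap::new();
    kv.insert(b"flow/info/10".to_vec(), b"v1".to_vec());
    assert!(cas_put(&mut kv, b"flow/info/10", b"v1", b"v2".to_vec()).is_ok());
    // Stale `prev`: the update is rejected and the current value is returned.
    assert_eq!(
        cas_put(&mut kv, b"flow/info/10", b"v1", b"v3".to_vec()),
        Err(Some(b"v2".to_vec()))
    );
}
```
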

#[cfg(test)]

@@ -26,7 +26,7 @@ use crate::key::txn_helper::TxnOpGetResponseSet;
use crate::key::{
    BytesAdapter, DeserializedValueWithBytes, FlowId, MetadataKey, MetadataValue, NAME_PATTERN,
};
use crate::kv_backend::txn::Txn;
use crate::kv_backend::txn::{Compare, CompareOp, Txn, TxnOp};
use crate::kv_backend::KvBackendRef;
use crate::range_stream::{PaginationStream, DEFAULT_PAGE_SIZE};
use crate::rpc::store::RangeRequest;
@@ -237,6 +237,37 @@ impl FlowNameManager {
            TxnOpGetResponseSet::decode_with(TxnOpGetResponseSet::filter(raw_key)),
        ))
    }

    /// Builds an update-flow-name transaction, which changes neither the name nor the id;
    /// it only checks that they are unchanged.
    /// It is expected that the `__flow/name/{catalog}/{flow_name}` key IS already occupied
    /// and that both the flow name and the flow id match.
    /// Otherwise, the transaction retrieves the existing value (and fails).
    pub fn build_update_txn(
        &self,
        catalog_name: &str,
        flow_name: &str,
        flow_id: FlowId,
    ) -> Result<(
        Txn,
        impl FnOnce(&mut TxnOpGetResponseSet) -> FlowNameDecodeResult,
    )> {
        let key = FlowNameKey::new(catalog_name, flow_name);
        let raw_key = key.to_bytes();
        let flow_flow_name_value = FlowNameValue::new(flow_id);
        let raw_value = flow_flow_name_value.try_as_raw_value()?;
        let txn = Txn::new()
            .when(vec![Compare::new(
                raw_key.clone(),
                CompareOp::Equal,
                Some(raw_value),
            )])
            .or_else(vec![TxnOp::Get(raw_key.clone())]);

        Ok((
            txn,
            TxnOpGetResponseSet::decode_with(TxnOpGetResponseSet::filter(raw_key)),
        ))
    }
}

#[cfg(test)]

86
src/common/meta/src/key/maintenance.rs
Normal file
@@ -0,0 +1,86 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Arc;

use crate::error::Result;
use crate::key::MAINTENANCE_KEY;
use crate::kv_backend::KvBackendRef;
use crate::rpc::store::PutRequest;

pub type MaintenanceModeManagerRef = Arc<MaintenanceModeManager>;

/// The maintenance mode manager.
///
/// Used to enable or disable maintenance mode.
#[derive(Clone)]
pub struct MaintenanceModeManager {
    kv_backend: KvBackendRef,
}

impl MaintenanceModeManager {
    pub fn new(kv_backend: KvBackendRef) -> Self {
        Self { kv_backend }
    }

    /// Enables maintenance mode.
    pub async fn set_maintenance_mode(&self) -> Result<()> {
        let req = PutRequest {
            key: Vec::from(MAINTENANCE_KEY),
            value: vec![],
            prev_kv: false,
        };
        self.kv_backend.put(req).await?;
        Ok(())
    }

    /// Unsets maintenance mode.
    pub async fn unset_maintenance_mode(&self) -> Result<()> {
        self.kv_backend
            .delete(MAINTENANCE_KEY.as_bytes(), false)
            .await?;
        Ok(())
    }

    /// Returns true if maintenance mode is enabled.
    pub async fn maintenance_mode(&self) -> Result<bool> {
        self.kv_backend.exists(MAINTENANCE_KEY.as_bytes()).await
    }
}

#[cfg(test)]
mod tests {
    use std::sync::Arc;

    use crate::key::maintenance::MaintenanceModeManager;
    use crate::kv_backend::memory::MemoryKvBackend;

    #[tokio::test]
    async fn test_maintenance_mode_manager() {
        let maintenance_mode_manager = Arc::new(MaintenanceModeManager::new(Arc::new(
            MemoryKvBackend::new(),
        )));
        assert!(!maintenance_mode_manager.maintenance_mode().await.unwrap());
        maintenance_mode_manager
            .set_maintenance_mode()
            .await
            .unwrap();
        assert!(maintenance_mode_manager.maintenance_mode().await.unwrap());
        maintenance_mode_manager
            .unset_maintenance_mode()
            .await
            .unwrap();
        assert!(!maintenance_mode_manager.maintenance_mode().await.unwrap());
    }
}
|
||||
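A usage note (assumption, not shown in the diff): callers would typically consult `maintenance_mode()` to gate risky automation, e.g. skipping failover while the cluster is under maintenance.

    // Hypothetical guard, assuming `manager: MaintenanceModeManagerRef` is available:
    if manager.maintenance_mode().await? {
        // Skip automatic failover / region migration while maintenance mode is on.
        return Ok(());
    }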
125
src/common/meta/src/key/schema_metadata_manager.rs
Normal file
@@ -0,0 +1,125 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Schema-level metadata manager.

use std::sync::Arc;

use snafu::OptionExt;
use store_api::storage::TableId;

use crate::error::TableInfoNotFoundSnafu;
use crate::key::schema_name::{SchemaManager, SchemaNameKey};
use crate::key::table_info::{TableInfoManager, TableInfoManagerRef};
use crate::kv_backend::KvBackendRef;
use crate::{error, SchemaOptions};

pub type SchemaMetadataManagerRef = Arc<SchemaMetadataManager>;

pub struct SchemaMetadataManager {
    table_info_manager: TableInfoManagerRef,
    schema_manager: SchemaManager,
    #[cfg(any(test, feature = "testing"))]
    kv_backend: KvBackendRef,
}

impl SchemaMetadataManager {
    /// Creates a new schema metadata manager.
    #[cfg(not(any(test, feature = "testing")))]
    pub fn new(kv_backend: KvBackendRef) -> Self {
        let table_info_manager = Arc::new(TableInfoManager::new(kv_backend.clone()));
        let schema_manager = SchemaManager::new(kv_backend);
        Self {
            table_info_manager,
            schema_manager,
        }
    }

    /// Creates a new schema metadata manager.
    #[cfg(any(test, feature = "testing"))]
    pub fn new(kv_backend: KvBackendRef) -> Self {
        let table_info_manager = Arc::new(TableInfoManager::new(kv_backend.clone()));
        let schema_manager = SchemaManager::new(kv_backend.clone());
        Self {
            table_info_manager,
            schema_manager,
            kv_backend,
        }
    }

    /// Gets schema options by table id.
    pub async fn get_schema_options_by_table_id(
        &self,
        table_id: TableId,
    ) -> error::Result<Option<SchemaOptions>> {
        let table_info = self
            .table_info_manager
            .get(table_id)
            .await?
            .with_context(|| TableInfoNotFoundSnafu {
                table: format!("table id: {}", table_id),
            })?;

        let key = SchemaNameKey::new(
            &table_info.table_info.catalog_name,
            &table_info.table_info.schema_name,
        );
        self.schema_manager
            .get(key)
            .await
            .map(|v| v.map(|v| v.into_inner()))
    }

    #[cfg(any(test, feature = "testing"))]
    pub async fn register_region_table_info(
        &self,
        table_id: TableId,
        table_name: &str,
        schema_name: &str,
        catalog_name: &str,
        schema_value: Option<crate::key::schema_name::SchemaNameValue>,
    ) {
        use table::metadata::{RawTableInfo, TableType};
        let value = crate::key::table_info::TableInfoValue::new(RawTableInfo {
            ident: Default::default(),
            name: table_name.to_string(),
            desc: None,
            catalog_name: catalog_name.to_string(),
            schema_name: schema_name.to_string(),
            meta: Default::default(),
            table_type: TableType::Base,
        });
        let (txn, _) = self
            .table_info_manager
            .build_create_txn(table_id, &value)
            .unwrap();
        let resp = self.kv_backend.txn(txn).await.unwrap();
        assert!(resp.succeeded, "Failed to create table metadata");
        let key = SchemaNameKey {
            catalog: catalog_name,
            schema: schema_name,
        };
        self.schema_manager
            .create(key, schema_value, false)
            .await
            .expect("Failed to create schema metadata");
        common_telemetry::info!(
            "Register table: {}, id: {}, schema: {}, catalog: {}",
            table_name,
            table_id,
            schema_name,
            catalog_name
        );
    }
}
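For context (illustrative, names assumed): the typical read path resolves the schema-level TTL for the table that owns a region, defaulting to "no TTL" when nothing is configured.

    // Hypothetical caller, assuming `schema_metadata_manager` and `table_id` in scope.
    // SchemaOptions is an alias for SchemaNameValue, whose `ttl` is Option<Duration>.
    let ttl = schema_metadata_manager
        .get_schema_options_by_table_id(table_id)
        .await?
        .and_then(|opts| opts.ttl);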
@@ -21,10 +21,14 @@ use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use futures::stream::BoxStream;
use humantime_serde::re::humantime;
use serde::{Deserialize, Serialize};
use snafu::{OptionExt, ResultExt};
use snafu::{ensure, OptionExt, ResultExt};

use super::txn_helper::TxnOpGetResponseSet;
use super::DeserializedValueWithBytes;
use crate::ensure_values;
use crate::error::{self, Error, InvalidMetadataSnafu, ParseOptionSnafu, Result};
use crate::key::{MetadataKey, SCHEMA_NAME_KEY_PATTERN, SCHEMA_NAME_KEY_PREFIX};
use crate::kv_backend::txn::Txn;
use crate::kv_backend::KvBackendRef;
use crate::range_stream::{PaginationStream, DEFAULT_PAGE_SIZE};
use crate::rpc::store::RangeRequest;

@@ -171,6 +175,8 @@ pub struct SchemaManager {
    kv_backend: KvBackendRef,
}

pub type SchemaNameDecodeResult = Result<Option<DeserializedValueWithBytes<SchemaNameValue>>>;

impl SchemaManager {
    pub fn new(kv_backend: KvBackendRef) -> Self {
        Self { kv_backend }

@@ -204,11 +210,15 @@ impl SchemaManager {
        self.kv_backend.exists(&raw_key).await
    }

    pub async fn get(&self, schema: SchemaNameKey<'_>) -> Result<Option<SchemaNameValue>> {
    pub async fn get(
        &self,
        schema: SchemaNameKey<'_>,
    ) -> Result<Option<DeserializedValueWithBytes<SchemaNameValue>>> {
        let raw_key = schema.to_bytes();
        let value = self.kv_backend.get(&raw_key).await?;
        value
            .and_then(|v| SchemaNameValue::try_from_raw_value(v.value.as_ref()).transpose())
        self.kv_backend
            .get(&raw_key)
            .await?
            .map(|x| DeserializedValueWithBytes::from_inner_slice(&x.value))
            .transpose()
    }

@@ -220,6 +230,54 @@ impl SchemaManager {
        Ok(())
    }

    pub(crate) fn build_update_txn(
        &self,
        schema: SchemaNameKey<'_>,
        current_schema_value: &DeserializedValueWithBytes<SchemaNameValue>,
        new_schema_value: &SchemaNameValue,
    ) -> Result<(
        Txn,
        impl FnOnce(&mut TxnOpGetResponseSet) -> SchemaNameDecodeResult,
    )> {
        let raw_key = schema.to_bytes();
        let raw_value = current_schema_value.get_raw_bytes();
        let new_raw_value: Vec<u8> = new_schema_value.try_as_raw_value()?;

        let txn = Txn::compare_and_put(raw_key.clone(), raw_value, new_raw_value);

        Ok((
            txn,
            TxnOpGetResponseSet::decode_with(TxnOpGetResponseSet::filter(raw_key)),
        ))
    }

    /// Updates a [SchemaNameKey].
    pub async fn update(
        &self,
        schema: SchemaNameKey<'_>,
        current_schema_value: &DeserializedValueWithBytes<SchemaNameValue>,
        new_schema_value: &SchemaNameValue,
    ) -> Result<()> {
        let (txn, on_failure) =
            self.build_update_txn(schema, current_schema_value, new_schema_value)?;
        let mut r = self.kv_backend.txn(txn).await?;

        if !r.succeeded {
            let mut set = TxnOpGetResponseSet::from(&mut r.responses);
            let remote_schema_value = on_failure(&mut set)?
                .context(error::UnexpectedSnafu {
                    err_msg:
                        "Reads the empty schema name value in comparing operation of updating schema name value",
                })?
                .into_inner();

            let op_name = "the updating schema name value";
            ensure_values!(&remote_schema_value, new_schema_value, op_name);
        }

        Ok(())
    }

    /// Returns a schema stream that lists all schemas belonging to the target `catalog`.
    pub fn schema_names(&self, catalog: &str) -> BoxStream<'static, Result<String>> {
        let start_key = SchemaNameKey::range_start_key(catalog);

@@ -306,4 +364,42 @@ mod tests {

        assert!(!manager.exists(wrong_schema_key).await.unwrap());
    }

    #[tokio::test]
    async fn test_update_schema_value() {
        let manager = SchemaManager::new(Arc::new(MemoryKvBackend::default()));
        let schema_key = SchemaNameKey::new("my-catalog", "my-schema");
        manager.create(schema_key, None, false).await.unwrap();

        let current_schema_value = manager.get(schema_key).await.unwrap().unwrap();
        let new_schema_value = SchemaNameValue {
            ttl: Some(Duration::from_secs(10)),
        };
        manager
            .update(schema_key, &current_schema_value, &new_schema_value)
            .await
            .unwrap();

        // Update with the same value, should be ok
        manager
            .update(schema_key, &current_schema_value, &new_schema_value)
            .await
            .unwrap();

        let new_schema_value = SchemaNameValue {
            ttl: Some(Duration::from_secs(40)),
        };
        let incorrect_schema_value = SchemaNameValue {
            ttl: Some(Duration::from_secs(20)),
        }
        .try_as_raw_value()
        .unwrap();
        let incorrect_schema_value =
            DeserializedValueWithBytes::from_inner_slice(&incorrect_schema_value).unwrap();

        manager
            .update(schema_key, &incorrect_schema_value, &new_schema_value)
            .await
            .unwrap_err();
    }
}
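Worth spelling out (a sketch, not part of the diff): `update` is a single compare-and-set attempt, and the decoder lets the caller inspect what a concurrent writer stored when the comparison fails. The `ensure_values!` step makes a lost race idempotent when the remote value already equals the intended one.

    // CAS semantics `update` relies on; `current_bytes`/`new_bytes`/`decoder` are
    // stand-in names for values produced by build_update_txn.
    let txn = Txn::compare_and_put(raw_key.clone(), current_bytes, new_bytes);
    let mut resp = kv_backend.txn(txn).await?;
    if !resp.succeeded {
        let mut set = TxnOpGetResponseSet::from(&mut resp.responses);
        let remote = decoder(&mut set)?; // value written by a concurrent updater
        // Treat remote == new as idempotent success; otherwise surface an error.
    }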
@@ -134,6 +134,7 @@ impl TableInfoValue {
}

pub type TableInfoManagerRef = Arc<TableInfoManager>;

#[derive(Clone)]
pub struct TableInfoManager {
    kv_backend: KvBackendRef,

@@ -54,4 +54,7 @@ pub type DatanodeId = u64;
// The id of the flownode.
pub type FlownodeId = u64;

/// Schema options.
pub type SchemaOptions = key::schema_name::SchemaNameValue;

pub use instruction::RegionIdent;

@@ -14,25 +14,29 @@

use std::collections::{HashMap, HashSet};
use std::result;
use std::time::Duration;

use api::v1::alter_database_expr::Kind as PbAlterDatabaseKind;
use api::v1::meta::ddl_task_request::Task;
use api::v1::meta::{
    AlterTableTask as PbAlterTableTask, AlterTableTasks as PbAlterTableTasks,
    CreateDatabaseTask as PbCreateDatabaseTask, CreateFlowTask as PbCreateFlowTask,
    CreateTableTask as PbCreateTableTask, CreateTableTasks as PbCreateTableTasks,
    CreateViewTask as PbCreateViewTask, DdlTaskRequest as PbDdlTaskRequest,
    DdlTaskResponse as PbDdlTaskResponse, DropDatabaseTask as PbDropDatabaseTask,
    DropFlowTask as PbDropFlowTask, DropTableTask as PbDropTableTask,
    DropTableTasks as PbDropTableTasks, DropViewTask as PbDropViewTask, Partition, ProcedureId,
    AlterDatabaseTask as PbAlterDatabaseTask, AlterTableTask as PbAlterTableTask,
    AlterTableTasks as PbAlterTableTasks, CreateDatabaseTask as PbCreateDatabaseTask,
    CreateFlowTask as PbCreateFlowTask, CreateTableTask as PbCreateTableTask,
    CreateTableTasks as PbCreateTableTasks, CreateViewTask as PbCreateViewTask,
    DdlTaskRequest as PbDdlTaskRequest, DdlTaskResponse as PbDdlTaskResponse,
    DropDatabaseTask as PbDropDatabaseTask, DropFlowTask as PbDropFlowTask,
    DropTableTask as PbDropTableTask, DropTableTasks as PbDropTableTasks,
    DropViewTask as PbDropViewTask, Partition, ProcedureId,
    TruncateTableTask as PbTruncateTableTask,
};
use api::v1::{
    AlterExpr, CreateDatabaseExpr, CreateFlowExpr, CreateTableExpr, CreateViewExpr,
    DropDatabaseExpr, DropFlowExpr, DropTableExpr, DropViewExpr, ExpireAfter,
    QueryContext as PbQueryContext, TruncateTableExpr,
    AlterDatabaseExpr, AlterTableExpr, CreateDatabaseExpr, CreateFlowExpr, CreateTableExpr,
    CreateViewExpr, DropDatabaseExpr, DropFlowExpr, DropTableExpr, DropViewExpr, ExpireAfter,
    Option as PbOption, QueryContext as PbQueryContext, TruncateTableExpr,
};
use base64::engine::general_purpose;
use base64::Engine as _;
use humantime_serde::re::humantime;
use prost::Message;
use serde::{Deserialize, Serialize};
use serde_with::{serde_as, DefaultOnNull};

@@ -42,7 +46,7 @@ use table::metadata::{RawTableInfo, TableId};
use table::table_name::TableName;
use table::table_reference::TableReference;

use crate::error::{self, Result};
use crate::error::{self, InvalidSetDatabaseOptionSnafu, InvalidUnsetDatabaseOptionSnafu, Result};
use crate::key::FlowId;

/// DDL tasks

@@ -57,6 +61,7 @@ pub enum DdlTask {
    AlterLogicalTables(Vec<AlterTableTask>),
    CreateDatabase(CreateDatabaseTask),
    DropDatabase(DropDatabaseTask),
    AlterDatabase(AlterDatabaseTask),
    CreateFlow(CreateFlowTask),
    DropFlow(DropFlowTask),
    CreateView(CreateViewTask),

@@ -99,7 +104,7 @@ impl DdlTask {
    }

    /// Creates a [`DdlTask`] to alter several logical tables.
    pub fn new_alter_logical_tables(table_data: Vec<AlterExpr>) -> Self {
    pub fn new_alter_logical_tables(table_data: Vec<AlterTableExpr>) -> Self {
        DdlTask::AlterLogicalTables(
            table_data
                .into_iter()

@@ -149,8 +154,13 @@ impl DdlTask {
        })
    }

    /// Creates a [`DdlTask`] to alter a database.
    pub fn new_alter_database(alter_expr: AlterDatabaseExpr) -> Self {
        DdlTask::AlterDatabase(AlterDatabaseTask { alter_expr })
    }

    /// Creates a [`DdlTask`] to alter a table.
    pub fn new_alter_table(alter_table: AlterExpr) -> Self {
    pub fn new_alter_table(alter_table: AlterTableExpr) -> Self {
        DdlTask::AlterTable(AlterTableTask { alter_table })
    }

@@ -223,6 +233,9 @@ impl TryFrom<Task> for DdlTask {
            Task::DropDatabaseTask(drop_database) => {
                Ok(DdlTask::DropDatabase(drop_database.try_into()?))
            }
            Task::AlterDatabaseTask(alter_database) => {
                Ok(DdlTask::AlterDatabase(alter_database.try_into()?))
            }
            Task::CreateFlowTask(create_flow) => Ok(DdlTask::CreateFlow(create_flow.try_into()?)),
            Task::DropFlowTask(drop_flow) => Ok(DdlTask::DropFlow(drop_flow.try_into()?)),
            Task::CreateViewTask(create_view) => Ok(DdlTask::CreateView(create_view.try_into()?)),

@@ -272,6 +285,7 @@ impl TryFrom<SubmitDdlTaskRequest> for PbDdlTaskRequest {
            }
            DdlTask::CreateDatabase(task) => Task::CreateDatabaseTask(task.try_into()?),
            DdlTask::DropDatabase(task) => Task::DropDatabaseTask(task.try_into()?),
            DdlTask::AlterDatabase(task) => Task::AlterDatabaseTask(task.try_into()?),
            DdlTask::CreateFlow(task) => Task::CreateFlowTask(task.into()),
            DdlTask::DropFlow(task) => Task::DropFlowTask(task.into()),
            DdlTask::CreateView(task) => Task::CreateViewTask(task.try_into()?),

@@ -680,7 +694,8 @@ impl<'de> Deserialize<'de> for CreateTableTask {

#[derive(Debug, PartialEq, Clone)]
pub struct AlterTableTask {
    pub alter_table: AlterExpr,
    // TODO(CookiePieWw): Replace proto struct with user-defined struct
    pub alter_table: AlterTableExpr,
}

impl AlterTableTask {

@@ -932,6 +947,125 @@ impl TryFrom<DropDatabaseTask> for PbDropDatabaseTask {
    }
}

#[derive(Debug, PartialEq, Clone)]
pub struct AlterDatabaseTask {
    pub alter_expr: AlterDatabaseExpr,
}

impl TryFrom<AlterDatabaseTask> for PbAlterDatabaseTask {
    type Error = error::Error;

    fn try_from(task: AlterDatabaseTask) -> Result<Self> {
        Ok(PbAlterDatabaseTask {
            task: Some(task.alter_expr),
        })
    }
}

impl TryFrom<PbAlterDatabaseTask> for AlterDatabaseTask {
    type Error = error::Error;

    fn try_from(pb: PbAlterDatabaseTask) -> Result<Self> {
        let alter_expr = pb.task.context(error::InvalidProtoMsgSnafu {
            err_msg: "expected alter database",
        })?;

        Ok(AlterDatabaseTask { alter_expr })
    }
}

impl TryFrom<PbAlterDatabaseKind> for AlterDatabaseKind {
    type Error = error::Error;

    fn try_from(pb: PbAlterDatabaseKind) -> Result<Self> {
        match pb {
            PbAlterDatabaseKind::SetDatabaseOptions(options) => {
                Ok(AlterDatabaseKind::SetDatabaseOptions(SetDatabaseOptions(
                    options
                        .set_database_options
                        .into_iter()
                        .map(SetDatabaseOption::try_from)
                        .collect::<Result<Vec<_>>>()?,
                )))
            }
            PbAlterDatabaseKind::UnsetDatabaseOptions(options) => Ok(
                AlterDatabaseKind::UnsetDatabaseOptions(UnsetDatabaseOptions(
                    options
                        .keys
                        .iter()
                        .map(|key| UnsetDatabaseOption::try_from(key.as_str()))
                        .collect::<Result<Vec<_>>>()?,
                )),
            ),
        }
    }
}

const TTL_KEY: &str = "ttl";

impl TryFrom<PbOption> for SetDatabaseOption {
    type Error = error::Error;

    fn try_from(PbOption { key, value }: PbOption) -> Result<Self> {
        match key.to_ascii_lowercase().as_str() {
            TTL_KEY => {
                let ttl = if value.is_empty() {
                    Duration::from_secs(0)
                } else {
                    humantime::parse_duration(&value)
                        .map_err(|_| InvalidSetDatabaseOptionSnafu { key, value }.build())?
                };

                Ok(SetDatabaseOption::Ttl(ttl))
            }
            _ => InvalidSetDatabaseOptionSnafu { key, value }.fail(),
        }
    }
}

#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
pub enum SetDatabaseOption {
    Ttl(Duration),
}

#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
pub enum UnsetDatabaseOption {
    Ttl,
}

impl TryFrom<&str> for UnsetDatabaseOption {
    type Error = error::Error;

    fn try_from(key: &str) -> Result<Self> {
        match key.to_ascii_lowercase().as_str() {
            TTL_KEY => Ok(UnsetDatabaseOption::Ttl),
            _ => InvalidUnsetDatabaseOptionSnafu { key }.fail(),
        }
    }
}

#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
pub struct SetDatabaseOptions(pub Vec<SetDatabaseOption>);

#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
pub struct UnsetDatabaseOptions(pub Vec<UnsetDatabaseOption>);

#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
pub enum AlterDatabaseKind {
    SetDatabaseOptions(SetDatabaseOptions),
    UnsetDatabaseOptions(UnsetDatabaseOptions),
}

impl AlterDatabaseTask {
    pub fn catalog(&self) -> &str {
        &self.alter_expr.catalog_name
    }

    pub fn schema(&self) -> &str {
        // Fixed here: the original returned `catalog_name`, which was a copy-paste bug.
        &self.alter_expr.schema_name
    }
}
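For a concrete sense of the `ttl` parsing above (illustrative values, not from the diff): an option value is parsed with `humantime`, an empty value maps to a zero duration, and unknown keys fail with `InvalidSetDatabaseOptionSnafu`.

    // Hypothetical round-trip: "1h" parses to a one-hour TTL.
    let opt = SetDatabaseOption::try_from(PbOption {
        key: "ttl".to_string(),
        value: "1h".to_string(),
    })?;
    assert_eq!(opt, SetDatabaseOption::Ttl(Duration::from_secs(3600)));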
/// Create flow
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CreateFlowTask {

@@ -1118,7 +1252,7 @@ impl From<QueryContext> for PbQueryContext {
mod tests {
    use std::sync::Arc;

    use api::v1::{AlterExpr, ColumnDef, CreateTableExpr, SemanticType};
    use api::v1::{AlterTableExpr, ColumnDef, CreateTableExpr, SemanticType};
    use datatypes::schema::{ColumnSchema, RawSchema, SchemaBuilder};
    use store_api::metric_engine_consts::METRIC_ENGINE_NAME;
    use store_api::storage::ConcreteDataType;

@@ -1146,7 +1280,7 @@ mod tests {
    #[test]
    fn test_basic_ser_de_alter_table_task() {
        let task = AlterTableTask {
            alter_table: AlterExpr::default(),
            alter_table: AlterTableExpr::default(),
        };

        let output = serde_json::to_vec(&task).unwrap();

13
src/common/options/Cargo.toml
Normal file
@@ -0,0 +1,13 @@
[package]
name = "common-options"
version.workspace = true
edition.workspace = true
license.workspace = true

[dependencies]
common-grpc.workspace = true
humantime-serde.workspace = true
serde.workspace = true

[lints]
workspace = true
@@ -18,20 +18,23 @@ use common_grpc::channel_manager;
use serde::{Deserialize, Serialize};

#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Default)]
pub struct DatanodeOptions {
    pub client: DatanodeClientOptions,
pub struct DatanodeClientOptions {
    pub client: ClientOptions,
}

#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct DatanodeClientOptions {
pub struct ClientOptions {
    #[serde(with = "humantime_serde")]
    pub timeout: Duration,
    #[serde(with = "humantime_serde")]
    pub connect_timeout: Duration,
    pub tcp_nodelay: bool,
}

impl Default for DatanodeClientOptions {
impl Default for ClientOptions {
    fn default() -> Self {
        Self {
            timeout: Duration::from_secs(channel_manager::DEFAULT_GRPC_REQUEST_TIMEOUT_SECS),
            connect_timeout: Duration::from_secs(
                channel_manager::DEFAULT_GRPC_CONNECT_TIMEOUT_SECS,
            ),

@@ -12,27 +12,4 @@
// See the License for the specific language governing permissions and
// limitations under the License.

//! Internal metrics of the memtable.

/// Metrics of writing the partition tree.
pub struct WriteMetrics {
    /// Size allocated by keys.
    pub key_bytes: usize,
    /// Size allocated by values.
    pub value_bytes: usize,
    /// Minimum timestamp.
    pub min_ts: i64,
    /// Maximum timestamp
    pub max_ts: i64,
}

impl Default for WriteMetrics {
    fn default() -> Self {
        Self {
            key_bytes: 0,
            value_bytes: 0,
            min_ts: i64::MAX,
            max_ts: i64::MIN,
        }
    }
}
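A side note on the (here removed) `WriteMetrics` defaults: starting `min_ts` at `i64::MAX` and `max_ts` at `i64::MIN` lets min/max folds run with no "first element" special case. An illustrative fold, assuming this usage:

    // Folding timestamps into the metrics; the extreme sentinels are overwritten
    // by the first real value.
    let mut m = WriteMetrics::default();
    for ts in [3_i64, -7, 42] {
        m.min_ts = m.min_ts.min(ts);
        m.max_ts = m.max_ts.max(ts);
    }
    assert_eq!((m.min_ts, m.max_ts), (-7, 42));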
pub mod datanode;

@@ -245,6 +245,14 @@ pub enum Error {
        #[snafu(implicit)]
        location: Location,
    },

    #[snafu(display("Invalid vector string: {}", vec_str))]
    InvalidVectorString {
        vec_str: String,
        source: DataTypeError,
        #[snafu(implicit)]
        location: Location,
    },
}

pub type Result<T> = std::result::Result<T, Error>;

@@ -273,7 +281,8 @@ impl ErrorExt for Error {
            | Error::IntoVector { source, .. }
            | Error::FromScalarValue { source, .. }
            | Error::ConvertArrowSchema { source, .. }
            | Error::FromArrowArray { source, .. } => source.status_code(),
            | Error::FromArrowArray { source, .. }
            | Error::InvalidVectorString { source, .. } => source.status_code(),

            Error::MissingTableMutationHandler { .. }
            | Error::MissingProcedureServiceHandler { .. }

@@ -20,6 +20,7 @@ pin-project.workspace = true
serde.workspace = true
serde_json.workspace = true
snafu.workspace = true
tokio.workspace = true

[dev-dependencies]
tokio.workspace = true

@@ -161,6 +161,13 @@ pub enum Error {
        #[snafu(implicit)]
        location: Location,
    },
    #[snafu(display("Stream timeout"))]
    StreamTimeout {
        #[snafu(implicit)]
        location: Location,
        #[snafu(source)]
        error: tokio::time::error::Elapsed,
    },
}

impl ErrorExt for Error {

@@ -190,6 +197,8 @@ impl ErrorExt for Error {
            Error::SchemaConversion { source, .. } | Error::CastVector { source, .. } => {
                source.status_code()
            }

            Error::StreamTimeout { .. } => StatusCode::Cancelled,
        }
    }
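For context (a sketch under assumed names, not from the diff): the new `StreamTimeout` variant is the natural sink for `tokio::time::timeout` wrapping a stream poll, since snafu generates a `StreamTimeoutSnafu` context selector for it.

    // Hypothetical call site; `poll_timeout: Duration` and `stream` are assumed.
    use snafu::ResultExt;
    use tokio_stream::StreamExt;

    let item = tokio::time::timeout(poll_timeout, stream.next())
        .await
        .context(StreamTimeoutSnafu)?; // Elapsed -> Error::StreamTimeout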
@@ -427,7 +427,8 @@ mod test {
        common_telemetry::init_default_ut_logging();
        let mut region_server = mock_region_server();
        let mut engine_env = TestEnv::with_prefix("region-alive-keeper");
        let engine = Arc::new(engine_env.create_engine(MitoConfig::default()).await);
        let engine = engine_env.create_engine(MitoConfig::default()).await;
        let engine = Arc::new(engine);
        region_server.register_engine(engine.clone());

        let alive_keeper = Arc::new(RegionAliveKeeper::new(region_server.clone(), 100));

@@ -14,6 +14,8 @@

//! Datanode configurations

use core::time::Duration;

use common_base::readable_size::ReadableSize;
use common_base::secrets::{ExposeSecret, SecretString};
use common_config::Configurable;

@@ -30,7 +32,7 @@ use servers::heartbeat_options::HeartbeatOptions;
use servers::http::HttpOptions;
use servers::Mode;

pub const DEFAULT_OBJECT_STORE_CACHE_SIZE: ReadableSize = ReadableSize::mb(256);
pub const DEFAULT_OBJECT_STORE_CACHE_SIZE: ReadableSize = ReadableSize::gb(1);

/// Default data home in file storage
const DEFAULT_DATA_HOME: &str = "/tmp/greptimedb";

@@ -112,6 +114,38 @@ pub struct ObjectStorageCacheConfig {
    pub cache_capacity: Option<ReadableSize>,
}

/// The HTTP client options for the storage.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
#[serde(default)]
pub struct HttpClientConfig {
    /// The maximum idle connections per host allowed in the pool.
    pub(crate) pool_max_idle_per_host: u32,

    /// The timeout for only the connect phase of an HTTP client.
    #[serde(with = "humantime_serde")]
    pub(crate) connect_timeout: Duration,

    /// The total request timeout, applied from when the request starts connecting until the response body has finished.
    /// Also considered a total deadline.
    #[serde(with = "humantime_serde")]
    pub(crate) timeout: Duration,

    /// The timeout for idle sockets being kept alive.
    #[serde(with = "humantime_serde")]
    pub(crate) pool_idle_timeout: Duration,
}

impl Default for HttpClientConfig {
    fn default() -> Self {
        Self {
            pool_max_idle_per_host: 1024,
            connect_timeout: Duration::from_secs(30),
            timeout: Duration::from_secs(30),
            pool_idle_timeout: Duration::from_secs(90),
        }
    }
}
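Because the durations use `humantime_serde`, the config accepts human-readable strings. A deserialization sketch (assumes the `toml` crate and crate-internal visibility for the `pub(crate)` fields):

    // Illustrative only: parse an HttpClientConfig section from TOML text.
    let cfg: HttpClientConfig = toml::from_str(
        r#"
        pool_max_idle_per_host = 512
        connect_timeout = "10s"
        timeout = "60s"
        pool_idle_timeout = "90s"
        "#,
    )?;
    assert_eq!(cfg.pool_max_idle_per_host, 512);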
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(default)]
pub struct S3Config {

@@ -126,6 +160,7 @@ pub struct S3Config {
    pub region: Option<String>,
    #[serde(flatten)]
    pub cache: ObjectStorageCacheConfig,
    pub http_client: HttpClientConfig,
}

impl PartialEq for S3Config {

@@ -138,6 +173,7 @@ impl PartialEq for S3Config {
            && self.endpoint == other.endpoint
            && self.region == other.region
            && self.cache == other.cache
            && self.http_client == other.http_client
    }
}

@@ -154,6 +190,7 @@ pub struct OssConfig {
    pub endpoint: String,
    #[serde(flatten)]
    pub cache: ObjectStorageCacheConfig,
    pub http_client: HttpClientConfig,
}

impl PartialEq for OssConfig {

@@ -165,6 +202,7 @@ impl PartialEq for OssConfig {
            && self.access_key_secret.expose_secret() == other.access_key_secret.expose_secret()
            && self.endpoint == other.endpoint
            && self.cache == other.cache
            && self.http_client == other.http_client
    }
}

@@ -182,6 +220,7 @@ pub struct AzblobConfig {
    pub sas_token: Option<String>,
    #[serde(flatten)]
    pub cache: ObjectStorageCacheConfig,
    pub http_client: HttpClientConfig,
}

impl PartialEq for AzblobConfig {

@@ -194,6 +233,7 @@ impl PartialEq for AzblobConfig {
            && self.endpoint == other.endpoint
            && self.sas_token == other.sas_token
            && self.cache == other.cache
            && self.http_client == other.http_client
    }
}

@@ -211,6 +251,7 @@ pub struct GcsConfig {
    pub endpoint: String,
    #[serde(flatten)]
    pub cache: ObjectStorageCacheConfig,
    pub http_client: HttpClientConfig,
}

impl PartialEq for GcsConfig {

@@ -223,6 +264,7 @@ impl PartialEq for GcsConfig {
            && self.credential.expose_secret() == other.credential.expose_secret()
            && self.endpoint == other.endpoint
            && self.cache == other.cache
            && self.http_client == other.http_client
    }
}

@@ -237,6 +279,7 @@ impl Default for S3Config {
            endpoint: Option::default(),
            region: Option::default(),
            cache: ObjectStorageCacheConfig::default(),
            http_client: HttpClientConfig::default(),
        }
    }
}

@@ -251,6 +294,7 @@ impl Default for OssConfig {
            access_key_secret: SecretString::from(String::default()),
            endpoint: String::default(),
            cache: ObjectStorageCacheConfig::default(),
            http_client: HttpClientConfig::default(),
        }
    }
}

@@ -266,6 +310,7 @@ impl Default for AzblobConfig {
            endpoint: String::default(),
            sas_token: Option::default(),
            cache: ObjectStorageCacheConfig::default(),
            http_client: HttpClientConfig::default(),
        }
    }
}

@@ -281,6 +326,7 @@ impl Default for GcsConfig {
            credential: SecretString::from(String::default()),
            endpoint: String::default(),
            cache: ObjectStorageCacheConfig::default(),
            http_client: HttpClientConfig::default(),
        }
    }
}

@@ -18,11 +18,13 @@ use std::path::Path;
use std::sync::Arc;
use std::time::Duration;

use catalog::kvbackend::CachedKvBackendBuilder;
use catalog::memory::MemoryCatalogManager;
use common_base::Plugins;
use common_error::ext::BoxedError;
use common_greptimedb_telemetry::GreptimeDBTelemetryTask;
use common_meta::key::datanode_table::{DatanodeTableManager, DatanodeTableValue};
use common_meta::key::{SchemaMetadataManager, SchemaMetadataManagerRef};
use common_meta::kv_backend::KvBackendRef;
use common_meta::wal_options_allocator::prepare_wal_options;
pub use common_procedure::options::ProcedureConfig;

@@ -207,7 +209,13 @@ impl DatanodeBuilder {
            (Box::new(NoopRegionServerEventListener) as _, None)
        };

        let region_server = self.new_region_server(region_event_listener).await?;
        let cached_kv_backend = Arc::new(CachedKvBackendBuilder::new(kv_backend.clone()).build());

        let schema_metadata_manager =
            Arc::new(SchemaMetadataManager::new(cached_kv_backend.clone()));
        let region_server = self
            .new_region_server(schema_metadata_manager, region_event_listener)
            .await?;

        let datanode_table_manager = DatanodeTableManager::new(kv_backend.clone());
        let table_values = datanode_table_manager

@@ -235,7 +243,15 @@ impl DatanodeBuilder {
        }

        let heartbeat_task = if let Some(meta_client) = meta_client {
            Some(HeartbeatTask::try_new(&self.opts, region_server.clone(), meta_client).await?)
            Some(
                HeartbeatTask::try_new(
                    &self.opts,
                    region_server.clone(),
                    meta_client,
                    cached_kv_backend,
                )
                .await?,
            )
        } else {
            None
        };

@@ -312,6 +328,7 @@ impl DatanodeBuilder {

    async fn new_region_server(
        &self,
        schema_metadata_manager: SchemaMetadataManagerRef,
        event_listener: RegionServerEventListenerRef,
    ) -> Result<RegionServer> {
        let opts: &DatanodeOptions = &self.opts;

@@ -340,8 +357,13 @@ impl DatanodeBuilder {
        );

        let object_store_manager = Self::build_object_store_manager(&opts.storage).await?;
        let engines =
            Self::build_store_engines(opts, object_store_manager, self.plugins.clone()).await?;
        let engines = Self::build_store_engines(
            opts,
            object_store_manager,
            schema_metadata_manager,
            self.plugins.clone(),
        )
        .await?;
        for engine in engines {
            region_server.register_engine(engine);
        }

@@ -355,6 +377,7 @@ impl DatanodeBuilder {
    async fn build_store_engines(
        opts: &DatanodeOptions,
        object_store_manager: ObjectStoreManagerRef,
        schema_metadata_manager: SchemaMetadataManagerRef,
        plugins: Plugins,
    ) -> Result<Vec<RegionEngineRef>> {
        let mut engines = vec![];

@@ -365,6 +388,7 @@ impl DatanodeBuilder {
            opts,
            object_store_manager.clone(),
            config.clone(),
            schema_metadata_manager.clone(),
            plugins.clone(),
        )
        .await?;

@@ -390,6 +414,7 @@ impl DatanodeBuilder {
        opts: &DatanodeOptions,
        object_store_manager: ObjectStoreManagerRef,
        config: MitoConfig,
        schema_metadata_manager: SchemaMetadataManagerRef,
        plugins: Plugins,
    ) -> Result<MitoEngine> {
        let mito_engine = match &opts.wal {

@@ -399,6 +424,7 @@ impl DatanodeBuilder {
                Self::build_raft_engine_log_store(&opts.storage.data_home, raft_engine_config)
                    .await?,
                object_store_manager,
                schema_metadata_manager,
                plugins,
            )
            .await

@@ -429,6 +455,7 @@ impl DatanodeBuilder {
                config,
                Self::build_kafka_log_store(kafka_config, global_index_collector).await?,
                object_store_manager,
                schema_metadata_manager,
                plugins,
            )
            .await
@@ -18,6 +18,7 @@ use std::sync::Arc;
use std::time::Duration;

use api::v1::meta::{HeartbeatRequest, NodeInfo, Peer, RegionRole, RegionStat};
use catalog::kvbackend::CachedKvBackend;
use common_meta::datanode::REGION_STATISTIC_KEY;
use common_meta::distributed_time_constants::META_KEEP_ALIVE_INTERVAL_SECS;
use common_meta::heartbeat::handler::parse_mailbox_message::ParseMailboxMessageHandler;

@@ -39,6 +40,7 @@ use crate::alive_keeper::RegionAliveKeeper;
use crate::config::DatanodeOptions;
use crate::error::{self, MetaClientInitSnafu, Result};
use crate::event_listener::RegionServerEventReceiver;
use crate::heartbeat::handler::cache_invalidator::InvalidateSchemaCacheHandler;
use crate::metrics::{self, HEARTBEAT_RECV_COUNT, HEARTBEAT_SENT_COUNT};
use crate::region_server::RegionServer;

@@ -70,6 +72,7 @@ impl HeartbeatTask {
        opts: &DatanodeOptions,
        region_server: RegionServer,
        meta_client: MetaClientRef,
        cache_kv_backend: Arc<CachedKvBackend>,
    ) -> Result<Self> {
        let region_alive_keeper = Arc::new(RegionAliveKeeper::new(
            region_server.clone(),

@@ -79,6 +82,7 @@ impl HeartbeatTask {
            region_alive_keeper.clone(),
            Arc::new(ParseMailboxMessageHandler),
            Arc::new(RegionHeartbeatResponseHandler::new(region_server.clone())),
            Arc::new(InvalidateSchemaCacheHandler::new(cache_kv_backend)),
        ]));

        Ok(Self {

@@ -24,6 +24,7 @@ use futures::future::BoxFuture;
use snafu::OptionExt;
use store_api::storage::RegionId;

pub(crate) mod cache_invalidator;
mod close_region;
mod downgrade_region;
mod open_region;

@@ -134,7 +135,7 @@ impl HeartbeatResponseHandler for RegionHeartbeatResponseHandler {
            }
        });

        Ok(HandleControl::Done)
        Ok(HandleControl::Continue)
    }
}

@@ -285,7 +286,7 @@ mod tests {

        let mut ctx = heartbeat_env.create_handler_ctx((meta, instruction));
        let control = heartbeat_handler.handle(&mut ctx).await.unwrap();
        assert_matches!(control, HandleControl::Done);
        assert_matches!(control, HandleControl::Continue);

        let (_, reply) = heartbeat_env.receiver.recv().await.unwrap();

@@ -340,7 +341,7 @@ mod tests {

        let mut ctx = heartbeat_env.create_handler_ctx((meta, instruction));
        let control = heartbeat_handler.handle(&mut ctx).await.unwrap();
        assert_matches!(control, HandleControl::Done);
        assert_matches!(control, HandleControl::Continue);

        let (_, reply) = heartbeat_env.receiver.recv().await.unwrap();

@@ -373,7 +374,7 @@ mod tests {

        let mut ctx = heartbeat_env.create_handler_ctx((meta, instruction));
        let control = heartbeat_handler.handle(&mut ctx).await.unwrap();
        assert_matches!(control, HandleControl::Done);
        assert_matches!(control, HandleControl::Continue);

        let (_, reply) = heartbeat_env.receiver.recv().await.unwrap();

@@ -420,7 +421,7 @@ mod tests {

        let mut ctx = heartbeat_env.create_handler_ctx((meta, instruction));
        let control = heartbeat_handler.handle(&mut ctx).await.unwrap();
        assert_matches!(control, HandleControl::Done);
        assert_matches!(control, HandleControl::Continue);

        let (_, reply) = heartbeat_env.receiver.recv().await.unwrap();

@@ -442,7 +443,7 @@ mod tests {
        });
        let mut ctx = heartbeat_env.create_handler_ctx((meta, instruction));
        let control = heartbeat_handler.handle(&mut ctx).await.unwrap();
        assert_matches!(control, HandleControl::Done);
        assert_matches!(control, HandleControl::Continue);

        let (_, reply) = heartbeat_env.receiver.recv().await.unwrap();
167
src/datanode/src/heartbeat/handler/cache_invalidator.rs
Normal file
@@ -0,0 +1,167 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Schema cache invalidator handler

use std::sync::Arc;

use async_trait::async_trait;
use catalog::kvbackend::CachedKvBackend;
use common_meta::cache_invalidator::KvCacheInvalidator;
use common_meta::heartbeat::handler::{
    HandleControl, HeartbeatResponseHandler, HeartbeatResponseHandlerContext,
};
use common_meta::instruction::{CacheIdent, Instruction};
use common_meta::key::schema_name::SchemaNameKey;
use common_meta::key::MetadataKey;
use common_telemetry::debug;

#[derive(Clone)]
pub(crate) struct InvalidateSchemaCacheHandler {
    cached_kv_backend: Arc<CachedKvBackend>,
}

#[async_trait]
impl HeartbeatResponseHandler for InvalidateSchemaCacheHandler {
    fn is_acceptable(&self, ctx: &HeartbeatResponseHandlerContext) -> bool {
        matches!(
            ctx.incoming_message.as_ref(),
            Some((_, Instruction::InvalidateCaches(_)))
        )
    }

    async fn handle(
        &self,
        ctx: &mut HeartbeatResponseHandlerContext,
    ) -> common_meta::error::Result<HandleControl> {
        let Some((_, Instruction::InvalidateCaches(caches))) = ctx.incoming_message.take() else {
            unreachable!("InvalidateSchemaCacheHandler: should be guarded by 'is_acceptable'")
        };

        debug!(
            "InvalidateSchemaCacheHandler: invalidating caches: {:?}",
            caches
        );

        for cache in caches {
            let CacheIdent::SchemaName(schema_name) = cache else {
                continue;
            };
            let key: SchemaNameKey = (&schema_name).into();
            let key_bytes = key.to_bytes();
            // invalidate cache
            self.cached_kv_backend.invalidate_key(&key_bytes).await;
        }

        Ok(HandleControl::Done)
    }
}

impl InvalidateSchemaCacheHandler {
    pub fn new(cached_kv_backend: Arc<CachedKvBackend>) -> Self {
        Self { cached_kv_backend }
    }
}

#[cfg(test)]
mod tests {
    use std::sync::Arc;
    use std::time::Duration;

    use api::v1::meta::HeartbeatResponse;
    use catalog::kvbackend::CachedKvBackendBuilder;
    use common_meta::heartbeat::handler::{
        HandlerGroupExecutor, HeartbeatResponseHandlerContext, HeartbeatResponseHandlerExecutor,
    };
    use common_meta::heartbeat::mailbox::{HeartbeatMailbox, MessageMeta};
    use common_meta::instruction::{CacheIdent, Instruction};
    use common_meta::key::schema_name::{SchemaName, SchemaNameKey, SchemaNameValue};
    use common_meta::key::{MetadataKey, SchemaMetadataManager};
    use common_meta::kv_backend::memory::MemoryKvBackend;
    use common_meta::kv_backend::KvBackend;
    use common_meta::rpc::store::PutRequest;

    use crate::heartbeat::handler::cache_invalidator::InvalidateSchemaCacheHandler;

    #[tokio::test]
    async fn test_invalidate_schema_cache_handler() {
        let inner_kv = Arc::new(MemoryKvBackend::default());
        let cached_kv = Arc::new(CachedKvBackendBuilder::new(inner_kv.clone()).build());
        let schema_metadata_manager = SchemaMetadataManager::new(cached_kv.clone());

        let schema_name = "test_schema";
        let catalog_name = "test_catalog";
        schema_metadata_manager
            .register_region_table_info(
                1,
                "test_table",
                schema_name,
                catalog_name,
                Some(SchemaNameValue {
                    ttl: Some(Duration::from_secs(1)),
                }),
            )
            .await;

        schema_metadata_manager
            .get_schema_options_by_table_id(1)
            .await
            .unwrap();

        let schema_key = SchemaNameKey::new(catalog_name, schema_name).to_bytes();
        let new_schema_value = SchemaNameValue {
            ttl: Some(Duration::from_secs(3)),
        }
        .try_as_raw_value()
        .unwrap();
        inner_kv
            .put(PutRequest {
                key: schema_key.clone(),
                value: new_schema_value,
                prev_kv: false,
            })
            .await
            .unwrap();

        let executor = Arc::new(HandlerGroupExecutor::new(vec![Arc::new(
            InvalidateSchemaCacheHandler::new(cached_kv),
        )]));

        let (tx, _) = tokio::sync::mpsc::channel(8);
        let mailbox = Arc::new(HeartbeatMailbox::new(tx));

        // Invalidates the cached schema value via the handler so the next read
        // sees the value written directly to the inner backend above.
        let response = HeartbeatResponse::default();
        let mut ctx: HeartbeatResponseHandlerContext =
            HeartbeatResponseHandlerContext::new(mailbox, response);
        ctx.incoming_message = Some((
            MessageMeta::new_test(1, "hi", "foo", "bar"),
            Instruction::InvalidateCaches(vec![CacheIdent::SchemaName(SchemaName {
                catalog_name: catalog_name.to_string(),
                schema_name: schema_name.to_string(),
            })]),
        ));
        executor.handle(ctx).await.unwrap();

        assert_eq!(
            Some(Duration::from_secs(3)),
            SchemaNameValue::try_from_raw_value(
                &inner_kv.get(&schema_key).await.unwrap().unwrap().value
            )
            .unwrap()
            .unwrap()
            .ttl
        );
    }
}
@@ -74,6 +74,10 @@ impl HandlerContext {

        // Ignores flush request
        if !writable {
            warn!(
                "Region: {region_id} is not writable, flush_timeout: {:?}",
                flush_timeout
            );
            return self.downgrade_to_follower_gracefully(region_id).await;
        }

@@ -1355,7 +1355,7 @@ mod tests {
    }

    #[tokio::test]
    async fn test_region_server_parallism() {
    async fn test_region_server_parallelism() {
        let p = RegionServerParallelism::from_opts(2, Duration::from_millis(1)).unwrap();
        let first_query = p.acquire().await;
        assert!(first_query.is_ok());
@@ -32,7 +32,7 @@ use object_store::util::{join_dir, normalize_dir, with_instrument_layers};
use object_store::{Access, Error, HttpClient, ObjectStore, ObjectStoreBuilder};
use snafu::prelude::*;

use crate::config::{ObjectStoreConfig, DEFAULT_OBJECT_STORE_CACHE_SIZE};
use crate::config::{HttpClientConfig, ObjectStoreConfig, DEFAULT_OBJECT_STORE_CACHE_SIZE};
use crate::error::{self, Result};

pub(crate) async fn new_raw_object_store(

@@ -177,7 +177,7 @@ pub(crate) fn clean_temp_dir(dir: &str) -> Result<()> {
    Ok(())
}

pub(crate) fn build_http_client() -> Result<HttpClient> {
pub(crate) fn build_http_client(config: &HttpClientConfig) -> Result<HttpClient> {
    let http_builder = {
        let mut builder = reqwest::ClientBuilder::new();

@@ -186,25 +186,28 @@ pub(crate) fn build_http_client() -> Result<HttpClient> {
        let pool_max_idle_per_host = env::var("_GREPTIMEDB_HTTP_POOL_MAX_IDLE_PER_HOST")
            .ok()
            .and_then(|v| v.parse::<usize>().ok())
            .unwrap_or(usize::MAX);
            .inspect(|_| warn!("'_GREPTIMEDB_HTTP_POOL_MAX_IDLE_PER_HOST' might be deprecated in the future. Please set it in the config file instead."))
            .unwrap_or(config.pool_max_idle_per_host as usize);
        builder = builder.pool_max_idle_per_host(pool_max_idle_per_host);

        // Connect timeout default to 30s.
        let connect_timeout = env::var("_GREPTIMEDB_HTTP_CONNECT_TIMEOUT")
            .ok()
            .and_then(|v| v.parse::<u64>().ok())
            .unwrap_or(30);
        builder = builder.connect_timeout(Duration::from_secs(connect_timeout));
            .and_then(|v| v.parse::<u64>().ok().map(Duration::from_secs))
            .inspect(|_| warn!("'_GREPTIMEDB_HTTP_CONNECT_TIMEOUT' might be deprecated in the future. Please set it in the config file instead."))
            .unwrap_or(config.connect_timeout);
        builder = builder.connect_timeout(connect_timeout);

        // Pool connection idle timeout default to 90s.
        let idle_timeout = env::var("_GREPTIMEDB_HTTP_POOL_IDLE_TIMEOUT")
            .ok()
            .and_then(|v| v.parse::<u64>().ok())
            .unwrap_or(90);
            .and_then(|v| v.parse::<u64>().ok().map(Duration::from_secs))
            .inspect(|_| warn!("'_GREPTIMEDB_HTTP_POOL_IDLE_TIMEOUT' might be deprecated in the future. Please set it in the config file instead."))
            .unwrap_or(config.pool_idle_timeout);

        builder = builder.pool_idle_timeout(Duration::from_secs(idle_timeout));
        builder = builder.pool_idle_timeout(idle_timeout);

        builder
        builder.timeout(config.timeout)
    };

    HttpClient::build(http_builder).context(error::InitBackendSnafu)
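The env-over-config precedence pattern above, in isolation (an illustrative sketch; the env var name is real, the helper is not): if the variable is present and parseable it wins and a deprecation warning is emitted, otherwise the config value applies.

    use std::time::Duration;

    fn resolve_connect_timeout(from_config: Duration) -> Duration {
        std::env::var("_GREPTIMEDB_HTTP_CONNECT_TIMEOUT")
            .ok()
            .and_then(|v| v.parse::<u64>().ok().map(Duration::from_secs))
            // println! stands in for the crate's warn! macro in this sketch.
            .inspect(|_| println!("env override in effect; prefer the config file"))
            .unwrap_or(from_config)
    }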
@@ -30,13 +30,15 @@ pub(crate) async fn new_azblob_object_store(azblob_config: &AzblobConfig) -> Res
        azblob_config.container, &root
    );

    let client = build_http_client(&azblob_config.http_client)?;

    let mut builder = Azblob::default()
        .root(&root)
        .container(&azblob_config.container)
        .endpoint(&azblob_config.endpoint)
        .account_name(azblob_config.account_name.expose_secret())
        .account_key(azblob_config.account_key.expose_secret())
        .http_client(build_http_client()?);
        .http_client(client);

    if let Some(token) = &azblob_config.sas_token {
        builder = builder.sas_token(token);

@@ -29,6 +29,8 @@ pub(crate) async fn new_gcs_object_store(gcs_config: &GcsConfig) -> Result<Objec
        gcs_config.bucket, &root
    );

    let client = build_http_client(&gcs_config.http_client);

    let builder = Gcs::default()
        .root(&root)
        .bucket(&gcs_config.bucket)

@@ -36,7 +38,7 @@ pub(crate) async fn new_gcs_object_store(gcs_config: &GcsConfig) -> Result<Objec
        .credential_path(gcs_config.credential_path.expose_secret())
        .credential(gcs_config.credential.expose_secret())
        .endpoint(&gcs_config.endpoint)
        .http_client(build_http_client()?);
        .http_client(client?);

    Ok(ObjectStore::new(builder)
        .context(error::InitBackendSnafu)?

@@ -29,13 +29,15 @@ pub(crate) async fn new_oss_object_store(oss_config: &OssConfig) -> Result<Objec
        oss_config.bucket, &root
    );

    let client = build_http_client(&oss_config.http_client)?;

    let builder = Oss::default()
        .root(&root)
        .bucket(&oss_config.bucket)
        .endpoint(&oss_config.endpoint)
        .access_key_id(oss_config.access_key_id.expose_secret())
        .access_key_secret(oss_config.access_key_secret.expose_secret())
        .http_client(build_http_client()?);
        .http_client(client);

    Ok(ObjectStore::new(builder)
        .context(error::InitBackendSnafu)?

@@ -30,12 +30,14 @@ pub(crate) async fn new_s3_object_store(s3_config: &S3Config) -> Result<ObjectSt
        s3_config.bucket, &root
    );

    let client = build_http_client(&s3_config.http_client)?;

    let mut builder = S3::default()
        .root(&root)
        .bucket(&s3_config.bucket)
        .access_key_id(s3_config.access_key_id.expose_secret())
        .secret_access_key(s3_config.secret_access_key.expose_secret())
        .http_client(build_http_client()?);
        .http_client(client);

    if s3_config.endpoint.is_some() {
        builder = builder.endpoint(s3_config.endpoint.as_ref().unwrap());
Some files were not shown because too many files have changed in this diff.