Compare commits


55 Commits

Author SHA1 Message Date
evenyag
6247de2d50 chore: Revert "feat: prune in each partition"
This reverts commit 3f9bf48161.
2024-11-08 23:57:18 +08:00
evenyag
a2eb46132f feat: tokio dump 2024-11-08 23:08:47 +08:00
evenyag
3f9bf48161 feat: prune in each partition 2024-11-08 21:31:03 +08:00
evenyag
9bd2e006b5 feat: file output thread id 2024-11-08 20:35:40 +08:00
evenyag
031421ca91 feat: add thread id to log 2024-11-08 19:09:08 +08:00
evenyag
999f3a40c2 chore: Revert "chore: Revert "feat: yield after get ranges""
This reverts commit 770a850437.
2024-11-08 17:12:54 +08:00
evenyag
50d28e0a00 feat: add timeout to build ranges 2024-11-08 16:23:03 +08:00
evenyag
770a850437 chore: Revert "feat: yield after get ranges"
This reverts commit 65e53b5bc4.
2024-11-08 15:35:23 +08:00
evenyag
65e53b5bc4 feat: yield after get ranges 2024-11-08 01:28:39 +08:00
evenyag
9a6c7aa4d6 chore: log label 2024-11-08 01:08:02 +08:00
evenyag
4f446b95d8 chore: logs 2024-11-08 01:01:27 +08:00
evenyag
9ad4200f55 feat: only log for unordered scan 2024-11-08 00:58:29 +08:00
evenyag
53d456651f chore: range builder logs 2024-11-08 00:11:54 +08:00
evenyag
f11c5acb0f feat: logs for debug prune cost 2024-11-07 22:10:46 +08:00
evenyag
8536a1ec6e chore: logs to debug hang 2024-11-07 20:36:12 +08:00
evenyag
fce8c968da feat: gauge for scan partition 2024-11-07 16:55:40 +08:00
evenyag
98a6ac973c feat: log on merge scan region start/end 2024-11-07 16:48:03 +08:00
evenyag
8f79e421c3 chore: Revert "feat: remove too large files"
This reverts commit a22667bf3c.
2024-11-07 16:20:39 +08:00
evenyag
e8b326382f chore: fix compile 2024-11-07 00:28:19 +08:00
evenyag
56781e7fbc fix: skip expired files 2024-11-07 00:25:52 +08:00
evenyag
7d342b3d95 feat: small max file size 2024-11-06 23:31:16 +08:00
evenyag
a22667bf3c feat: remove too large files 2024-11-06 22:08:43 +08:00
evenyag
29b9b7db0c feat: support compression method 2024-11-06 18:58:20 +08:00
evenyag
a66909a562 chore: fix compile 2024-11-06 16:29:18 +08:00
evenyag
8137b8ff3d chore: more logs 2024-11-06 15:25:49 +08:00
Ruihang Xia
7c5cd2922a fix split logic
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-11-05 23:43:35 +08:00
evenyag
a1d0dcf2c3 chore: more logs 2024-11-05 20:25:05 +08:00
evenyag
c391171f99 feat: more logs 2024-11-05 20:18:35 +08:00
evenyag
f44862aaac feat: update log 2024-11-05 17:47:32 +08:00
evenyag
8bf795d88c chore: more logs 2024-11-05 16:22:54 +08:00
evenyag
3bbf4e0232 feat: log range meta 2024-11-05 16:01:55 +08:00
evenyag
83da3950da chore: debug 2024-11-05 15:33:42 +08:00
evenyag
957b5effd5 chore: fix compile 2024-11-05 15:32:35 +08:00
evenyag
f59e28006a feat: assert precision 2024-11-05 15:24:40 +08:00
evenyag
3e5bbdf71e feat: enable batch checker 2024-11-05 15:24:40 +08:00
Ruihang Xia
b8ac19c480 log on wrong range index
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-11-05 14:57:44 +08:00
evenyag
92b274a856 chore: log compute cost 2024-11-05 14:57:54 +08:00
evenyag
6bdac25f0a chore: more logs 2024-11-05 13:02:16 +08:00
evenyag
a9f3c4b17c chore: page reader metrics 2024-11-04 20:08:56 +08:00
evenyag
e003eaab36 chore: more log 2024-11-04 20:06:20 +08:00
evenyag
6e590da412 chore: remove compaction skip log 2024-11-04 19:40:42 +08:00
evenyag
ff5fa40b85 feat: skip wal 2024-11-04 19:40:41 +08:00
Ruihang Xia
d4aa4159d4 feat: support windowed sort with where condition
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-11-04 19:34:03 +08:00
evenyag
960f6d821b feat: spawn block write wal 2024-11-04 17:35:12 +08:00
Ruihang Xia
9c5d044238 Merge branch 'main' into transform-count-min-max
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-11-01 17:45:28 +08:00
Ruihang Xia
70c354eed6 fix: the way to retrieve time index column
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-11-01 12:10:12 +08:00
Ruihang Xia
23bf663d58 feat: handle sort that wont preserving partition
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-10-31 22:13:36 +08:00
Ruihang Xia
817648eac5 Merge branch 'main' into transform-count-min-max
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-10-31 15:38:12 +08:00
Ruihang Xia
03b29439e2 Merge branch 'main' into transform-count-min-max
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-09-11 11:09:07 +08:00
Ruihang Xia
712f4ca0ef try sort partial commutative
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-09-09 21:08:59 +08:00
Ruihang Xia
60bacff57e ignore unmatched left and right greater
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-09-08 11:12:21 +08:00
Ruihang Xia
6208772ba4 Merge branch 'main' into transform-count-min-max
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-09-08 11:02:04 +08:00
Ruihang Xia
67184c0498 Merge branch 'main' into transform-count-min-max
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-09-05 14:30:47 +08:00
Ruihang Xia
1dd908fdf7 handle group by
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-09-05 12:50:13 +08:00
Ruihang Xia
8179b4798e feat: support transforming min/max/count aggr fn
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-09-04 22:17:31 +08:00
380 changed files with 7622 additions and 22768 deletions

View File

@@ -1,14 +0,0 @@
#!/bin/sh
set -e
# Get the latest version of github.com/GreptimeTeam/greptimedb
VERSION=$(curl -s https://api.github.com/repos/GreptimeTeam/greptimedb/releases/latest | jq -r '.tag_name')
echo "Downloading the latest version: $VERSION"
# Download the install script
curl -fsSL https://raw.githubusercontent.com/greptimeteam/greptimedb/main/scripts/install.sh | sh -s $VERSION
# Execute the `greptime` command
./greptime --version

View File

@@ -22,10 +22,6 @@ jobs:
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Check install.sh
run: ./.github/scripts/check-install-script.sh
- name: Run sqlness test
uses: ./.github/actions/sqlness-test
with:

View File

@@ -91,7 +91,7 @@ env:
# The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nigthly-20230313;
NIGHTLY_RELEASE_PREFIX: nightly
# Note: The NEXT_RELEASE_VERSION should be modified manually by every formal release.
NEXT_RELEASE_VERSION: v0.11.0
NEXT_RELEASE_VERSION: v0.10.0
# Permission reference: https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs
permissions:

View File

@@ -17,6 +17,6 @@ repos:
- id: fmt
- id: clippy
args: ["--workspace", "--all-targets", "--all-features", "--", "-D", "warnings"]
stages: [pre-push]
stages: [push]
- id: cargo-check
args: ["--workspace", "--all-targets", "--all-features"]

Cargo.lock generated (405 changed lines)

File diff suppressed because it is too large

View File

@@ -2,25 +2,23 @@
members = [
"src/api",
"src/auth",
"src/cache",
"src/catalog",
"src/cache",
"src/client",
"src/cmd",
"src/common/base",
"src/common/catalog",
"src/common/config",
"src/common/datasource",
"src/common/decimal",
"src/common/error",
"src/common/frontend",
"src/common/function",
"src/common/macro",
"src/common/greptimedb-telemetry",
"src/common/grpc",
"src/common/grpc-expr",
"src/common/macro",
"src/common/mem-prof",
"src/common/meta",
"src/common/options",
"src/common/plugins",
"src/common/pprof",
"src/common/procedure",
@@ -32,6 +30,7 @@ members = [
"src/common/telemetry",
"src/common/test-util",
"src/common/time",
"src/common/decimal",
"src/common/version",
"src/common/wal",
"src/datanode",
@@ -39,7 +38,6 @@ members = [
"src/file-engine",
"src/flow",
"src/frontend",
"src/index",
"src/log-store",
"src/meta-client",
"src/meta-srv",
@@ -59,6 +57,7 @@ members = [
"src/sql",
"src/store-api",
"src/table",
"src/index",
"tests-fuzz",
"tests-integration",
"tests/runner",
@@ -66,7 +65,7 @@ members = [
resolver = "2"
[workspace.package]
version = "0.10.2"
version = "0.9.5"
edition = "2021"
license = "Apache-2.0"
@@ -118,16 +117,15 @@ datafusion-sql = { git = "https://github.com/waynexia/arrow-datafusion.git", rev
datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
derive_builder = "0.12"
dotenv = "0.15"
etcd-client = "0.13"
etcd-client = { version = "0.13" }
fst = "0.4.7"
futures = "0.3"
futures-util = "0.3"
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "a875e976441188028353f7274a46a7e6e065c5d4" }
hex = "0.4"
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "255f87a3318ace3f88a67f76995a0e14910983f4" }
humantime = "2.1"
humantime-serde = "1.1"
itertools = "0.10"
jsonb = { git = "https://github.com/databendlabs/jsonb.git", rev = "8c8d2fc294a39f3ff08909d60f718639cfba3875", default-features = false }
jsonb = { git = "https://github.com/databendlabs/jsonb.git", rev = "46ad50fc71cf75afbf98eec455f7892a6387c1fc", default-features = false }
lazy_static = "1.4"
meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "a10facb353b41460eeb98578868ebf19c2084fac" }
mockall = "0.11.4"
@@ -153,7 +151,7 @@ raft-engine = { version = "0.4.1", default-features = false }
rand = "0.8"
ratelimit = "0.9"
regex = "1.8"
regex-automata = "0.4"
regex-automata = { version = "0.4" }
reqwest = { version = "0.12", default-features = false, features = [
"json",
"rustls-tls-native-roots",
@@ -184,11 +182,11 @@ strum = { version = "0.25", features = ["derive"] }
tempfile = "3"
tokio = { version = "1.40", features = ["full"] }
tokio-postgres = "0.7"
tokio-stream = "0.1"
tokio-stream = { version = "0.1" }
tokio-util = { version = "0.7", features = ["io-util", "compat"] }
toml = "0.8.8"
tonic = { version = "0.11", features = ["tls", "gzip", "zstd"] }
tower = "0.4"
tower = { version = "0.4" }
tracing-appender = "0.2"
tracing-subscriber = { version = "0.3", features = ["env-filter", "json", "fmt"] }
typetag = "0.2"
@@ -216,7 +214,6 @@ common-grpc-expr = { path = "src/common/grpc-expr" }
common-macro = { path = "src/common/macro" }
common-mem-prof = { path = "src/common/mem-prof" }
common-meta = { path = "src/common/meta" }
common-options = { path = "src/common/options" }
common-plugins = { path = "src/common/plugins" }
common-pprof = { path = "src/common/pprof" }
common-procedure = { path = "src/common/procedure" }
@@ -264,21 +261,21 @@ tokio-rustls = { git = "https://github.com/GreptimeTeam/tokio-rustls" }
# This is commented, since we are not using aws-lc-sys, if we need to use it, we need to uncomment this line or use a release after this commit, or it wouldn't compile with gcc < 8.1
# see https://github.com/aws/aws-lc-rs/pull/526
# aws-lc-sys = { git ="https://github.com/aws/aws-lc-rs", rev = "556558441e3494af4b156ae95ebc07ebc2fd38aa" }
# Apply a fix for pprof for unaligned pointer access
pprof = { git = "https://github.com/GreptimeTeam/pprof-rs", rev = "1bd1e21" }
[workspace.dependencies.meter-macros]
git = "https://github.com/GreptimeTeam/greptime-meter.git"
rev = "a10facb353b41460eeb98578868ebf19c2084fac"
[profile.release]
debug = 1
# debug = 1
split-debuginfo = "off"
[profile.nightly]
inherits = "release"
strip = "debuginfo"
split-debuginfo = "off"
# strip = "debuginfo"
lto = "thin"
debug = false
# debug = false
incremental = false
[profile.ci]

View File

@@ -6,7 +6,7 @@
</picture>
</p>
<h2 align="center">Unified & Cost-Effective Time Series Database for Metrics, Logs, and Events</h2>
<h2 align="center">Unified Time Series Database for Metrics, Logs, and Events</h2>
<div align="center">
<h3 align="center">
@@ -48,21 +48,9 @@
</a>
</div>
- [Introduction](#introduction)
- [**Features: Why GreptimeDB**](#why-greptimedb)
- [Architecture](https://docs.greptime.com/contributor-guide/overview/#architecture)
- [Try it for free](#try-greptimedb)
- [Getting Started](#getting-started)
- [Project Status](#project-status)
- [Join the community](#community)
- [Contributing](#contributing)
- [Extension](#extension )
- [License](#license)
- [Acknowledgement](#acknowledgement)
## Introduction
**GreptimeDB** is an open-source unified & cost-effective time-series database for **Metrics**, **Logs**, and **Events** (also **Traces** in plan). You can gain real-time insights from Edge to Cloud at Any Scale.
**GreptimeDB** is an open-source unified time-series database for **Metrics**, **Logs**, and **Events** (also **Traces** in plan). You can gain real-time insights from Edge to Cloud at any scale.
## Why GreptimeDB

View File

@@ -93,8 +93,8 @@
| `storage` | -- | -- | The data storage options. |
| `storage.data_home` | String | `/tmp/greptimedb/` | The working home directory. |
| `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
| `storage.cache_path` | String | Unset | Cache configuration for object storage such as 'S3' etc. It is recommended to configure it when using object storage for better performance.<br/>The local file cache directory. |
| `storage.cache_capacity` | String | Unset | The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger. |
| `storage.cache_path` | String | Unset | Cache configuration for object storage such as 'S3' etc.<br/>The local file cache directory. |
| `storage.cache_capacity` | String | Unset | The local file cache capacity in bytes. |
| `storage.bucket` | String | Unset | The S3 bucket name.<br/>**It's only used when the storage type is `S3`, `Oss` and `Gcs`**. |
| `storage.root` | String | Unset | The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.<br/>**It's only used when the storage type is `S3`, `Oss` and `Azblob`**. |
| `storage.access_key_id` | String | Unset | The access key id of the aws account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3` and `Oss`**. |
@@ -109,11 +109,6 @@
| `storage.sas_token` | String | Unset | The sas token of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
| `storage.endpoint` | String | Unset | The endpoint of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
| `storage.region` | String | Unset | The region of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
| `storage.http_client` | -- | -- | The http client options to the storage.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
| `storage.http_client.pool_max_idle_per_host` | Integer | `1024` | The maximum idle connection per host allowed in the pool. |
| `storage.http_client.connect_timeout` | String | `30s` | The timeout for only the connect phase of a http client. |
| `storage.http_client.timeout` | String | `30s` | The total request timeout, applied from when the request starts connecting until the response body has finished.<br/>Also considered a total deadline. |
| `storage.http_client.pool_idle_timeout` | String | `90s` | The timeout for idle sockets being kept-alive. |
| `[[region_engine]]` | -- | -- | The region engine options. You can configure multiple region engines. |
| `region_engine.mito` | -- | -- | The Mito engine options. |
| `region_engine.mito.num_workers` | Integer | `8` | Number of region workers. |
@@ -131,9 +126,9 @@
| `region_engine.mito.vector_cache_size` | String | Auto | Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.page_cache_size` | String | Auto | Cache size for pages of SST row groups. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/8 of OS memory. |
| `region_engine.mito.selector_result_cache_size` | String | Auto | Cache size for time series selector (e.g. `last_value()`). Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.enable_experimental_write_cache` | Bool | `false` | Whether to enable the experimental write cache. It is recommended to enable it when using object storage for better performance. |
| `region_engine.mito.enable_experimental_write_cache` | Bool | `false` | Whether to enable the experimental write cache. |
| `region_engine.mito.experimental_write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}/write_cache`. |
| `region_engine.mito.experimental_write_cache_size` | String | `1GiB` | Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger. |
| `region_engine.mito.experimental_write_cache_size` | String | `512MB` | Capacity for write cache. |
| `region_engine.mito.experimental_write_cache_ttl` | String | Unset | TTL for write cache. |
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
| `region_engine.mito.scan_parallelism` | Integer | `0` | Parallelism to scan a region (default: 1/4 of cpu cores).<br/>- `0`: using the default value (1/4 of cpu cores).<br/>- `1`: scan in current thread.<br/>- `n`: scan in parallelism n. |
@@ -421,8 +416,8 @@
| `storage` | -- | -- | The data storage options. |
| `storage.data_home` | String | `/tmp/greptimedb/` | The working home directory. |
| `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
| `storage.cache_path` | String | Unset | Cache configuration for object storage such as 'S3' etc. It is recommended to configure it when using object storage for better performance.<br/>The local file cache directory. |
| `storage.cache_capacity` | String | Unset | The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger. |
| `storage.cache_path` | String | Unset | Cache configuration for object storage such as 'S3' etc.<br/>The local file cache directory. |
| `storage.cache_capacity` | String | Unset | The local file cache capacity in bytes. |
| `storage.bucket` | String | Unset | The S3 bucket name.<br/>**It's only used when the storage type is `S3`, `Oss` and `Gcs`**. |
| `storage.root` | String | Unset | The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.<br/>**It's only used when the storage type is `S3`, `Oss` and `Azblob`**. |
| `storage.access_key_id` | String | Unset | The access key id of the aws account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3` and `Oss`**. |
@@ -437,11 +432,6 @@
| `storage.sas_token` | String | Unset | The sas token of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
| `storage.endpoint` | String | Unset | The endpoint of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
| `storage.region` | String | Unset | The region of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
| `storage.http_client` | -- | -- | The http client options to the storage.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
| `storage.http_client.pool_max_idle_per_host` | Integer | `1024` | The maximum idle connection per host allowed in the pool. |
| `storage.http_client.connect_timeout` | String | `30s` | The timeout for only the connect phase of a http client. |
| `storage.http_client.timeout` | String | `30s` | The total request timeout, applied from when the request starts connecting until the response body has finished.<br/>Also considered a total deadline. |
| `storage.http_client.pool_idle_timeout` | String | `90s` | The timeout for idle sockets being kept-alive. |
| `[[region_engine]]` | -- | -- | The region engine options. You can configure multiple region engines. |
| `region_engine.mito` | -- | -- | The Mito engine options. |
| `region_engine.mito.num_workers` | Integer | `8` | Number of region workers. |
@@ -459,9 +449,9 @@
| `region_engine.mito.vector_cache_size` | String | Auto | Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.page_cache_size` | String | Auto | Cache size for pages of SST row groups. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/8 of OS memory. |
| `region_engine.mito.selector_result_cache_size` | String | Auto | Cache size for time series selector (e.g. `last_value()`). Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.enable_experimental_write_cache` | Bool | `false` | Whether to enable the experimental write cache. It is recommended to enable it when using object storage for better performance. |
| `region_engine.mito.enable_experimental_write_cache` | Bool | `false` | Whether to enable the experimental write cache. |
| `region_engine.mito.experimental_write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}/write_cache`. |
| `region_engine.mito.experimental_write_cache_size` | String | `1GiB` | Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger. |
| `region_engine.mito.experimental_write_cache_size` | String | `512MB` | Capacity for write cache. |
| `region_engine.mito.experimental_write_cache_ttl` | String | Unset | TTL for write cache. |
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
| `region_engine.mito.scan_parallelism` | Integer | `0` | Parallelism to scan a region (default: 1/4 of cpu cores).<br/>- `0`: using the default value (1/4 of cpu cores).<br/>- `1`: scan in current thread.<br/>- `n`: scan in parallelism n. |
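
The four `storage.http_client` options listed above map directly onto knobs of a `reqwest` client builder (`reqwest` is already a workspace dependency). Below is a minimal, hedged sketch of assembling a client from the documented defaults; it is illustrative only, not GreptimeDB's actual construction code, and the function name is invented for the example.

```rust
// Illustrative sketch: an HTTP client configured with the documented
// `storage.http_client` defaults. Not GreptimeDB's real code path.
use std::time::Duration;

fn build_storage_http_client() -> reqwest::Result<reqwest::Client> {
    reqwest::Client::builder()
        .pool_max_idle_per_host(1024) // max idle connections kept per host
        .connect_timeout(Duration::from_secs(30)) // connect phase only
        .timeout(Duration::from_secs(30)) // total request deadline
        .pool_idle_timeout(Duration::from_secs(90)) // keep-alive for idle sockets
        .build()
}
```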

View File

@@ -294,14 +294,14 @@ data_home = "/tmp/greptimedb/"
## - `Oss`: the data is stored in the Aliyun OSS.
type = "File"
## Cache configuration for object storage such as 'S3' etc. It is recommended to configure it when using object storage for better performance.
## Cache configuration for object storage such as 'S3' etc.
## The local file cache directory.
## @toml2docs:none-default
cache_path = "/path/local_cache"
## The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger.
## The local file cache capacity in bytes.
## @toml2docs:none-default
cache_capacity = "1GiB"
cache_capacity = "256MB"
## The S3 bucket name.
## **It's only used when the storage type is `S3`, `Oss` and `Gcs`**.
@@ -375,23 +375,6 @@ endpoint = "https://s3.amazonaws.com"
## @toml2docs:none-default
region = "us-west-2"
## The http client options to the storage.
## **It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**.
[storage.http_client]
## The maximum idle connection per host allowed in the pool.
pool_max_idle_per_host = 1024
## The timeout for only the connect phase of a http client.
connect_timeout = "30s"
## The total request timeout, applied from when the request starts connecting until the response body has finished.
## Also considered a total deadline.
timeout = "30s"
## The timeout for idle sockets being kept-alive.
pool_idle_timeout = "90s"
# Custom storage options
# [[storage.providers]]
# name = "S3"
@@ -476,14 +459,14 @@ auto_flush_interval = "1h"
## @toml2docs:none-default="Auto"
#+ selector_result_cache_size = "512MB"
## Whether to enable the experimental write cache. It is recommended to enable it when using object storage for better performance.
## Whether to enable the experimental write cache.
enable_experimental_write_cache = false
## File system path for write cache, defaults to `{data_home}/write_cache`.
experimental_write_cache_path = ""
## Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger.
experimental_write_cache_size = "1GiB"
## Capacity for write cache.
experimental_write_cache_size = "512MB"
## TTL for write cache.
## @toml2docs:none-default

View File

@@ -332,14 +332,14 @@ data_home = "/tmp/greptimedb/"
## - `Oss`: the data is stored in the Aliyun OSS.
type = "File"
## Cache configuration for object storage such as 'S3' etc. It is recommended to configure it when using object storage for better performance.
## Cache configuration for object storage such as 'S3' etc.
## The local file cache directory.
## @toml2docs:none-default
cache_path = "/path/local_cache"
## The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger.
## The local file cache capacity in bytes.
## @toml2docs:none-default
cache_capacity = "1GiB"
cache_capacity = "256MB"
## The S3 bucket name.
## **It's only used when the storage type is `S3`, `Oss` and `Gcs`**.
@@ -413,23 +413,6 @@ endpoint = "https://s3.amazonaws.com"
## @toml2docs:none-default
region = "us-west-2"
## The http client options to the storage.
## **It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**.
[storage.http_client]
## The maximum idle connection per host allowed in the pool.
pool_max_idle_per_host = 1024
## The timeout for only the connect phase of a http client.
connect_timeout = "30s"
## The total request timeout, applied from when the request starts connecting until the response body has finished.
## Also considered a total deadline.
timeout = "30s"
## The timeout for idle sockets being kept-alive.
pool_idle_timeout = "90s"
# Custom storage options
# [[storage.providers]]
# name = "S3"
@@ -514,14 +497,14 @@ auto_flush_interval = "1h"
## @toml2docs:none-default="Auto"
#+ selector_result_cache_size = "512MB"
## Whether to enable the experimental write cache. It is recommended to enable it when using object storage for better performance.
## Whether to enable the experimental write cache.
enable_experimental_write_cache = false
## File system path for write cache, defaults to `{data_home}/write_cache`.
experimental_write_cache_path = ""
## Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger.
experimental_write_cache_size = "1GiB"
## Capacity for write cache.
experimental_write_cache_size = "512MB"
## TTL for write cache.
## @toml2docs:none-default

View File

@@ -4,13 +4,13 @@
example:
```bash
curl --data "trace,flow=debug" 127.0.0.1:4000/debug/log_level
curl --data "trace;flow=debug" 127.0.0.1:4000/debug/log_level
```
And the database will reply with something like:
```bash
Log Level changed from Some("info") to "trace,flow=debug"%
Log Level changed from Some("info") to "trace;flow=debug"%
```
The data is a string in the format of `global_level,module1=level1,module2=level2,...` that follows the same rules as `RUST_LOG`.
The data is a string in the format of `global_level;module1=level1;module2=level2;...` that follows the same rules as `RUST_LOG`.
The module is the module name of the log, and the level is the log level. The log level can be one of the following: `trace`, `debug`, `info`, `warn`, `error`, `off` (case insensitive).
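
These directives can be checked locally before posting them to the endpoint. Below is a minimal sketch using `tracing-subscriber`'s `EnvFilter`, which implements the `RUST_LOG` parsing rules (the workspace already enables its `env-filter` feature); it assumes the comma-separated form, and the server's own parser may differ in details such as the separator.

```rust
// Minimal sketch: validating a directive string of the form
// `global_level,module1=level1,...` with tracing-subscriber's EnvFilter.
// The directive value is illustrative; the endpoint's parser may differ.
use tracing_subscriber::EnvFilter;

fn main() {
    let directives = "trace,flow=debug";
    match EnvFilter::try_new(directives) {
        Ok(filter) => println!("valid directives, filter = {filter}"),
        Err(e) => eprintln!("invalid directives {directives:?}: {e}"),
    }
}
```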

View File

@@ -5,13 +5,6 @@ GreptimeDB's official Grafana dashboard.
Status notify: we are still working on this config. It's expected to change frequently in the recent days. Please feel free to submit your feedback and/or contribution to this dashboard 🤗
If you use Helm [chart](https://github.com/GreptimeTeam/helm-charts) to deploy GreptimeDB cluster, you can enable self-monitoring by setting the following values in your Helm chart:
- `monitoring.enabled=true`: Deploys a standalone GreptimeDB instance dedicated to monitoring the cluster;
- `grafana.enabled=true`: Deploys Grafana and automatically imports the monitoring dashboard;
The standalone GreptimeDB instance will collect metrics from your cluster and the dashboard will be available in the Grafana UI. For detailed deployment instructions, please refer to our [Kubernetes deployment guide](https://docs.greptime.com/nightly/user-guide/deployments/deploy-on-kubernetes/getting-started).
# How to use
## `greptimedb.json`

File diff suppressed because it is too large

View File

@@ -1,4 +1,4 @@
#!/bin/sh
#!/usr/bin/env bash
set -ue
@@ -15,7 +15,7 @@ GITHUB_ORG=GreptimeTeam
GITHUB_REPO=greptimedb
BIN=greptime
get_os_type() {
function get_os_type() {
os_type="$(uname -s)"
case "$os_type" in
@@ -31,7 +31,7 @@ get_os_type() {
esac
}
get_arch_type() {
function get_arch_type() {
arch_type="$(uname -m)"
case "$arch_type" in
@@ -53,7 +53,7 @@ get_arch_type() {
esac
}
download_artifact() {
function download_artifact() {
if [ -n "${OS_TYPE}" ] && [ -n "${ARCH_TYPE}" ]; then
# Use the latest stable released version.
# GitHub API reference: https://docs.github.com/en/rest/releases/releases?apiVersion=2022-11-28#get-the-latest-release.

View File

@@ -36,14 +36,15 @@ use datatypes::vectors::{
TimestampMillisecondVector, TimestampNanosecondVector, TimestampSecondVector, UInt32Vector,
UInt64Vector, VectorRef,
};
use greptime_proto::v1;
use greptime_proto::v1::column_data_type_extension::TypeExt;
use greptime_proto::v1::ddl_request::Expr;
use greptime_proto::v1::greptime_request::Request;
use greptime_proto::v1::query_request::Query;
use greptime_proto::v1::value::ValueData;
use greptime_proto::v1::{
self, ColumnDataTypeExtension, DdlRequest, DecimalTypeExtension, JsonTypeExtension,
QueryRequest, Row, SemanticType, VectorTypeExtension,
ColumnDataTypeExtension, DdlRequest, DecimalTypeExtension, JsonTypeExtension, QueryRequest,
Row, SemanticType,
};
use paste::paste;
use snafu::prelude::*;
@@ -149,17 +150,6 @@ impl From<ColumnDataTypeWrapper> for ConcreteDataType {
ConcreteDataType::decimal128_default_datatype()
}
}
ColumnDataType::Vector => {
if let Some(TypeExt::VectorType(d)) = datatype_wrapper
.datatype_ext
.as_ref()
.and_then(|datatype_ext| datatype_ext.type_ext.as_ref())
{
ConcreteDataType::vector_datatype(d.dim)
} else {
ConcreteDataType::vector_default_datatype()
}
}
}
}
}
@@ -241,15 +231,6 @@ impl ColumnDataTypeWrapper {
}),
}
}
pub fn vector_datatype(dim: u32) -> Self {
ColumnDataTypeWrapper {
datatype: ColumnDataType::Vector,
datatype_ext: Some(ColumnDataTypeExtension {
type_ext: Some(TypeExt::VectorType(VectorTypeExtension { dim })),
}),
}
}
}
impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
@@ -268,7 +249,7 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
ConcreteDataType::UInt64(_) => ColumnDataType::Uint64,
ConcreteDataType::Float32(_) => ColumnDataType::Float32,
ConcreteDataType::Float64(_) => ColumnDataType::Float64,
ConcreteDataType::Binary(_) => ColumnDataType::Binary,
ConcreteDataType::Binary(_) | ConcreteDataType::Json(_) => ColumnDataType::Binary,
ConcreteDataType::String(_) => ColumnDataType::String,
ConcreteDataType::Date(_) => ColumnDataType::Date,
ConcreteDataType::DateTime(_) => ColumnDataType::Datetime,
@@ -290,8 +271,6 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
IntervalType::MonthDayNano(_) => ColumnDataType::IntervalMonthDayNano,
},
ConcreteDataType::Decimal128(_) => ColumnDataType::Decimal128,
ConcreteDataType::Json(_) => ColumnDataType::Json,
ConcreteDataType::Vector(_) => ColumnDataType::Vector,
ConcreteDataType::Null(_)
| ConcreteDataType::List(_)
| ConcreteDataType::Dictionary(_)
@@ -310,17 +289,15 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
})),
})
}
ColumnDataType::Json => datatype.as_json().map(|_| ColumnDataTypeExtension {
type_ext: Some(TypeExt::JsonType(JsonTypeExtension::JsonBinary.into())),
}),
ColumnDataType::Vector => {
datatype
.as_vector()
.map(|vector_type| ColumnDataTypeExtension {
type_ext: Some(TypeExt::VectorType(VectorTypeExtension {
dim: vector_type.dim as _,
})),
ColumnDataType::Binary => {
if datatype == ConcreteDataType::json_datatype() {
// Json is the same as binary in proto. The extension marks the binary in proto is actually a json.
Some(ColumnDataTypeExtension {
type_ext: Some(TypeExt::JsonType(JsonTypeExtension::JsonBinary.into())),
})
} else {
None
}
}
_ => None,
};
@@ -445,10 +422,6 @@ pub fn values_with_capacity(datatype: ColumnDataType, capacity: usize) -> Values
string_values: Vec::with_capacity(capacity),
..Default::default()
},
ColumnDataType::Vector => Values {
binary_values: Vec::with_capacity(capacity),
..Default::default()
},
}
}
@@ -527,14 +500,13 @@ fn ddl_request_type(request: &DdlRequest) -> &'static str {
match request.expr {
Some(Expr::CreateDatabase(_)) => "ddl.create_database",
Some(Expr::CreateTable(_)) => "ddl.create_table",
Some(Expr::AlterTable(_)) => "ddl.alter_table",
Some(Expr::Alter(_)) => "ddl.alter",
Some(Expr::DropTable(_)) => "ddl.drop_table",
Some(Expr::TruncateTable(_)) => "ddl.truncate_table",
Some(Expr::CreateFlow(_)) => "ddl.create_flow",
Some(Expr::DropFlow(_)) => "ddl.drop_flow",
Some(Expr::CreateView(_)) => "ddl.create_view",
Some(Expr::DropView(_)) => "ddl.drop_view",
Some(Expr::AlterDatabase(_)) => "ddl.alter_database",
None => "ddl.empty",
}
}
@@ -701,7 +673,6 @@ pub fn pb_values_to_vector_ref(data_type: &ConcreteDataType, values: Values) ->
Decimal128::from_value_precision_scale(x.hi, x.lo, d.precision(), d.scale()).into()
}),
)),
ConcreteDataType::Vector(_) => Arc::new(BinaryVector::from_vec(values.binary_values)),
ConcreteDataType::Null(_)
| ConcreteDataType::List(_)
| ConcreteDataType::Dictionary(_)
@@ -867,7 +838,6 @@ pub fn pb_values_to_values(data_type: &ConcreteDataType, values: Values) -> Vec<
))
})
.collect(),
ConcreteDataType::Vector(_) => values.binary_values.into_iter().map(|v| v.into()).collect(),
ConcreteDataType::Null(_)
| ConcreteDataType::List(_)
| ConcreteDataType::Dictionary(_)
@@ -892,7 +862,10 @@ pub fn is_column_type_value_eq(
ColumnDataTypeWrapper::try_new(type_value, type_extension)
.map(|wrapper| {
let datatype = ConcreteDataType::from(wrapper);
expect_type == &datatype
(datatype == *expect_type)
// Json type leverage binary type in pb, so this is valid.
|| (datatype == ConcreteDataType::binary_datatype()
&& *expect_type == ConcreteDataType::json_datatype())
})
.unwrap_or(false)
}
@@ -1179,10 +1152,6 @@ mod tests {
let values = values_with_capacity(ColumnDataType::Decimal128, 2);
let values = values.decimal128_values;
assert_eq!(2, values.capacity());
let values = values_with_capacity(ColumnDataType::Vector, 2);
let values = values.binary_values;
assert_eq!(2, values.capacity());
}
#[test]
@@ -1270,11 +1239,7 @@ mod tests {
assert_eq!(
ConcreteDataType::decimal128_datatype(10, 2),
ColumnDataTypeWrapper::decimal128_datatype(10, 2).into()
);
assert_eq!(
ConcreteDataType::vector_datatype(3),
ColumnDataTypeWrapper::vector_datatype(3).into()
);
)
}
#[test]
@@ -1370,10 +1335,6 @@ mod tests {
.try_into()
.unwrap()
);
assert_eq!(
ColumnDataTypeWrapper::vector_datatype(3),
ConcreteDataType::vector_datatype(3).try_into().unwrap()
);
let result: Result<ColumnDataTypeWrapper> = ConcreteDataType::null_datatype().try_into();
assert!(result.is_err());

View File

@@ -15,10 +15,8 @@
use std::collections::HashMap;
use datatypes::schema::{
ColumnDefaultConstraint, ColumnSchema, FulltextAnalyzer, FulltextOptions, COMMENT_KEY,
FULLTEXT_KEY, INVERTED_INDEX_KEY,
ColumnDefaultConstraint, ColumnSchema, FulltextOptions, COMMENT_KEY, FULLTEXT_KEY,
};
use greptime_proto::v1::Analyzer;
use snafu::ResultExt;
use crate::error::{self, Result};
@@ -27,8 +25,6 @@ use crate::v1::{ColumnDef, ColumnOptions, SemanticType};
/// Key used to store fulltext options in gRPC column options.
const FULLTEXT_GRPC_KEY: &str = "fulltext";
/// Key used to store inverted index options in gRPC column options.
const INVERTED_INDEX_GRPC_KEY: &str = "inverted_index";
/// Tries to construct a `ColumnSchema` from the given `ColumnDef`.
pub fn try_as_column_schema(column_def: &ColumnDef) -> Result<ColumnSchema> {
@@ -53,13 +49,10 @@ pub fn try_as_column_schema(column_def: &ColumnDef) -> Result<ColumnSchema> {
if !column_def.comment.is_empty() {
metadata.insert(COMMENT_KEY.to_string(), column_def.comment.clone());
}
if let Some(options) = column_def.options.as_ref() {
if let Some(fulltext) = options.options.get(FULLTEXT_GRPC_KEY) {
metadata.insert(FULLTEXT_KEY.to_string(), fulltext.clone());
}
if let Some(inverted_index) = options.options.get(INVERTED_INDEX_GRPC_KEY) {
metadata.insert(INVERTED_INDEX_KEY.to_string(), inverted_index.clone());
}
if let Some(options) = column_def.options.as_ref()
&& let Some(fulltext) = options.options.get(FULLTEXT_GRPC_KEY)
{
metadata.insert(FULLTEXT_KEY.to_string(), fulltext.to_string());
}
ColumnSchema::new(&column_def.name, data_type.into(), column_def.is_nullable)
@@ -77,12 +70,7 @@ pub fn options_from_column_schema(column_schema: &ColumnSchema) -> Option<Column
if let Some(fulltext) = column_schema.metadata().get(FULLTEXT_KEY) {
options
.options
.insert(FULLTEXT_GRPC_KEY.to_string(), fulltext.clone());
}
if let Some(inverted_index) = column_schema.metadata().get(INVERTED_INDEX_KEY) {
options
.options
.insert(INVERTED_INDEX_GRPC_KEY.to_string(), inverted_index.clone());
.insert(FULLTEXT_GRPC_KEY.to_string(), fulltext.to_string());
}
(!options.options.is_empty()).then_some(options)
@@ -105,14 +93,6 @@ pub fn options_from_fulltext(fulltext: &FulltextOptions) -> Result<Option<Column
Ok((!options.options.is_empty()).then_some(options))
}
/// Tries to construct a `FulltextAnalyzer` from the given analyzer.
pub fn as_fulltext_option(analyzer: Analyzer) -> FulltextAnalyzer {
match analyzer {
Analyzer::English => FulltextAnalyzer::English,
Analyzer::Chinese => FulltextAnalyzer::Chinese,
}
}
#[cfg(test)]
mod tests {
@@ -135,13 +115,10 @@ mod tests {
comment: "test_comment".to_string(),
datatype_extension: None,
options: Some(ColumnOptions {
options: HashMap::from([
(
FULLTEXT_GRPC_KEY.to_string(),
"{\"enable\":true}".to_string(),
),
(INVERTED_INDEX_GRPC_KEY.to_string(), "true".to_string()),
]),
options: HashMap::from([(
FULLTEXT_GRPC_KEY.to_string(),
"{\"enable\":true}".to_string(),
)]),
}),
};
@@ -162,7 +139,6 @@ mod tests {
..Default::default()
}
);
assert!(schema.is_inverted_indexed());
}
#[test]
@@ -177,17 +153,12 @@ mod tests {
analyzer: FulltextAnalyzer::English,
case_sensitive: false,
})
.unwrap()
.set_inverted_index(true);
.unwrap();
let options = options_from_column_schema(&schema).unwrap();
assert_eq!(
options.options.get(FULLTEXT_GRPC_KEY).unwrap(),
"{\"enable\":true,\"analyzer\":\"English\",\"case-sensitive\":false}"
);
assert_eq!(
options.options.get(INVERTED_INDEX_GRPC_KEY).unwrap(),
"true"
);
}
#[test]

View File

@@ -178,12 +178,6 @@ pub enum Error {
location: Location,
},
#[snafu(display("Partition manager not found, it's not expected."))]
PartitionManagerNotFound {
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Failed to find table partitions"))]
FindPartitions { source: partition::error::Error },
@@ -307,7 +301,6 @@ impl ErrorExt for Error {
| Error::CastManager { .. }
| Error::Json { .. }
| Error::GetInformationExtension { .. }
| Error::PartitionManagerNotFound { .. }
| Error::ProcedureIdNotFound { .. } => StatusCode::Unexpected,
Error::ViewPlanColumnsChanged { .. } => StatusCode::InvalidArguments,

View File

@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
pub use client::{CachedKvBackend, CachedKvBackendBuilder, MetaKvBackend};
pub use client::{CachedMetaKvBackend, CachedMetaKvBackendBuilder, MetaKvBackend};
mod client;
mod manager;

View File

@@ -22,7 +22,6 @@ use common_error::ext::BoxedError;
use common_meta::cache_invalidator::KvCacheInvalidator;
use common_meta::error::Error::CacheNotGet;
use common_meta::error::{CacheNotGetSnafu, Error, ExternalSnafu, GetKvCacheSnafu, Result};
use common_meta::kv_backend::txn::{Txn, TxnResponse};
use common_meta::kv_backend::{KvBackend, KvBackendRef, TxnService};
use common_meta::rpc::store::{
BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse, BatchPutRequest,
@@ -43,20 +42,20 @@ const DEFAULT_CACHE_MAX_CAPACITY: u64 = 10000;
const DEFAULT_CACHE_TTL: Duration = Duration::from_secs(10 * 60);
const DEFAULT_CACHE_TTI: Duration = Duration::from_secs(5 * 60);
pub struct CachedKvBackendBuilder {
pub struct CachedMetaKvBackendBuilder {
cache_max_capacity: Option<u64>,
cache_ttl: Option<Duration>,
cache_tti: Option<Duration>,
inner: KvBackendRef,
meta_client: Arc<MetaClient>,
}
impl CachedKvBackendBuilder {
pub fn new(inner: KvBackendRef) -> Self {
impl CachedMetaKvBackendBuilder {
pub fn new(meta_client: Arc<MetaClient>) -> Self {
Self {
cache_max_capacity: None,
cache_ttl: None,
cache_tti: None,
inner,
meta_client,
}
}
@@ -75,7 +74,7 @@ impl CachedKvBackendBuilder {
self
}
pub fn build(self) -> CachedKvBackend {
pub fn build(self) -> CachedMetaKvBackend {
let cache_max_capacity = self
.cache_max_capacity
.unwrap_or(DEFAULT_CACHE_MAX_CAPACITY);
@@ -86,11 +85,14 @@ impl CachedKvBackendBuilder {
.time_to_live(cache_ttl)
.time_to_idle(cache_tti)
.build();
let kv_backend = self.inner;
let kv_backend = Arc::new(MetaKvBackend {
client: self.meta_client,
});
let name = format!("CachedKvBackend({})", kv_backend.name());
let version = AtomicUsize::new(0);
CachedKvBackend {
CachedMetaKvBackend {
kv_backend,
cache,
name,
@@ -110,29 +112,19 @@ pub type CacheBackend = Cache<Vec<u8>, KeyValue>;
/// Therefore, it is recommended to use CachedMetaKvBackend to only read metadata related
/// information. Note: If you read other information, you may read expired data, which depends on
/// TTL and TTI for cache.
pub struct CachedKvBackend {
pub struct CachedMetaKvBackend {
kv_backend: KvBackendRef,
cache: CacheBackend,
name: String,
version: AtomicUsize,
}
#[async_trait::async_trait]
impl TxnService for CachedKvBackend {
impl TxnService for CachedMetaKvBackend {
type Error = Error;
async fn txn(&self, txn: Txn) -> std::result::Result<TxnResponse, Self::Error> {
// TODO(hl): txn of CachedKvBackend simply pass through to inner backend without invalidating caches.
self.kv_backend.txn(txn).await
}
fn max_txn_ops(&self) -> usize {
self.kv_backend.max_txn_ops()
}
}
#[async_trait::async_trait]
impl KvBackend for CachedKvBackend {
impl KvBackend for CachedMetaKvBackend {
fn name(&self) -> &str {
&self.name
}
@@ -313,7 +305,7 @@ impl KvBackend for CachedKvBackend {
}
#[async_trait::async_trait]
impl KvCacheInvalidator for CachedKvBackend {
impl KvCacheInvalidator for CachedMetaKvBackend {
async fn invalidate_key(&self, key: &[u8]) {
self.create_new_version();
self.cache.invalidate(key).await;
@@ -321,7 +313,7 @@ impl KvCacheInvalidator for CachedKvBackend {
}
}
impl CachedKvBackend {
impl CachedMetaKvBackend {
// only for test
#[cfg(test)]
fn wrap(kv_backend: KvBackendRef) -> Self {
@@ -474,7 +466,7 @@ mod tests {
use common_meta::rpc::KeyValue;
use dashmap::DashMap;
use super::CachedKvBackend;
use super::CachedMetaKvBackend;
#[derive(Default)]
pub struct SimpleKvBackend {
@@ -548,7 +540,7 @@ mod tests {
async fn test_cached_kv_backend() {
let simple_kv = Arc::new(SimpleKvBackend::default());
let get_execute_times = simple_kv.get_execute_times.clone();
let cached_kv = CachedKvBackend::wrap(simple_kv);
let cached_kv = CachedMetaKvBackend::wrap(simple_kv);
add_some_vals(&cached_kv).await;

View File

@@ -34,14 +34,15 @@ use datatypes::vectors::{
};
use futures::{StreamExt, TryStreamExt};
use partition::manager::PartitionInfo;
use partition::partition::PartitionDef;
use snafu::{OptionExt, ResultExt};
use store_api::storage::{ScanRequest, TableId};
use store_api::storage::{RegionId, ScanRequest, TableId};
use table::metadata::{TableInfo, TableType};
use super::PARTITIONS;
use crate::error::{
CreateRecordBatchSnafu, FindPartitionsSnafu, InternalSnafu, PartitionManagerNotFoundSnafu,
Result, UpgradeWeakCatalogManagerRefSnafu,
CreateRecordBatchSnafu, FindPartitionsSnafu, InternalSnafu, Result,
UpgradeWeakCatalogManagerRefSnafu,
};
use crate::kvbackend::KvBackendCatalogManager;
use crate::system_schema::information_schema::{InformationTable, Predicates};
@@ -235,8 +236,7 @@ impl InformationSchemaPartitionsBuilder {
let partition_manager = catalog_manager
.as_any()
.downcast_ref::<KvBackendCatalogManager>()
.map(|catalog_manager| catalog_manager.partition_manager())
.context(PartitionManagerNotFoundSnafu)?;
.map(|catalog_manager| catalog_manager.partition_manager());
let predicates = Predicates::from_scan_request(&request);
@@ -262,10 +262,27 @@ impl InformationSchemaPartitionsBuilder {
let table_ids: Vec<TableId> =
table_infos.iter().map(|info| info.ident.table_id).collect();
let mut table_partitions = partition_manager
.batch_find_table_partitions(&table_ids)
.await
.context(FindPartitionsSnafu)?;
let mut table_partitions = if let Some(partition_manager) = &partition_manager {
partition_manager
.batch_find_table_partitions(&table_ids)
.await
.context(FindPartitionsSnafu)?
} else {
// Current node must be a standalone instance, contains only one partition by default.
// TODO(dennis): change it when we support multi-regions for standalone.
table_ids
.into_iter()
.map(|table_id| {
(
table_id,
vec![PartitionInfo {
id: RegionId::new(table_id, 0),
partition: PartitionDef::new(vec![], vec![]),
}],
)
})
.collect()
};
for table_info in table_infos {
let partitions = table_partitions

View File

@@ -180,7 +180,7 @@ impl InformationSchemaSchemataBuilder {
.context(TableMetadataManagerSnafu)?
// information_schema is not available from this
// table_metadata_manager and we return None
.map(|schema_opts| format!("{}", schema_opts.into_inner()))
.map(|schema_opts| format!("{schema_opts}"))
} else {
None
};

View File

@@ -12,16 +12,13 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashSet;
use std::sync::{Arc, Weak};
use arrow_schema::SchemaRef as ArrowSchemaRef;
use common_catalog::consts::{INFORMATION_SCHEMA_TABLES_TABLE_ID, MITO_ENGINE};
use common_catalog::consts::INFORMATION_SCHEMA_TABLES_TABLE_ID;
use common_error::ext::BoxedError;
use common_meta::datanode::RegionStat;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
use common_telemetry::error;
use datafusion::execution::TaskContext;
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
@@ -34,7 +31,7 @@ use datatypes::vectors::{
};
use futures::TryStreamExt;
use snafu::{OptionExt, ResultExt};
use store_api::storage::{RegionId, ScanRequest, TableId};
use store_api::storage::{ScanRequest, TableId};
use table::metadata::{TableInfo, TableType};
use super::TABLES;
@@ -42,7 +39,6 @@ use crate::error::{
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
};
use crate::system_schema::information_schema::{InformationTable, Predicates};
use crate::system_schema::utils;
use crate::CatalogManager;
pub const TABLE_CATALOG: &str = "table_catalog";
@@ -238,51 +234,17 @@ impl InformationSchemaTablesBuilder {
.context(UpgradeWeakCatalogManagerRefSnafu)?;
let predicates = Predicates::from_scan_request(&request);
let information_extension = utils::information_extension(&self.catalog_manager)?;
// TODO(dennis): `region_stats` API is not stable in distributed cluster because of network issue etc.
// But we don't want the statements such as `show tables` fail,
// so using `unwrap_or_else` here instead of `?` operator.
let region_stats = information_extension
.region_stats()
.await
.map_err(|e| {
error!(e; "Failed to call region_stats");
e
})
.unwrap_or_else(|_| vec![]);
for schema_name in catalog_manager.schema_names(&catalog_name, None).await? {
let mut stream = catalog_manager.tables(&catalog_name, &schema_name, None);
while let Some(table) = stream.try_next().await? {
let table_info = table.table_info();
// TODO(dennis): make it working for metric engine
let table_region_stats =
if table_info.meta.engine == MITO_ENGINE || table_info.is_physical_table() {
let region_ids = table_info
.meta
.region_numbers
.iter()
.map(|n| RegionId::new(table_info.ident.table_id, *n))
.collect::<HashSet<_>>();
region_stats
.iter()
.filter(|stat| region_ids.contains(&stat.id))
.collect::<Vec<_>>()
} else {
vec![]
};
self.add_table(
&predicates,
&catalog_name,
&schema_name,
table_info,
table.table_type(),
&table_region_stats,
);
}
}
@@ -298,7 +260,6 @@ impl InformationSchemaTablesBuilder {
schema_name: &str,
table_info: Arc<TableInfo>,
table_type: TableType,
region_stats: &[&RegionStat],
) {
let table_name = table_info.name.as_ref();
let table_id = table_info.table_id();
@@ -312,9 +273,7 @@ impl InformationSchemaTablesBuilder {
let row = [
(TABLE_CATALOG, &Value::from(catalog_name)),
(TABLE_ID, &Value::from(table_id)),
(TABLE_SCHEMA, &Value::from(schema_name)),
(ENGINE, &Value::from(engine)),
(TABLE_NAME, &Value::from(table_name)),
(TABLE_TYPE, &Value::from(table_type_text)),
];
@@ -328,39 +287,21 @@ impl InformationSchemaTablesBuilder {
self.table_names.push(Some(table_name));
self.table_types.push(Some(table_type_text));
self.table_ids.push(Some(table_id));
let data_length = region_stats.iter().map(|stat| stat.sst_size).sum();
let table_rows = region_stats.iter().map(|stat| stat.num_rows).sum();
let index_length = region_stats.iter().map(|stat| stat.index_size).sum();
// It's not precise, but it is acceptable for long-term data storage.
let avg_row_length = if table_rows > 0 {
let total_data_length = data_length
+ region_stats
.iter()
.map(|stat| stat.memtable_size)
.sum::<u64>();
total_data_length / table_rows
} else {
0
};
self.data_length.push(Some(data_length));
self.index_length.push(Some(index_length));
self.table_rows.push(Some(table_rows));
self.avg_row_length.push(Some(avg_row_length));
// TODO(sunng87): use real data for these fields
self.data_length.push(Some(0));
self.max_data_length.push(Some(0));
self.checksum.push(Some(0));
self.index_length.push(Some(0));
self.avg_row_length.push(Some(0));
self.max_index_length.push(Some(0));
self.checksum.push(Some(0));
self.table_rows.push(Some(0));
self.data_free.push(Some(0));
self.auto_increment.push(Some(0));
self.row_format.push(Some("Fixed"));
self.table_collation.push(Some("utf8_bin"));
self.update_time.push(None);
self.check_time.push(None);
// use mariadb default table version number here
self.version.push(Some(11));
self.table_comment.push(table_info.desc.as_deref());

View File

@@ -18,7 +18,7 @@ use api::v1::greptime_database_client::GreptimeDatabaseClient;
use api::v1::greptime_request::Request;
use api::v1::query_request::Query;
use api::v1::{
AlterTableExpr, AuthHeader, CreateTableExpr, DdlRequest, GreptimeRequest, InsertRequests,
AlterExpr, AuthHeader, CreateTableExpr, DdlRequest, GreptimeRequest, InsertRequests,
QueryRequest, RequestHeader,
};
use arrow_flight::Ticket;
@@ -211,9 +211,9 @@ impl Database {
.await
}
pub async fn alter(&self, expr: AlterTableExpr) -> Result<Output> {
pub async fn alter(&self, expr: AlterExpr) -> Result<Output> {
self.do_get(Request::Ddl(DdlRequest {
expr: Some(DdlExpr::AlterTable(expr)),
expr: Some(DdlExpr::Alter(expr)),
}))
.await
}

View File

@@ -33,7 +33,6 @@ common-error.workspace = true
common-grpc.workspace = true
common-macro.workspace = true
common-meta.workspace = true
common-options.workspace = true
common-procedure.workspace = true
common-query.workspace = true
common-recordbatch.workspace = true
@@ -53,7 +52,6 @@ flow.workspace = true
frontend = { workspace = true, default-features = false }
futures.workspace = true
human-panic = "2.0"
humantime.workspace = true
lazy_static.workspace = true
meta-client.workspace = true
meta-srv.workspace = true

View File

@@ -12,15 +12,11 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::time::Duration;
use base64::engine::general_purpose;
use base64::Engine;
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use humantime::format_duration;
use serde_json::Value;
use servers::http::header::constants::GREPTIME_DB_HEADER_TIMEOUT;
use servers::http::result::greptime_result_v1::GreptimedbV1Response;
use servers::http::greptime_result_v1::GreptimedbV1Response;
use servers::http::GreptimeQueryOutput;
use snafu::ResultExt;
@@ -30,16 +26,10 @@ pub(crate) struct DatabaseClient {
addr: String,
catalog: String,
auth_header: Option<String>,
timeout: Duration,
}
impl DatabaseClient {
pub fn new(
addr: String,
catalog: String,
auth_basic: Option<String>,
timeout: Duration,
) -> Self {
pub fn new(addr: String, catalog: String, auth_basic: Option<String>) -> Self {
let auth_header = if let Some(basic) = auth_basic {
let encoded = general_purpose::STANDARD.encode(basic);
Some(format!("basic {}", encoded))
@@ -51,7 +41,6 @@ impl DatabaseClient {
addr,
catalog,
auth_header,
timeout,
}
}
@@ -74,11 +63,6 @@ impl DatabaseClient {
request = request.header("Authorization", auth);
}
request = request.header(
GREPTIME_DB_HEADER_TIMEOUT,
format_duration(self.timeout).to_string(),
);
let response = request.send().await.with_context(|_| HttpQuerySqlSnafu {
reason: format!("bad url: {}", url),
})?;

View File

@@ -15,7 +15,6 @@
use std::collections::HashSet;
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Duration;
use async_trait::async_trait;
use clap::{Parser, ValueEnum};
@@ -84,26 +83,14 @@ pub struct ExportCommand {
/// The basic authentication for connecting to the server
#[clap(long)]
auth_basic: Option<String>,
/// The timeout of invoking the database.
///
/// It is used to override the server-side timeout setting.
/// The default behavior will disable server-side default timeout(i.e. `0s`).
#[clap(long, value_parser = humantime::parse_duration)]
timeout: Option<Duration>,
}
impl ExportCommand {
pub async fn build(&self, guard: Vec<WorkerGuard>) -> Result<Instance> {
let (catalog, schema) = database::split_database(&self.database)?;
let database_client = DatabaseClient::new(
self.addr.clone(),
catalog.clone(),
self.auth_basic.clone(),
// Treats `None` as `0s` to disable server-side default timeout.
self.timeout.unwrap_or_default(),
);
let database_client =
DatabaseClient::new(self.addr.clone(), catalog.clone(), self.auth_basic.clone());
Ok(Instance::new(
Box::new(Export {

View File

@@ -14,7 +14,6 @@
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Duration;
use async_trait::async_trait;
use clap::{Parser, ValueEnum};
@@ -69,25 +68,13 @@ pub struct ImportCommand {
/// The basic authentication for connecting to the server
#[clap(long)]
auth_basic: Option<String>,
/// The timeout of invoking the database.
///
/// It is used to override the server-side timeout setting.
/// The default behavior will disable server-side default timeout(i.e. `0s`).
#[clap(long, value_parser = humantime::parse_duration)]
timeout: Option<Duration>,
}
impl ImportCommand {
pub async fn build(&self, guard: Vec<WorkerGuard>) -> Result<Instance> {
let (catalog, schema) = database::split_database(&self.database)?;
let database_client = DatabaseClient::new(
self.addr.clone(),
catalog.clone(),
self.auth_basic.clone(),
// Treats `None` as `0s` to disable server-side default timeout.
self.timeout.unwrap_or_default(),
);
let database_client =
DatabaseClient::new(self.addr.clone(), catalog.clone(), self.auth_basic.clone());
Ok(Instance::new(
Box::new(Import {

View File

@@ -21,14 +21,13 @@ use cache::{
TABLE_ROUTE_CACHE_NAME,
};
use catalog::kvbackend::{
CachedKvBackend, CachedKvBackendBuilder, KvBackendCatalogManager, MetaKvBackend,
CachedMetaKvBackend, CachedMetaKvBackendBuilder, KvBackendCatalogManager, MetaKvBackend,
};
use client::{Client, Database, OutputData, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_base::Plugins;
use common_config::Mode;
use common_error::ext::ErrorExt;
use common_meta::cache::{CacheRegistryBuilder, LayeredCacheRegistryBuilder};
use common_meta::kv_backend::KvBackendRef;
use common_query::Output;
use common_recordbatch::RecordBatches;
use common_telemetry::debug;
@@ -259,9 +258,8 @@ async fn create_query_engine(meta_addr: &str) -> Result<DatafusionQueryEngine> {
.context(StartMetaClientSnafu)?;
let meta_client = Arc::new(meta_client);
let cached_meta_backend = Arc::new(
CachedKvBackendBuilder::new(Arc::new(MetaKvBackend::new(meta_client.clone()))).build(),
);
let cached_meta_backend =
Arc::new(CachedMetaKvBackendBuilder::new(meta_client.clone()).build());
let layered_cache_builder = LayeredCacheRegistryBuilder::default().add_cache_registry(
CacheRegistryBuilder::default()
.add_cache(cached_meta_backend.clone())

View File

@@ -15,7 +15,7 @@
use std::sync::Arc;
use cache::{build_fundamental_cache_registry, with_default_composite_cache_registry};
use catalog::kvbackend::{CachedKvBackendBuilder, KvBackendCatalogManager, MetaKvBackend};
use catalog::kvbackend::{CachedMetaKvBackendBuilder, KvBackendCatalogManager, MetaKvBackend};
use clap::Parser;
use client::client_manager::NodeClients;
use common_base::Plugins;
@@ -246,12 +246,11 @@ impl StartCommand {
let cache_tti = meta_config.metadata_cache_tti;
// TODO(discord9): add a helper function to ease the creation of the cache registry and such
let cached_meta_backend =
CachedKvBackendBuilder::new(Arc::new(MetaKvBackend::new(meta_client.clone())))
.cache_max_capacity(cache_max_capacity)
.cache_ttl(cache_ttl)
.cache_tti(cache_tti)
.build();
let cached_meta_backend = CachedMetaKvBackendBuilder::new(meta_client.clone())
.cache_max_capacity(cache_max_capacity)
.cache_ttl(cache_ttl)
.cache_tti(cache_tti)
.build();
let cached_meta_backend = Arc::new(cached_meta_backend);
// Builds cache registry

View File

@@ -17,7 +17,7 @@ use std::time::Duration;
use async_trait::async_trait;
use cache::{build_fundamental_cache_registry, with_default_composite_cache_registry};
use catalog::kvbackend::{CachedKvBackendBuilder, KvBackendCatalogManager, MetaKvBackend};
use catalog::kvbackend::{CachedMetaKvBackendBuilder, KvBackendCatalogManager, MetaKvBackend};
use clap::Parser;
use client::client_manager::NodeClients;
use common_base::Plugins;
@@ -293,12 +293,11 @@ impl StartCommand {
.context(MetaClientInitSnafu)?;
// TODO(discord9): add a helper function to ease the creation of the cache registry and such
let cached_meta_backend =
CachedKvBackendBuilder::new(Arc::new(MetaKvBackend::new(meta_client.clone())))
.cache_max_capacity(cache_max_capacity)
.cache_ttl(cache_ttl)
.cache_tti(cache_tti)
.build();
let cached_meta_backend = CachedMetaKvBackendBuilder::new(meta_client.clone())
.cache_max_capacity(cache_max_capacity)
.cache_ttl(cache_ttl)
.cache_tti(cache_tti)
.build();
let cached_meta_backend = Arc::new(cached_meta_backend);
// Builds cache registry

View File

@@ -43,31 +43,6 @@ lazy_static::lazy_static! {
prometheus::register_int_gauge_vec!("greptime_app_version", "app version", &["version", "short_version", "app"]).unwrap();
}
/// Waits for the close signal; on Unix platforms this is SIGINT or SIGTERM
#[cfg(unix)]
async fn start_wait_for_close_signal() -> std::io::Result<()> {
use tokio::signal::unix::{signal, SignalKind};
let mut sigint = signal(SignalKind::interrupt())?;
let mut sigterm = signal(SignalKind::terminate())?;
tokio::select! {
_ = sigint.recv() => {
info!("Received SIGINT, shutting down");
}
_ = sigterm.recv() => {
info!("Received SIGTERM, shutting down");
}
}
Ok(())
}
/// Waits for the close signal; on non-Unix platforms this is Ctrl-C
#[cfg(not(unix))]
async fn start_wait_for_close_signal() -> std::io::Result<()> {
tokio::signal::ctrl_c().await
}
#[async_trait]
pub trait App: Send {
fn name(&self) -> &str;
@@ -94,9 +69,9 @@ pub trait App: Send {
self.start().await?;
if self.wait_signal() {
if let Err(e) = start_wait_for_close_signal().await {
error!(e; "Failed to listen for close signal");
// It's unusual to fail to listen for the close signal; maybe there's something unexpected in
if let Err(e) = tokio::signal::ctrl_c().await {
error!(e; "Failed to listen for ctrl-c signal");
// It's unusual to fail to listen for the ctrl-c signal; maybe there's something unexpected in
// the underlying system. So we stop the app, rather than running on regardless, to let people
// investigate the issue.
}

View File
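The removed helper selected over SIGINT and SIGTERM on Unix; the retained fallback is plain Ctrl-C. A minimal, self-contained sketch of wiring the fallback into an app lifecycle (the background worker is a stand-in for the `start`/`stop` methods of the `App` trait above):

use tokio::signal;

#[tokio::main]
async fn main() -> std::io::Result<()> {
    // Stand-in for the app's long-running work.
    let worker = tokio::spawn(async {
        loop {
            tokio::time::sleep(std::time::Duration::from_secs(1)).await;
        }
    });
    // Ctrl-C maps to SIGINT on Unix, so this is the portable lowest common denominator.
    signal::ctrl_c().await?;
    worker.abort(); // stand-in for graceful shutdown
    Ok(())
}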

@@ -20,13 +20,13 @@ use common_config::Configurable;
use common_grpc::channel_manager::{
DEFAULT_MAX_GRPC_RECV_MESSAGE_SIZE, DEFAULT_MAX_GRPC_SEND_MESSAGE_SIZE,
};
use common_options::datanode::{ClientOptions, DatanodeClientOptions};
use common_telemetry::logging::{LoggingOptions, SlowQueryOptions, DEFAULT_OTLP_ENDPOINT};
use common_wal::config::raft_engine::RaftEngineConfig;
use common_wal::config::DatanodeWalConfig;
use datanode::config::{DatanodeOptions, RegionEngineConfig, StorageConfig};
use file_engine::config::EngineConfig;
use frontend::frontend::FrontendOptions;
use frontend::service_config::datanode::DatanodeClientOptions;
use meta_client::MetaClientOptions;
use meta_srv::metasrv::MetasrvOptions;
use meta_srv::selector::SelectorType;
@@ -126,11 +126,10 @@ fn test_load_frontend_example_config() {
tracing_sample_ratio: Some(Default::default()),
..Default::default()
},
datanode: DatanodeClientOptions {
client: ClientOptions {
datanode: frontend::service_config::DatanodeOptions {
client: DatanodeClientOptions {
connect_timeout: Duration::from_secs(10),
tcp_nodelay: true,
..Default::default()
},
},
export_metrics: ExportMetricsOption {
@@ -167,8 +166,8 @@ fn test_load_metasrv_example_config() {
},
..Default::default()
},
datanode: DatanodeClientOptions {
client: ClientOptions {
datanode: meta_srv::metasrv::DatanodeOptions {
client: meta_srv::metasrv::DatanodeClientOptions {
timeout: Duration::from_secs(10),
connect_timeout: Duration::from_secs(10),
tcp_nodelay: true,

View File

@@ -16,12 +16,9 @@ common-error.workspace = true
common-macro.workspace = true
futures.workspace = true
paste = "1.0"
pin-project.workspace = true
serde = { version = "1.0", features = ["derive"] }
snafu.workspace = true
tokio.workspace = true
zeroize = { version = "1.6", default-features = false, features = ["alloc"] }
[dev-dependencies]
common-test-util.workspace = true
toml.workspace = true

View File

@@ -12,20 +12,12 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::future::Future;
use std::io;
use std::ops::Range;
use std::path::Path;
use std::pin::Pin;
use std::sync::Arc;
use std::task::{Context, Poll};
use async_trait::async_trait;
use bytes::{BufMut, Bytes};
use futures::AsyncRead;
use pin_project::pin_project;
use tokio::io::{AsyncReadExt as _, AsyncSeekExt as _};
use tokio::sync::Mutex;
use futures::{AsyncReadExt, AsyncSeekExt};
/// `Metadata` contains the metadata of a source.
pub struct Metadata {
@@ -69,7 +61,7 @@ pub trait RangeReader: Send + Unpin {
}
#[async_trait]
impl<R: ?Sized + RangeReader> RangeReader for &mut R {
impl<R: RangeReader + Send + Unpin> RangeReader for &mut R {
async fn metadata(&mut self) -> io::Result<Metadata> {
(*self).metadata().await
}
@@ -88,212 +80,26 @@ impl<R: ?Sized + RangeReader> RangeReader for &mut R {
}
}
/// `AsyncReadAdapter` adapts a `RangeReader` to an `AsyncRead`.
#[pin_project]
pub struct AsyncReadAdapter<R> {
/// The inner `RangeReader`.
/// Uses a `Mutex` to work around borrow-checker issues.
inner: Arc<Mutex<R>>,
/// The current position from the view of the reader.
position: u64,
/// The buffer for the read bytes.
buffer: Vec<u8>,
/// The length of the content.
content_length: u64,
/// The future for reading the next bytes.
#[pin]
read_fut: Option<Pin<Box<dyn Future<Output = io::Result<Bytes>> + Send>>>,
}
impl<R: RangeReader + 'static> AsyncReadAdapter<R> {
pub async fn new(inner: R) -> io::Result<Self> {
let mut inner = inner;
let metadata = inner.metadata().await?;
Ok(AsyncReadAdapter {
inner: Arc::new(Mutex::new(inner)),
position: 0,
buffer: Vec::new(),
content_length: metadata.content_length,
read_fut: None,
})
}
}
/// The maximum size per read for the inner reader in `AsyncReadAdapter`.
const MAX_SIZE_PER_READ: usize = 8 * 1024 * 1024; // 8MB
impl<R: RangeReader + 'static> AsyncRead for AsyncReadAdapter<R> {
fn poll_read(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut [u8],
) -> Poll<io::Result<usize>> {
let mut this = self.as_mut().project();
if *this.position >= *this.content_length {
return Poll::Ready(Ok(0));
}
if !this.buffer.is_empty() {
let to_read = this.buffer.len().min(buf.len());
buf[..to_read].copy_from_slice(&this.buffer[..to_read]);
this.buffer.drain(..to_read);
*this.position += to_read as u64;
return Poll::Ready(Ok(to_read));
}
if this.read_fut.is_none() {
let size = (*this.content_length - *this.position).min(MAX_SIZE_PER_READ as u64);
let range = *this.position..(*this.position + size);
let inner = this.inner.clone();
let fut = async move {
let mut inner = inner.lock().await;
inner.read(range).await
};
*this.read_fut = Some(Box::pin(fut));
}
match this
.read_fut
.as_mut()
.as_pin_mut()
.expect("checked above")
.poll(cx)
{
Poll::Pending => Poll::Pending,
Poll::Ready(Ok(bytes)) => {
*this.read_fut = None;
if !bytes.is_empty() {
this.buffer.extend_from_slice(&bytes);
self.poll_read(cx, buf)
} else {
Poll::Ready(Ok(0))
}
}
Poll::Ready(Err(e)) => {
*this.read_fut = None;
Poll::Ready(Err(e))
}
}
}
}
/// `RangeReaderAdapter` bridges `RangeReader` and `AsyncRead + AsyncSeek`.
pub struct RangeReaderAdapter<R>(pub R);
/// Implements `RangeReader` for a type that implements `AsyncRead + AsyncSeek`.
///
/// TODO(zhongzc): This is a temporary solution for porting the codebase from `AsyncRead + AsyncSeek` to `RangeReader`.
/// Once the codebase is fully ported to `RangeReader`, remove this implementation.
#[async_trait]
impl RangeReader for Vec<u8> {
impl<R: futures::AsyncRead + futures::AsyncSeek + Send + Unpin> RangeReader
for RangeReaderAdapter<R>
{
async fn metadata(&mut self) -> io::Result<Metadata> {
Ok(Metadata {
content_length: self.len() as u64,
})
let content_length = self.0.seek(io::SeekFrom::End(0)).await?;
Ok(Metadata { content_length })
}
async fn read(&mut self, mut range: Range<u64>) -> io::Result<Bytes> {
range.end = range.end.min(self.len() as u64);
let bytes = Bytes::copy_from_slice(&self[range.start as usize..range.end as usize]);
Ok(bytes)
}
}
/// `FileReader` is a `RangeReader` for reading a file.
pub struct FileReader {
content_length: u64,
position: u64,
file: tokio::fs::File,
}
impl FileReader {
/// Creates a new `FileReader` for the file at the given path.
pub async fn new(path: impl AsRef<Path>) -> io::Result<Self> {
let file = tokio::fs::File::open(path).await?;
let metadata = file.metadata().await?;
Ok(FileReader {
content_length: metadata.len(),
position: 0,
file,
})
}
}
#[async_trait]
impl RangeReader for FileReader {
async fn metadata(&mut self) -> io::Result<Metadata> {
Ok(Metadata {
content_length: self.content_length,
})
}
async fn read(&mut self, mut range: Range<u64>) -> io::Result<Bytes> {
if range.start != self.position {
self.file.seek(io::SeekFrom::Start(range.start)).await?;
self.position = range.start;
}
range.end = range.end.min(self.content_length);
if range.end <= self.position {
return Err(io::Error::new(
io::ErrorKind::UnexpectedEof,
"Start of range is out of bounds",
));
}
async fn read(&mut self, range: Range<u64>) -> io::Result<Bytes> {
let mut buf = vec![0; (range.end - range.start) as usize];
self.file.read_exact(&mut buf).await?;
self.position = range.end;
self.0.seek(io::SeekFrom::Start(range.start)).await?;
self.0.read_exact(&mut buf).await?;
Ok(Bytes::from(buf))
}
}
#[cfg(test)]
mod tests {
use common_test_util::temp_dir::create_named_temp_file;
use futures::io::AsyncReadExt as _;
use super::*;
#[tokio::test]
async fn test_async_read_adapter() {
let data = b"hello world";
let reader = Vec::from(data);
let mut adapter = AsyncReadAdapter::new(reader).await.unwrap();
let mut buf = Vec::new();
adapter.read_to_end(&mut buf).await.unwrap();
assert_eq!(buf, data);
}
#[tokio::test]
async fn test_async_read_adapter_large() {
let data = (0..20 * 1024 * 1024).map(|i| i as u8).collect::<Vec<u8>>();
let mut adapter = AsyncReadAdapter::new(data.clone()).await.unwrap();
let mut buf = Vec::new();
adapter.read_to_end(&mut buf).await.unwrap();
assert_eq!(buf, data);
}
#[tokio::test]
async fn test_file_reader() {
let file = create_named_temp_file();
let path = file.path();
let data = b"hello world";
tokio::fs::write(path, data).await.unwrap();
let mut reader = FileReader::new(path).await.unwrap();
let metadata = reader.metadata().await.unwrap();
assert_eq!(metadata.content_length, data.len() as u64);
let bytes = reader.read(0..metadata.content_length).await.unwrap();
assert_eq!(&*bytes, data);
let bytes = reader.read(0..5).await.unwrap();
assert_eq!(&*bytes, &data[..5]);
}
}

View File
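Since `RangeReaderAdapter` only requires `AsyncRead + AsyncSeek + Send + Unpin`, something like a `futures::io::Cursor` can be exposed through the `RangeReader` interface. A small usage sketch built only on the API shown above:

use futures::io::Cursor;

async fn demo() -> std::io::Result<()> {
    let mut reader = RangeReaderAdapter(Cursor::new(b"hello world".to_vec()));
    // `metadata` seeks to the end to discover the content length.
    let meta = reader.metadata().await?;
    assert_eq!(meta.content_length, 11);
    // `read` seeks to the range start and reads exactly `end - start` bytes.
    let bytes = reader.read(0..5).await?;
    assert_eq!(&bytes[..], &b"hello"[..]);
    Ok(())
}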

@@ -9,7 +9,7 @@ workspace = true
[features]
default = ["geo"]
geo = ["geohash", "h3o", "s2", "wkt", "geo-types", "dep:geo"]
geo = ["geohash", "h3o", "s2"]
[dependencies]
api.workspace = true
@@ -28,12 +28,9 @@ common-version.workspace = true
datafusion.workspace = true
datatypes.workspace = true
derive_more = { version = "1", default-features = false, features = ["display"] }
geo = { version = "0.29", optional = true }
geo-types = { version = "0.7", optional = true }
geohash = { version = "0.13", optional = true }
h3o = { version = "0.6", optional = true }
jsonb.workspace = true
nalgebra = "0.33"
num = "0.4"
num-traits = "0.2"
once_cell.workspace = true
@@ -47,10 +44,8 @@ sql.workspace = true
statrs = "0.16"
store-api.workspace = true
table.workspace = true
wkt = { version = "0.11", optional = true }
[dev-dependencies]
approx = "0.5"
ron = "0.7"
serde = { version = "1.0", features = ["derive"] }
tokio.workspace = true

View File

@@ -27,7 +27,6 @@ use crate::scalars::matches::MatchesFunction;
use crate::scalars::math::MathFunction;
use crate::scalars::numpy::NumpyFunction;
use crate::scalars::timestamp::TimestampFunction;
use crate::scalars::vector::VectorFunction;
use crate::system::SystemFunction;
use crate::table::TableFunction;
@@ -121,9 +120,6 @@ pub static FUNCTION_REGISTRY: Lazy<Arc<FunctionRegistry>> = Lazy::new(|| {
// Json related functions
JsonFunction::register(&function_registry);
// Vector related functions
VectorFunction::register(&function_registry);
// Geo functions
#[cfg(feature = "geo")]
crate::scalars::geo::GeoFunctions::register(&function_registry);

View File
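The `#[cfg(feature = "geo")]` guard above keeps optional function families out of default builds. A toy sketch of the same registration pattern (the registry type and function names here are illustrative, not the project's):

pub struct Registry(Vec<&'static str>);

impl Registry {
    pub fn register(&mut self, name: &'static str) {
        self.0.push(name);
    }
}

pub fn register_all(registry: &mut Registry) {
    // Always available.
    registry.register("abs");
    // Compiled and registered only when built with `--features geo`.
    #[cfg(feature = "geo")]
    registry.register("h3_latlng_to_cell");
}

fn main() {
    let mut registry = Registry(Vec::new());
    register_all(&mut registry);
    println!("{} functions registered", registry.0.len());
}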

@@ -21,7 +21,6 @@ pub mod json;
pub mod matches;
pub mod math;
pub mod numpy;
pub mod vector;
#[cfg(test)]
pub(crate) mod test;

View File

@@ -17,10 +17,7 @@ pub(crate) mod encoding;
mod geohash;
mod h3;
mod helpers;
mod measure;
mod relation;
mod s2;
mod wkt;
use crate::function_registry::FunctionRegistry;
@@ -51,7 +48,6 @@ impl GeoFunctions {
registry.register(Arc::new(h3::H3CellToChildrenSize));
registry.register(Arc::new(h3::H3CellToChildPos));
registry.register(Arc::new(h3::H3ChildPosToCell));
registry.register(Arc::new(h3::H3CellContains));
// h3 grid traversal
registry.register(Arc::new(h3::H3GridDisk));
@@ -59,27 +55,10 @@ impl GeoFunctions {
registry.register(Arc::new(h3::H3GridDistance));
registry.register(Arc::new(h3::H3GridPathCells));
// h3 measurement
registry.register(Arc::new(h3::H3CellDistanceSphereKm));
registry.register(Arc::new(h3::H3CellDistanceEuclideanDegree));
// s2
registry.register(Arc::new(s2::S2LatLngToCell));
registry.register(Arc::new(s2::S2CellLevel));
registry.register(Arc::new(s2::S2CellToToken));
registry.register(Arc::new(s2::S2CellParent));
// spatial data type
registry.register(Arc::new(wkt::LatLngToPointWkt));
// spatial relation
registry.register(Arc::new(relation::STContains));
registry.register(Arc::new(relation::STWithin));
registry.register(Arc::new(relation::STIntersects));
// spatial measure
registry.register(Arc::new(measure::STDistance));
registry.register(Arc::new(measure::STDistanceSphere));
registry.register(Arc::new(measure::STArea));
}
}

View File

@@ -23,8 +23,8 @@ use datatypes::prelude::ConcreteDataType;
use datatypes::scalars::{Scalar, ScalarVectorBuilder};
use datatypes::value::{ListValue, Value};
use datatypes::vectors::{
BooleanVectorBuilder, Float64VectorBuilder, Int32VectorBuilder, ListVectorBuilder,
MutableVector, StringVectorBuilder, UInt64VectorBuilder, UInt8VectorBuilder, VectorRef,
BooleanVectorBuilder, Int32VectorBuilder, ListVectorBuilder, MutableVector,
StringVectorBuilder, UInt64VectorBuilder, UInt8VectorBuilder, VectorRef,
};
use derive_more::Display;
use h3o::{CellIndex, LatLng, Resolution};
@@ -38,7 +38,6 @@ static CELL_TYPES: Lazy<Vec<ConcreteDataType>> = Lazy::new(|| {
vec![
ConcreteDataType::int64_datatype(),
ConcreteDataType::uint64_datatype(),
ConcreteDataType::string_datatype(),
]
});
@@ -953,181 +952,6 @@ impl Function for H3GridPathCells {
}
}
/// Tests whether any cell in the given set contains the given cell
#[derive(Clone, Debug, Default, Display)]
#[display("{}", self.name())]
pub struct H3CellContains;
impl Function for H3CellContains {
fn name(&self) -> &str {
"h3_cells_contains"
}
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
Ok(ConcreteDataType::boolean_datatype())
}
fn signature(&self) -> Signature {
let multi_cell_types = vec![
ConcreteDataType::list_datatype(ConcreteDataType::int64_datatype()),
ConcreteDataType::list_datatype(ConcreteDataType::uint64_datatype()),
ConcreteDataType::list_datatype(ConcreteDataType::string_datatype()),
ConcreteDataType::string_datatype(),
];
let mut signatures = Vec::with_capacity(multi_cell_types.len() * CELL_TYPES.len());
for multi_cell_type in &multi_cell_types {
for cell_type in CELL_TYPES.as_slice() {
signatures.push(TypeSignature::Exact(vec![
multi_cell_type.clone(),
cell_type.clone(),
]));
}
}
Signature::one_of(signatures, Volatility::Stable)
}
fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
ensure_columns_n!(columns, 2);
let cells_vec = &columns[0];
let cell_this_vec = &columns[1];
let size = cell_this_vec.len();
let mut results = BooleanVectorBuilder::with_capacity(size);
for i in 0..size {
let mut result = None;
if let (cells, Some(cell_this)) = (
cells_from_value(cells_vec.get(i))?,
cell_from_value(cell_this_vec.get(i))?,
) {
result = Some(false);
for cell_that in cells.iter() {
// Get `cell_that`'s resolution, find `cell_this`'s parent at
// that resolution, and test whether `cell_that` equals the parent.
let resolution = cell_that.resolution();
if let Some(cell_this_parent) = cell_this.parent(resolution) {
if cell_this_parent == *cell_that {
result = Some(true);
break;
}
}
}
}
results.push(result);
}
Ok(results.to_vector())
}
}
/// Gets the WGS84 great-circle distance between two cell centroids, in kilometers
#[derive(Clone, Debug, Default, Display)]
#[display("{}", self.name())]
pub struct H3CellDistanceSphereKm;
impl Function for H3CellDistanceSphereKm {
fn name(&self) -> &str {
"h3_distance_sphere_km"
}
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
Ok(ConcreteDataType::float64_datatype())
}
fn signature(&self) -> Signature {
signature_of_double_cells()
}
fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
ensure_columns_n!(columns, 2);
let cell_this_vec = &columns[0];
let cell_that_vec = &columns[1];
let size = cell_this_vec.len();
let mut results = Float64VectorBuilder::with_capacity(size);
for i in 0..size {
let result = match (
cell_from_value(cell_this_vec.get(i))?,
cell_from_value(cell_that_vec.get(i))?,
) {
(Some(cell_this), Some(cell_that)) => {
let centroid_this = LatLng::from(cell_this);
let centroid_that = LatLng::from(cell_that);
Some(centroid_this.distance_km(centroid_that))
}
_ => None,
};
results.push(result);
}
Ok(results.to_vector())
}
}
/// Gets the Euclidean distance between two cell centroids, in degrees
#[derive(Clone, Debug, Default, Display)]
#[display("{}", self.name())]
pub struct H3CellDistanceEuclideanDegree;
impl H3CellDistanceEuclideanDegree {
fn distance(centroid_this: LatLng, centroid_that: LatLng) -> f64 {
((centroid_this.lat() - centroid_that.lat()).powi(2)
+ (centroid_this.lng() - centroid_that.lng()).powi(2))
.sqrt()
}
}
impl Function for H3CellDistanceEuclideanDegree {
fn name(&self) -> &str {
"h3_distance_degree"
}
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
Ok(ConcreteDataType::float64_datatype())
}
fn signature(&self) -> Signature {
signature_of_double_cells()
}
fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
ensure_columns_n!(columns, 2);
let cell_this_vec = &columns[0];
let cell_that_vec = &columns[1];
let size = cell_this_vec.len();
let mut results = Float64VectorBuilder::with_capacity(size);
for i in 0..size {
let result = match (
cell_from_value(cell_this_vec.get(i))?,
cell_from_value(cell_that_vec.get(i))?,
) {
(Some(cell_this), Some(cell_that)) => {
let centroid_this = LatLng::from(cell_this);
let centroid_that = LatLng::from(cell_that);
let dist = Self::distance(centroid_this, centroid_that);
Some(dist)
}
_ => None,
};
results.push(result);
}
Ok(results.to_vector())
}
}
fn value_to_resolution(v: Value) -> Result<Resolution> {
let r = match v {
Value::Int8(v) => v as u8,
@@ -1249,126 +1073,7 @@ fn cell_from_value(v: Value) -> Result<Option<CellIndex>> {
})
.context(error::ExecuteSnafu)?,
),
Value::String(s) => Some(
CellIndex::from_str(s.as_utf8())
.map_err(|e| {
BoxedError::new(PlainError::new(
format!("H3 error: {}", e),
StatusCode::EngineExecuteQuery,
))
})
.context(error::ExecuteSnafu)?,
),
_ => None,
};
Ok(cell)
}
/// Extracts a cell array from all supported types, including:
/// - int64 list
/// - uint64 list
/// - string list
/// - comma-separated string
fn cells_from_value(v: Value) -> Result<Vec<CellIndex>> {
match v {
Value::List(list) => match list.datatype() {
ConcreteDataType::Int64(_) => list
.items()
.iter()
.map(|v| {
if let Value::Int64(v) = v {
CellIndex::try_from(*v as u64)
.map_err(|e| {
BoxedError::new(PlainError::new(
format!("H3 error: {}", e),
StatusCode::EngineExecuteQuery,
))
})
.context(error::ExecuteSnafu)
} else {
Err(BoxedError::new(PlainError::new(
"Invalid data type in array".to_string(),
StatusCode::EngineExecuteQuery,
)))
.context(error::ExecuteSnafu)
}
})
.collect::<Result<Vec<CellIndex>>>(),
ConcreteDataType::UInt64(_) => list
.items()
.iter()
.map(|v| {
if let Value::UInt64(v) = v {
CellIndex::try_from(*v)
.map_err(|e| {
BoxedError::new(PlainError::new(
format!("H3 error: {}", e),
StatusCode::EngineExecuteQuery,
))
})
.context(error::ExecuteSnafu)
} else {
Err(BoxedError::new(PlainError::new(
"Invalid data type in array".to_string(),
StatusCode::EngineExecuteQuery,
)))
.context(error::ExecuteSnafu)
}
})
.collect::<Result<Vec<CellIndex>>>(),
ConcreteDataType::String(_) => list
.items()
.iter()
.map(|v| {
if let Value::String(v) = v {
CellIndex::from_str(v.as_utf8().trim())
.map_err(|e| {
BoxedError::new(PlainError::new(
format!("H3 error: {}", e),
StatusCode::EngineExecuteQuery,
))
})
.context(error::ExecuteSnafu)
} else {
Err(BoxedError::new(PlainError::new(
"Invalid data type in array".to_string(),
StatusCode::EngineExecuteQuery,
)))
.context(error::ExecuteSnafu)
}
})
.collect::<Result<Vec<CellIndex>>>(),
_ => Ok(vec![]),
},
Value::String(csv) => {
let str_seq = csv.as_utf8().split(',');
str_seq
.map(|v| {
CellIndex::from_str(v.trim())
.map_err(|e| {
BoxedError::new(PlainError::new(
format!("H3 error: {}", e),
StatusCode::EngineExecuteQuery,
))
})
.context(error::ExecuteSnafu)
})
.collect::<Result<Vec<CellIndex>>>()
}
_ => Ok(vec![]),
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_h3_euclidean_distance() {
let point_this = LatLng::new(42.3521, -72.1235).expect("incorrect lat lng");
let point_that = LatLng::new(42.45, -72.1260).expect("incorrect lat lng");
let dist = H3CellDistanceEuclideanDegree::distance(point_this, point_that);
assert_eq!(dist, 0.09793191512474639);
}
}

View File
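The containment check in the removed `H3CellContains` reduces to one h3o call: walk the candidate cell up to the ancestor's resolution and compare. A standalone sketch of that core logic:

use h3o::{CellIndex, LatLng, Resolution};

// True if `cell` lies inside `ancestor` in the H3 hierarchy.
fn cell_contains(ancestor: CellIndex, cell: CellIndex) -> bool {
    cell.parent(ancestor.resolution()) == Some(ancestor)
}

fn main() {
    let ll = LatLng::new(42.35, -72.12).expect("valid lat/lng");
    let child = ll.to_cell(Resolution::Nine);
    let ancestor = ll.to_cell(Resolution::Five);
    assert!(cell_contains(ancestor, child));
}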

@@ -1,195 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use common_error::ext::{BoxedError, PlainError};
use common_error::status_code::StatusCode;
use common_query::error::{self, Result};
use common_query::prelude::{Signature, TypeSignature};
use datafusion::logical_expr::Volatility;
use datatypes::prelude::ConcreteDataType;
use datatypes::scalars::ScalarVectorBuilder;
use datatypes::vectors::{Float64VectorBuilder, MutableVector, VectorRef};
use derive_more::Display;
use geo::algorithm::line_measures::metric_spaces::Euclidean;
use geo::{Area, Distance, Haversine};
use geo_types::Geometry;
use snafu::ResultExt;
use super::helpers::{ensure_columns_len, ensure_columns_n};
use super::wkt::parse_wkt;
use crate::function::{Function, FunctionContext};
/// Returns the WGS84 (SRID: 4326) Euclidean distance between two geometry objects, in degrees
#[derive(Clone, Debug, Default, Display)]
#[display("{}", self.name())]
pub struct STDistance;
impl Function for STDistance {
fn name(&self) -> &str {
"st_distance"
}
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
Ok(ConcreteDataType::float64_datatype())
}
fn signature(&self) -> Signature {
Signature::new(
TypeSignature::Exact(vec![
ConcreteDataType::string_datatype(),
ConcreteDataType::string_datatype(),
]),
Volatility::Stable,
)
}
fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
ensure_columns_n!(columns, 2);
let wkt_this_vec = &columns[0];
let wkt_that_vec = &columns[1];
let size = wkt_this_vec.len();
let mut results = Float64VectorBuilder::with_capacity(size);
for i in 0..size {
let wkt_this = wkt_this_vec.get(i).as_string();
let wkt_that = wkt_that_vec.get(i).as_string();
let result = match (wkt_this, wkt_that) {
(Some(wkt_this), Some(wkt_that)) => {
let geom_this = parse_wkt(&wkt_this)?;
let geom_that = parse_wkt(&wkt_that)?;
Some(Euclidean::distance(&geom_this, &geom_that))
}
_ => None,
};
results.push(result);
}
Ok(results.to_vector())
}
}
/// Returns the great-circle distance between two geometry objects, in meters
#[derive(Clone, Debug, Default, Display)]
#[display("{}", self.name())]
pub struct STDistanceSphere;
impl Function for STDistanceSphere {
fn name(&self) -> &str {
"st_distance_sphere_m"
}
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
Ok(ConcreteDataType::float64_datatype())
}
fn signature(&self) -> Signature {
Signature::new(
TypeSignature::Exact(vec![
ConcreteDataType::string_datatype(),
ConcreteDataType::string_datatype(),
]),
Volatility::Stable,
)
}
fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
ensure_columns_n!(columns, 2);
let wkt_this_vec = &columns[0];
let wkt_that_vec = &columns[1];
let size = wkt_this_vec.len();
let mut results = Float64VectorBuilder::with_capacity(size);
for i in 0..size {
let wkt_this = wkt_this_vec.get(i).as_string();
let wkt_that = wkt_that_vec.get(i).as_string();
let result = match (wkt_this, wkt_that) {
(Some(wkt_this), Some(wkt_that)) => {
let geom_this = parse_wkt(&wkt_this)?;
let geom_that = parse_wkt(&wkt_that)?;
match (geom_this, geom_that) {
(Geometry::Point(this), Geometry::Point(that)) => {
Some(Haversine::distance(this, that))
}
_ => {
Err(BoxedError::new(PlainError::new(
"Great circle distance between non-point objects are not supported for now.".to_string(),
StatusCode::Unsupported,
))).context(error::ExecuteSnafu)?
}
}
}
_ => None,
};
results.push(result);
}
Ok(results.to_vector())
}
}
/// Returns the area of the given geometry object
#[derive(Clone, Debug, Default, Display)]
#[display("{}", self.name())]
pub struct STArea;
impl Function for STArea {
fn name(&self) -> &str {
"st_area"
}
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
Ok(ConcreteDataType::float64_datatype())
}
fn signature(&self) -> Signature {
Signature::new(
TypeSignature::Exact(vec![ConcreteDataType::string_datatype()]),
Volatility::Stable,
)
}
fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
ensure_columns_n!(columns, 1);
let wkt_vec = &columns[0];
let size = wkt_vec.len();
let mut results = Float64VectorBuilder::with_capacity(size);
for i in 0..size {
let wkt = wkt_vec.get(i).as_string();
let result = if let Some(wkt) = wkt {
let geom = parse_wkt(&wkt)?;
Some(geom.unsigned_area())
} else {
None
};
results.push(result);
}
Ok(results.to_vector())
}
}

View File
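The removed `st_distance`/`st_distance_sphere_m` pair maps directly onto geo's metric-space API. A hedged sketch using the same imports as the deleted file (coordinates are lng/lat in WGS84 degrees; values are illustrative):

use geo::algorithm::line_measures::metric_spaces::Euclidean;
use geo::{Distance, Haversine};
use geo_types::Point;

fn main() {
    let boston = Point::new(-71.06, 42.36);
    let nyc = Point::new(-74.01, 40.71);
    // Planar distance in coordinate units (degrees here).
    let degrees = Euclidean::distance(boston, nyc);
    // Great-circle distance in meters.
    let meters = Haversine::distance(boston, nyc);
    println!("{degrees:.4} deg, {meters:.0} m");
}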

@@ -1,190 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use common_query::error::Result;
use common_query::prelude::{Signature, TypeSignature};
use datafusion::logical_expr::Volatility;
use datatypes::prelude::ConcreteDataType;
use datatypes::scalars::ScalarVectorBuilder;
use datatypes::vectors::{BooleanVectorBuilder, MutableVector, VectorRef};
use derive_more::Display;
use geo::algorithm::contains::Contains;
use geo::algorithm::intersects::Intersects;
use geo::algorithm::within::Within;
use super::helpers::{ensure_columns_len, ensure_columns_n};
use super::wkt::parse_wkt;
use crate::function::{Function, FunctionContext};
/// Tests the spatial relationship: contains
#[derive(Clone, Debug, Default, Display)]
#[display("{}", self.name())]
pub struct STContains;
impl Function for STContains {
fn name(&self) -> &str {
"st_contains"
}
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
Ok(ConcreteDataType::boolean_datatype())
}
fn signature(&self) -> Signature {
Signature::new(
TypeSignature::Exact(vec![
ConcreteDataType::string_datatype(),
ConcreteDataType::string_datatype(),
]),
Volatility::Stable,
)
}
fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
ensure_columns_n!(columns, 2);
let wkt_this_vec = &columns[0];
let wkt_that_vec = &columns[1];
let size = wkt_this_vec.len();
let mut results = BooleanVectorBuilder::with_capacity(size);
for i in 0..size {
let wkt_this = wkt_this_vec.get(i).as_string();
let wkt_that = wkt_that_vec.get(i).as_string();
let result = match (wkt_this, wkt_that) {
(Some(wkt_this), Some(wkt_that)) => {
let geom_this = parse_wkt(&wkt_this)?;
let geom_that = parse_wkt(&wkt_that)?;
Some(geom_this.contains(&geom_that))
}
_ => None,
};
results.push(result);
}
Ok(results.to_vector())
}
}
/// Tests the spatial relationship: within
#[derive(Clone, Debug, Default, Display)]
#[display("{}", self.name())]
pub struct STWithin;
impl Function for STWithin {
fn name(&self) -> &str {
"st_within"
}
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
Ok(ConcreteDataType::boolean_datatype())
}
fn signature(&self) -> Signature {
Signature::new(
TypeSignature::Exact(vec![
ConcreteDataType::string_datatype(),
ConcreteDataType::string_datatype(),
]),
Volatility::Stable,
)
}
fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
ensure_columns_n!(columns, 2);
let wkt_this_vec = &columns[0];
let wkt_that_vec = &columns[1];
let size = wkt_this_vec.len();
let mut results = BooleanVectorBuilder::with_capacity(size);
for i in 0..size {
let wkt_this = wkt_this_vec.get(i).as_string();
let wkt_that = wkt_that_vec.get(i).as_string();
let result = match (wkt_this, wkt_that) {
(Some(wkt_this), Some(wkt_that)) => {
let geom_this = parse_wkt(&wkt_this)?;
let geom_that = parse_wkt(&wkt_that)?;
Some(geom_this.is_within(&geom_that))
}
_ => None,
};
results.push(result);
}
Ok(results.to_vector())
}
}
/// Tests the spatial relationship: intersects
#[derive(Clone, Debug, Default, Display)]
#[display("{}", self.name())]
pub struct STIntersects;
impl Function for STIntersects {
fn name(&self) -> &str {
"st_intersects"
}
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
Ok(ConcreteDataType::boolean_datatype())
}
fn signature(&self) -> Signature {
Signature::new(
TypeSignature::Exact(vec![
ConcreteDataType::string_datatype(),
ConcreteDataType::string_datatype(),
]),
Volatility::Stable,
)
}
fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
ensure_columns_n!(columns, 2);
let wkt_this_vec = &columns[0];
let wkt_that_vec = &columns[1];
let size = wkt_this_vec.len();
let mut results = BooleanVectorBuilder::with_capacity(size);
for i in 0..size {
let wkt_this = wkt_this_vec.get(i).as_string();
let wkt_that = wkt_that_vec.get(i).as_string();
let result = match (wkt_this, wkt_that) {
(Some(wkt_this), Some(wkt_that)) => {
let geom_this = parse_wkt(&wkt_this)?;
let geom_that = parse_wkt(&wkt_that)?;
Some(geom_this.intersects(&geom_that))
}
_ => None,
};
results.push(result);
}
Ok(results.to_vector())
}
}

View File
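The three removed predicates are thin wrappers over geo's relation traits. A self-contained sketch of those traits on simple shapes:

use geo::algorithm::contains::Contains;
use geo::algorithm::intersects::Intersects;
use geo::algorithm::within::Within;
use geo_types::{polygon, Point};

fn main() {
    let square = polygon![
        (x: 0.0, y: 0.0),
        (x: 4.0, y: 0.0),
        (x: 4.0, y: 4.0),
        (x: 0.0, y: 4.0),
    ];
    let p = Point::new(2.0, 2.0);
    assert!(square.contains(&p));   // st_contains
    assert!(p.is_within(&square));  // st_within
    assert!(square.intersects(&p)); // st_intersects
}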

@@ -1,100 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use common_error::ext::{BoxedError, PlainError};
use common_error::status_code::StatusCode;
use common_query::error::{self, Result};
use common_query::prelude::{Signature, TypeSignature};
use datafusion::logical_expr::Volatility;
use datatypes::prelude::ConcreteDataType;
use datatypes::scalars::ScalarVectorBuilder;
use datatypes::vectors::{MutableVector, StringVectorBuilder, VectorRef};
use derive_more::Display;
use geo_types::{Geometry, Point};
use once_cell::sync::Lazy;
use snafu::ResultExt;
use wkt::{ToWkt, TryFromWkt};
use super::helpers::{ensure_columns_len, ensure_columns_n};
use crate::function::{Function, FunctionContext};
static COORDINATE_TYPES: Lazy<Vec<ConcreteDataType>> = Lazy::new(|| {
vec![
ConcreteDataType::float32_datatype(),
ConcreteDataType::float64_datatype(),
]
});
/// Converts a latitude/longitude pair into a WKT point (WGS84, SRID: 4326)
#[derive(Clone, Debug, Default, Display)]
#[display("{}", self.name())]
pub struct LatLngToPointWkt;
impl Function for LatLngToPointWkt {
fn name(&self) -> &str {
"wkt_point_from_latlng"
}
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
Ok(ConcreteDataType::string_datatype())
}
fn signature(&self) -> Signature {
let mut signatures = Vec::new();
for coord_type in COORDINATE_TYPES.as_slice() {
signatures.push(TypeSignature::Exact(vec![
// latitude
coord_type.clone(),
// longitude
coord_type.clone(),
]));
}
Signature::one_of(signatures, Volatility::Stable)
}
fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
ensure_columns_n!(columns, 2);
let lat_vec = &columns[0];
let lng_vec = &columns[1];
let size = lat_vec.len();
let mut results = StringVectorBuilder::with_capacity(size);
for i in 0..size {
let lat = lat_vec.get(i).as_f64_lossy();
let lng = lng_vec.get(i).as_f64_lossy();
let result = match (lat, lng) {
(Some(lat), Some(lng)) => Some(Point::new(lng, lat).wkt_string()),
_ => None,
};
results.push(result.as_deref());
}
Ok(results.to_vector())
}
}
pub(super) fn parse_wkt(s: &str) -> Result<Geometry> {
Geometry::try_from_wkt_str(s)
.map_err(|e| {
BoxedError::new(PlainError::new(
format!("Fail to parse WKT: {}", e),
StatusCode::EngineExecuteQuery,
))
})
.context(error::ExecuteSnafu)
}

View File
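The removed `wkt_point_from_latlng` plus `parse_wkt` form a round trip through the wkt crate. A hedged sketch of that round trip with the same traits the file imported (note WKT is `POINT(x y)`, i.e. lng before lat):

use geo_types::{Geometry, Point};
use wkt::{ToWkt, TryFromWkt};

fn main() {
    let (lat, lng) = (42.36, -71.06);
    let wkt_string = Point::new(lng, lat).wkt_string(); // e.g. "POINT(-71.06 42.36)"
    let geom: Geometry<f64> = Geometry::try_from_wkt_str(&wkt_string).unwrap();
    assert!(matches!(geom, Geometry::Point(_)));
}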

@@ -15,7 +15,7 @@
use std::fmt::{self, Display};
use common_query::error::{InvalidFuncArgsSnafu, Result, UnsupportedInputDataTypeSnafu};
use common_query::prelude::{Signature, TypeSignature};
use common_query::prelude::Signature;
use datafusion::logical_expr::Volatility;
use datatypes::data_type::ConcreteDataType;
use datatypes::prelude::VectorRef;
@@ -41,24 +41,10 @@ impl Function for JsonPathExistsFunction {
}
fn signature(&self) -> Signature {
Signature::one_of(
Signature::exact(
vec![
TypeSignature::Exact(vec![
ConcreteDataType::json_datatype(),
ConcreteDataType::string_datatype(),
]),
TypeSignature::Exact(vec![
ConcreteDataType::null_datatype(),
ConcreteDataType::string_datatype(),
]),
TypeSignature::Exact(vec![
ConcreteDataType::json_datatype(),
ConcreteDataType::null_datatype(),
]),
TypeSignature::Exact(vec![
ConcreteDataType::null_datatype(),
ConcreteDataType::null_datatype(),
]),
ConcreteDataType::json_datatype(),
ConcreteDataType::string_datatype(),
],
Volatility::Immutable,
)
@@ -78,26 +64,25 @@ impl Function for JsonPathExistsFunction {
let paths = &columns[1];
let size = jsons.len();
let datatype = jsons.data_type();
let mut results = BooleanVectorBuilder::with_capacity(size);
match (jsons.data_type(), paths.data_type()) {
(ConcreteDataType::Binary(_), ConcreteDataType::String(_)) => {
match datatype {
// JSON data type uses binary vector
ConcreteDataType::Binary(_) => {
for i in 0..size {
let result = match (jsons.get_ref(i).as_binary(), paths.get_ref(i).as_string())
{
let json = jsons.get_ref(i);
let path = paths.get_ref(i);
let json = json.as_binary();
let path = path.as_string();
let result = match (json, path) {
(Ok(Some(json)), Ok(Some(path))) => {
// Get `JsonPath`.
let json_path = match jsonb::jsonpath::parse_json_path(path.as_bytes())
{
Ok(json_path) => json_path,
Err(_) => {
return InvalidFuncArgsSnafu {
err_msg: format!("Illegal json path: {:?}", path),
}
.fail();
}
};
jsonb::path_exists(json, json_path).ok()
let json_path = jsonb::jsonpath::parse_json_path(path.as_bytes());
match json_path {
Ok(json_path) => jsonb::path_exists(json, json_path).ok(),
Err(_) => None,
}
}
_ => None,
};
@@ -105,12 +90,6 @@ impl Function for JsonPathExistsFunction {
results.push(result);
}
}
// Any null argument causes the result to be NULL.
(ConcreteDataType::Null(_), ConcreteDataType::String(_)) => results.push_nulls(size),
(ConcreteDataType::Binary(_), ConcreteDataType::Null(_)) => results.push_nulls(size),
(ConcreteDataType::Null(_), ConcreteDataType::Null(_)) => results.push_nulls(size),
_ => {
return UnsupportedInputDataTypeSnafu {
function: NAME,
@@ -135,8 +114,8 @@ mod tests {
use std::sync::Arc;
use common_query::prelude::TypeSignature;
use datatypes::prelude::ScalarVector;
use datatypes::vectors::{BinaryVector, NullVector, StringVector};
use datatypes::scalars::ScalarVector;
use datatypes::vectors::{BinaryVector, StringVector};
use super::*;
@@ -154,27 +133,9 @@ mod tests {
assert!(matches!(json_path_exists.signature(),
Signature {
type_signature: TypeSignature::OneOf(valid_types),
type_signature: TypeSignature::Exact(valid_types),
volatility: Volatility::Immutable
} if valid_types ==
vec![
TypeSignature::Exact(vec![
ConcreteDataType::json_datatype(),
ConcreteDataType::string_datatype(),
]),
TypeSignature::Exact(vec![
ConcreteDataType::null_datatype(),
ConcreteDataType::string_datatype(),
]),
TypeSignature::Exact(vec![
ConcreteDataType::json_datatype(),
ConcreteDataType::null_datatype(),
]),
TypeSignature::Exact(vec![
ConcreteDataType::null_datatype(),
ConcreteDataType::null_datatype(),
]),
],
} if valid_types == vec![ConcreteDataType::json_datatype(), ConcreteDataType::string_datatype()]
));
let json_strings = [
@@ -182,15 +143,9 @@ mod tests {
r#"{"a": 4, "b": {"c": 6}, "c": 6}"#,
r#"{"a": 7, "b": 8, "c": {"a": 7}}"#,
r#"{"a": 7, "b": 8, "c": {"a": 7}}"#,
r#"[1, 2, 3]"#,
r#"null"#,
r#"{"a": 7, "b": 8, "c": {"a": 7}}"#,
r#"null"#,
];
let paths = vec![
"$.a.b.c", "$.b", "$.c.a", ".d", "$[0]", "$.a", "null", "null",
];
let expected = [false, true, true, false, true, false, false, false];
let paths = vec!["$.a.b.c", "$.b", "$.c.a", ".d"];
let results = [false, true, true, false];
let jsonbs = json_strings
.iter()
@@ -207,44 +162,11 @@ mod tests {
.eval(FunctionContext::default(), &args)
.unwrap();
// Test for non-nulls.
assert_eq!(8, vector.len());
for (i, real) in expected.iter().enumerate() {
assert_eq!(4, vector.len());
for (i, gt) in results.iter().enumerate() {
let result = vector.get_ref(i);
assert!(!result.is_null());
let val = result.as_boolean().unwrap().unwrap();
assert_eq!(val, *real);
let result = result.as_boolean().unwrap().unwrap();
assert_eq!(*gt, result);
}
// Test for path error.
let json_bytes = jsonb::parse_value("{}".as_bytes()).unwrap().to_vec();
let json = BinaryVector::from_vec(vec![json_bytes]);
let illegal_path = StringVector::from_vec(vec!["$..a"]);
let args: Vec<VectorRef> = vec![Arc::new(json), Arc::new(illegal_path)];
let err = json_path_exists.eval(FunctionContext::default(), &args);
assert!(err.is_err());
// Test for nulls.
let json_bytes = jsonb::parse_value("{}".as_bytes()).unwrap().to_vec();
let json = BinaryVector::from_vec(vec![json_bytes]);
let null_json = NullVector::new(1);
let path = StringVector::from_vec(vec!["$.a"]);
let null_path = NullVector::new(1);
let args: Vec<VectorRef> = vec![Arc::new(null_json), Arc::new(path)];
let result1 = json_path_exists
.eval(FunctionContext::default(), &args)
.unwrap();
let args: Vec<VectorRef> = vec![Arc::new(json), Arc::new(null_path)];
let result2 = json_path_exists
.eval(FunctionContext::default(), &args)
.unwrap();
assert_eq!(result1.len(), 1);
assert!(result1.get_ref(0).is_null());
assert_eq!(result2.len(), 1);
assert!(result2.get_ref(0).is_null());
}
}

View File
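The jsonb calls in the diff compose into a short, checkable pipeline: parse the document into binary JSONB, parse the path, then test existence. A standalone sketch using only calls that appear above:

fn main() {
    // Parse a JSON document into jsonb's binary representation...
    let json = jsonb::parse_value(r#"{"a": {"b": 1}}"#.as_bytes())
        .unwrap()
        .to_vec();
    // ...parse a JSON path, then test existence against the binary value.
    let path = jsonb::jsonpath::parse_json_path("$.a.b".as_bytes()).unwrap();
    assert!(jsonb::path_exists(&json, path).unwrap());
}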

@@ -22,12 +22,8 @@ use datafusion::arrow::compute::kernels::cmp::gt;
use datatypes::arrow::array::AsArray;
use datatypes::arrow::compute::cast;
use datatypes::arrow::compute::kernels::zip;
use datatypes::arrow::datatypes::{
DataType as ArrowDataType, Date32Type, Date64Type, TimestampMicrosecondType,
TimestampMillisecondType, TimestampNanosecondType, TimestampSecondType,
};
use datatypes::arrow::datatypes::{DataType as ArrowDataType, Date32Type};
use datatypes::prelude::ConcreteDataType;
use datatypes::types::TimestampType;
use datatypes::vectors::{Helper, VectorRef};
use snafu::{ensure, ResultExt};
@@ -38,47 +34,13 @@ pub struct GreatestFunction;
const NAME: &str = "greatest";
macro_rules! gt_time_types {
($ty: ident, $columns:expr) => {{
let column1 = $columns[0].to_arrow_array();
let column2 = $columns[1].to_arrow_array();
let column1 = column1.as_primitive::<$ty>();
let column2 = column2.as_primitive::<$ty>();
let boolean_array = gt(&column1, &column2).context(ArrowComputeSnafu)?;
let result = zip::zip(&boolean_array, &column1, &column2).context(ArrowComputeSnafu)?;
Helper::try_into_vector(&result).context(error::FromArrowArraySnafu)
}};
}
impl Function for GreatestFunction {
fn name(&self) -> &str {
NAME
}
fn return_type(&self, input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
ensure!(
input_types.len() == 2,
InvalidFuncArgsSnafu {
err_msg: format!(
"The length of the args is not correct, expect exactly two, have: {}",
input_types.len()
)
}
);
match &input_types[0] {
ConcreteDataType::String(_) => Ok(ConcreteDataType::datetime_datatype()),
ConcreteDataType::Date(_) => Ok(ConcreteDataType::date_datatype()),
ConcreteDataType::DateTime(_) => Ok(ConcreteDataType::datetime_datatype()),
ConcreteDataType::Timestamp(ts_type) => Ok(ConcreteDataType::Timestamp(*ts_type)),
_ => UnsupportedInputDataTypeSnafu {
function: NAME,
datatypes: input_types,
}
.fail(),
}
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
Ok(ConcreteDataType::date_datatype())
}
fn signature(&self) -> Signature {
@@ -87,11 +49,6 @@ impl Function for GreatestFunction {
vec![
ConcreteDataType::string_datatype(),
ConcreteDataType::date_datatype(),
ConcreteDataType::datetime_datatype(),
ConcreteDataType::timestamp_nanosecond_datatype(),
ConcreteDataType::timestamp_microsecond_datatype(),
ConcreteDataType::timestamp_millisecond_datatype(),
ConcreteDataType::timestamp_second_datatype(),
],
Volatility::Immutable,
)
@@ -109,32 +66,27 @@ impl Function for GreatestFunction {
);
match columns[0].data_type() {
ConcreteDataType::String(_) => {
// Treats string as `DateTime` type.
let column1 = cast(&columns[0].to_arrow_array(), &ArrowDataType::Date64)
let column1 = cast(&columns[0].to_arrow_array(), &ArrowDataType::Date32)
.context(ArrowComputeSnafu)?;
let column1 = column1.as_primitive::<Date64Type>();
let column2 = cast(&columns[1].to_arrow_array(), &ArrowDataType::Date64)
let column1 = column1.as_primitive::<Date32Type>();
let column2 = cast(&columns[1].to_arrow_array(), &ArrowDataType::Date32)
.context(ArrowComputeSnafu)?;
let column2 = column2.as_primitive::<Date64Type>();
let column2 = column2.as_primitive::<Date32Type>();
let boolean_array = gt(&column1, &column2).context(ArrowComputeSnafu)?;
let result =
zip::zip(&boolean_array, &column1, &column2).context(ArrowComputeSnafu)?;
Ok(Helper::try_into_vector(&result).context(error::FromArrowArraySnafu)?)
}
ConcreteDataType::Date(_) => {
let column1 = columns[0].to_arrow_array();
let column1 = column1.as_primitive::<Date32Type>();
let column2 = columns[1].to_arrow_array();
let column2 = column2.as_primitive::<Date32Type>();
let boolean_array = gt(&column1, &column2).context(ArrowComputeSnafu)?;
let result =
zip::zip(&boolean_array, &column1, &column2).context(ArrowComputeSnafu)?;
Ok(Helper::try_into_vector(&result).context(error::FromArrowArraySnafu)?)
}
ConcreteDataType::Date(_) => gt_time_types!(Date32Type, columns),
ConcreteDataType::DateTime(_) => gt_time_types!(Date64Type, columns),
ConcreteDataType::Timestamp(ts_type) => match ts_type {
TimestampType::Second(_) => gt_time_types!(TimestampSecondType, columns),
TimestampType::Millisecond(_) => {
gt_time_types!(TimestampMillisecondType, columns)
}
TimestampType::Microsecond(_) => {
gt_time_types!(TimestampMicrosecondType, columns)
}
TimestampType::Nanosecond(_) => {
gt_time_types!(TimestampNanosecondType, columns)
}
},
_ => UnsupportedInputDataTypeSnafu {
function: NAME,
datatypes: columns.iter().map(|c| c.data_type()).collect::<Vec<_>>(),
@@ -154,31 +106,19 @@ impl fmt::Display for GreatestFunction {
mod tests {
use std::sync::Arc;
use common_time::timestamp::TimeUnit;
use common_time::{Date, DateTime, Timestamp};
use datatypes::types::{
DateTimeType, DateType, TimestampMicrosecondType, TimestampMillisecondType,
TimestampNanosecondType, TimestampSecondType,
};
use common_time::Date;
use datatypes::prelude::ConcreteDataType;
use datatypes::types::DateType;
use datatypes::value::Value;
use datatypes::vectors::{
DateTimeVector, DateVector, StringVector, TimestampMicrosecondVector,
TimestampMillisecondVector, TimestampNanosecondVector, TimestampSecondVector, Vector,
};
use paste::paste;
use datatypes::vectors::{DateVector, StringVector, Vector};
use super::*;
#[test]
fn test_greatest_takes_string_vector() {
let function = GreatestFunction;
assert_eq!(
function
.return_type(&[
ConcreteDataType::string_datatype(),
ConcreteDataType::string_datatype()
])
.unwrap(),
ConcreteDataType::DateTime(DateTimeType)
function.return_type(&[]).unwrap(),
ConcreteDataType::Date(DateType)
);
let columns = vec![
Arc::new(StringVector::from(vec![
@@ -192,15 +132,15 @@ mod tests {
];
let result = function.eval(FunctionContext::default(), &columns).unwrap();
let result = result.as_any().downcast_ref::<DateTimeVector>().unwrap();
let result = result.as_any().downcast_ref::<DateVector>().unwrap();
assert_eq!(result.len(), 2);
assert_eq!(
result.get(0),
Value::DateTime(DateTime::from_str("2001-02-01 00:00:00", None).unwrap())
Value::Date(Date::from_str_utc("2001-02-01").unwrap())
);
assert_eq!(
result.get(1),
Value::DateTime(DateTime::from_str("2012-12-23 00:00:00", None).unwrap())
Value::Date(Date::from_str_utc("2012-12-23").unwrap())
);
}
@@ -208,15 +148,9 @@ mod tests {
fn test_greatest_takes_date_vector() {
let function = GreatestFunction;
assert_eq!(
function
.return_type(&[
ConcreteDataType::date_datatype(),
ConcreteDataType::date_datatype()
])
.unwrap(),
function.return_type(&[]).unwrap(),
ConcreteDataType::Date(DateType)
);
let columns = vec![
Arc::new(DateVector::from_slice(vec![-1, 2])) as _,
Arc::new(DateVector::from_slice(vec![0, 1])) as _,
@@ -234,81 +168,4 @@ mod tests {
Value::Date(Date::from_str_utc("1970-01-03").unwrap())
);
}
#[test]
fn test_greatest_takes_datetime_vector() {
let function = GreatestFunction;
assert_eq!(
function
.return_type(&[
ConcreteDataType::datetime_datatype(),
ConcreteDataType::datetime_datatype()
])
.unwrap(),
ConcreteDataType::DateTime(DateTimeType)
);
let columns = vec![
Arc::new(DateTimeVector::from_slice(vec![-1, 2])) as _,
Arc::new(DateTimeVector::from_slice(vec![0, 1])) as _,
];
let result = function.eval(FunctionContext::default(), &columns).unwrap();
let result = result.as_any().downcast_ref::<DateTimeVector>().unwrap();
assert_eq!(result.len(), 2);
assert_eq!(
result.get(0),
Value::DateTime(DateTime::from_str("1970-01-01 00:00:00", None).unwrap())
);
assert_eq!(
result.get(1),
Value::DateTime(DateTime::from_str("1970-01-01 00:00:00.002", None).unwrap())
);
}
macro_rules! test_timestamp {
($type: expr,$unit: ident) => {
paste! {
#[test]
fn [<test_greatest_takes_ $unit:lower _vector>]() {
let function = GreatestFunction;
assert_eq!(
function.return_type(&[$type, $type]).unwrap(),
ConcreteDataType::Timestamp(TimestampType::$unit([<Timestamp $unit Type>]))
);
let columns = vec![
Arc::new([<Timestamp $unit Vector>]::from_slice(vec![-1, 2])) as _,
Arc::new([<Timestamp $unit Vector>]::from_slice(vec![0, 1])) as _,
];
let result = function.eval(FunctionContext::default(), &columns).unwrap();
let result = result.as_any().downcast_ref::<[<Timestamp $unit Vector>]>().unwrap();
assert_eq!(result.len(), 2);
assert_eq!(
result.get(0),
Value::Timestamp(Timestamp::new(0, TimeUnit::$unit))
);
assert_eq!(
result.get(1),
Value::Timestamp(Timestamp::new(2, TimeUnit::$unit))
);
}
}
}
}
test_timestamp!(
ConcreteDataType::timestamp_nanosecond_datatype(),
Nanosecond
);
test_timestamp!(
ConcreteDataType::timestamp_microsecond_datatype(),
Microsecond
);
test_timestamp!(
ConcreteDataType::timestamp_millisecond_datatype(),
Millisecond
);
test_timestamp!(ConcreteDataType::timestamp_second_datatype(), Second);
}

View File
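The `gt_time_types!` macro is a compare-then-select over two arrow primitive arrays. A hedged sketch of the same two kernels on the arrow crate directly (the project reaches them through `datatypes::arrow` re-exports):

use arrow::array::{AsArray, Date32Array};
use arrow::compute::kernels::cmp::gt;
use arrow::compute::kernels::zip::zip;
use arrow::datatypes::Date32Type;

fn main() {
    let a = Date32Array::from(vec![-1, 2]);
    let b = Date32Array::from(vec![0, 1]);
    let mask = gt(&a, &b).unwrap(); // [false, true]
    // Take from `a` where the mask is true, else from `b`.
    let out = zip(&mask, &a, &b).unwrap();
    let out = out.as_primitive::<Date32Type>();
    assert_eq!(out.value(0), 0); // days since epoch
    assert_eq!(out.value(1), 2);
}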

@@ -1,35 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
mod convert;
mod distance;
use std::sync::Arc;
use crate::function_registry::FunctionRegistry;
pub(crate) struct VectorFunction;
impl VectorFunction {
pub fn register(registry: &FunctionRegistry) {
// conversion
registry.register(Arc::new(convert::ParseVectorFunction));
registry.register(Arc::new(convert::VectorToStringFunction));
// distance
registry.register(Arc::new(distance::CosDistanceFunction));
registry.register(Arc::new(distance::DotProductFunction));
registry.register(Arc::new(distance::L2SqDistanceFunction));
}
}

View File

@@ -1,160 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fmt::Display;
use common_query::error::{InvalidFuncArgsSnafu, InvalidVectorStringSnafu, Result};
use common_query::prelude::{Signature, Volatility};
use datatypes::prelude::ConcreteDataType;
use datatypes::scalars::ScalarVectorBuilder;
use datatypes::types::parse_string_to_vector_type_value;
use datatypes::vectors::{BinaryVectorBuilder, MutableVector, VectorRef};
use snafu::{ensure, ResultExt};
use crate::function::{Function, FunctionContext};
const NAME: &str = "parse_vec";
#[derive(Debug, Clone, Default)]
pub struct ParseVectorFunction;
impl Function for ParseVectorFunction {
fn name(&self) -> &str {
NAME
}
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
Ok(ConcreteDataType::binary_datatype())
}
fn signature(&self) -> Signature {
Signature::exact(
vec![ConcreteDataType::string_datatype()],
Volatility::Immutable,
)
}
fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
ensure!(
columns.len() == 1,
InvalidFuncArgsSnafu {
err_msg: format!(
"The length of the args is not correct, expect exactly one, have: {}",
columns.len()
),
}
);
let column = &columns[0];
let size = column.len();
let mut result = BinaryVectorBuilder::with_capacity(size);
for i in 0..size {
let value = column.get(i).as_string();
if let Some(value) = value {
let res = parse_string_to_vector_type_value(&value, None)
.context(InvalidVectorStringSnafu { vec_str: &value })?;
result.push(Some(&res));
} else {
result.push_null();
}
}
Ok(result.to_vector())
}
}
impl Display for ParseVectorFunction {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}", NAME.to_ascii_uppercase())
}
}
#[cfg(test)]
mod tests {
use std::sync::Arc;
use common_base::bytes::Bytes;
use datatypes::value::Value;
use datatypes::vectors::StringVector;
use super::*;
#[test]
fn test_parse_vector() {
let func = ParseVectorFunction;
let input = Arc::new(StringVector::from(vec![
Some("[1.0,2.0,3.0]".to_string()),
Some("[4.0,5.0,6.0]".to_string()),
None,
]));
let result = func.eval(FunctionContext::default(), &[input]).unwrap();
let result = result.as_ref();
assert_eq!(result.len(), 3);
assert_eq!(
result.get(0),
Value::Binary(Bytes::from(
[1.0f32, 2.0, 3.0]
.iter()
.flat_map(|e| e.to_le_bytes())
.collect::<Vec<u8>>()
))
);
assert_eq!(
result.get(1),
Value::Binary(Bytes::from(
[4.0f32, 5.0, 6.0]
.iter()
.flat_map(|e| e.to_le_bytes())
.collect::<Vec<u8>>()
))
);
assert!(result.get(2).is_null());
}
#[test]
fn test_parse_vector_error() {
let func = ParseVectorFunction;
let input = Arc::new(StringVector::from(vec![
Some("[1.0,2.0,3.0]".to_string()),
Some("[4.0,5.0,6.0]".to_string()),
Some("[7.0,8.0,9.0".to_string()),
]));
let result = func.eval(FunctionContext::default(), &[input]);
assert!(result.is_err());
let input = Arc::new(StringVector::from(vec![
Some("[1.0,2.0,3.0]".to_string()),
Some("[4.0,5.0,6.0]".to_string()),
Some("7.0,8.0,9.0]".to_string()),
]));
let result = func.eval(FunctionContext::default(), &[input]);
assert!(result.is_err());
let input = Arc::new(StringVector::from(vec![
Some("[1.0,2.0,3.0]".to_string()),
Some("[4.0,5.0,6.0]".to_string()),
Some("[7.0,hello,9.0]".to_string()),
]));
let result = func.eval(FunctionContext::default(), &[input]);
assert!(result.is_err());
}
}

View File
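For intuition, a simplified stand-in for `parse_string_to_vector_type_value` (the real helper lives in `datatypes`): parse a `[1.0,2.0,3.0]` literal into the little-endian f32 byte layout the tests above assert on, rejecting malformed input:

fn parse_vec_literal(s: &str) -> Option<Vec<u8>> {
    let inner = s.trim().strip_prefix('[')?.strip_suffix(']')?;
    let mut out = Vec::new();
    for part in inner.split(',') {
        let v: f32 = part.trim().parse().ok()?;
        out.extend_from_slice(&v.to_le_bytes());
    }
    Some(out)
}

fn main() {
    let bytes = parse_vec_literal("[1.0,2.0,3.0]").unwrap();
    assert_eq!(bytes.len(), 3 * std::mem::size_of::<f32>());
    assert!(parse_vec_literal("[7.0,8.0,9.0").is_none()); // unbalanced bracket
}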

@@ -1,139 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fmt::Display;
use common_query::error::{InvalidFuncArgsSnafu, Result};
use common_query::prelude::{Signature, Volatility};
use datatypes::prelude::ConcreteDataType;
use datatypes::scalars::ScalarVectorBuilder;
use datatypes::types::vector_type_value_to_string;
use datatypes::value::Value;
use datatypes::vectors::{MutableVector, StringVectorBuilder, VectorRef};
use snafu::ensure;
use crate::function::{Function, FunctionContext};
const NAME: &str = "vec_to_string";
#[derive(Debug, Clone, Default)]
pub struct VectorToStringFunction;
impl Function for VectorToStringFunction {
fn name(&self) -> &str {
NAME
}
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
Ok(ConcreteDataType::string_datatype())
}
fn signature(&self) -> Signature {
Signature::exact(
vec![ConcreteDataType::binary_datatype()],
Volatility::Immutable,
)
}
fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
ensure!(
columns.len() == 1,
InvalidFuncArgsSnafu {
err_msg: format!(
"The length of the args is not correct, expect exactly one, have: {}",
columns.len()
),
}
);
let column = &columns[0];
let size = column.len();
let mut result = StringVectorBuilder::with_capacity(size);
for i in 0..size {
let value = column.get(i);
match value {
Value::Binary(bytes) => {
let len = bytes.len();
if len % std::mem::size_of::<f32>() != 0 {
return InvalidFuncArgsSnafu {
err_msg: format!("Invalid binary length of vector: {}", len),
}
.fail();
}
let dim = len / std::mem::size_of::<f32>();
// Safety: `dim` is calculated from the length of `bytes` and is guaranteed to be valid
let res = vector_type_value_to_string(&bytes, dim as _).unwrap();
result.push(Some(&res));
}
Value::Null => {
result.push_null();
}
_ => {
return InvalidFuncArgsSnafu {
err_msg: format!("Invalid value type: {:?}", value.data_type()),
}
.fail();
}
}
}
Ok(result.to_vector())
}
}
impl Display for VectorToStringFunction {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}", NAME.to_ascii_uppercase())
}
}
#[cfg(test)]
mod tests {
use datatypes::value::Value;
use datatypes::vectors::BinaryVectorBuilder;
use super::*;
#[test]
fn test_vector_to_string() {
let func = VectorToStringFunction;
let mut builder = BinaryVectorBuilder::with_capacity(3);
builder.push(Some(
[1.0f32, 2.0, 3.0]
.iter()
.flat_map(|e| e.to_le_bytes())
.collect::<Vec<_>>()
.as_slice(),
));
builder.push(Some(
[4.0f32, 5.0, 6.0]
.iter()
.flat_map(|e| e.to_le_bytes())
.collect::<Vec<_>>()
.as_slice(),
));
builder.push_null();
let vector = builder.to_vector();
let result = func.eval(FunctionContext::default(), &[vector]).unwrap();
assert_eq!(result.len(), 3);
assert_eq!(result.get(0), Value::String("[1,2,3]".to_string().into()));
assert_eq!(result.get(1), Value::String("[4,5,6]".to_string().into()));
assert_eq!(result.get(2), Value::Null);
}
}
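
The rendering direction relies on `vector_type_value_to_string`. A hypothetical std-only stand-in that reproduces the `[1,2,3]` output the test expects (f32's `Display` drops the trailing `.0`; the name and signature here are assumptions for illustration):

fn vector_bytes_to_string(bytes: &[u8]) -> Option<String> {
    // Mirror the invalid-length check in `eval` above.
    if bytes.len() % std::mem::size_of::<f32>() != 0 {
        return None;
    }
    let elems: Vec<String> = bytes
        .chunks_exact(4)
        .map(|c| f32::from_le_bytes(c.try_into().unwrap()).to_string())
        .collect();
    Some(format!("[{}]", elems.join(",")))
}

fn main() {
    let bytes: Vec<u8> = [1.0f32, 2.0, 3.0]
        .iter()
        .flat_map(|e| e.to_le_bytes())
        .collect();
    assert_eq!(vector_bytes_to_string(&bytes).as_deref(), Some("[1,2,3]"));
}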

View File

@@ -1,482 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
mod cos;
mod dot;
mod l2sq;
use std::borrow::Cow;
use std::fmt::Display;
use std::sync::Arc;
use common_query::error::{InvalidFuncArgsSnafu, Result};
use common_query::prelude::Signature;
use datatypes::prelude::ConcreteDataType;
use datatypes::scalars::ScalarVectorBuilder;
use datatypes::value::ValueRef;
use datatypes::vectors::{Float32VectorBuilder, MutableVector, Vector, VectorRef};
use snafu::ensure;
use crate::function::{Function, FunctionContext};
use crate::helper;
macro_rules! define_distance_function {
($StructName:ident, $display_name:expr, $similarity_method:path) => {
/// A function that calculates the distance between two vectors.
#[derive(Debug, Clone, Default)]
pub struct $StructName;
impl Function for $StructName {
fn name(&self) -> &str {
$display_name
}
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
Ok(ConcreteDataType::float32_datatype())
}
fn signature(&self) -> Signature {
helper::one_of_sigs2(
vec![
ConcreteDataType::string_datatype(),
ConcreteDataType::binary_datatype(),
],
vec![
ConcreteDataType::string_datatype(),
ConcreteDataType::binary_datatype(),
],
)
}
fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
ensure!(
columns.len() == 2,
InvalidFuncArgsSnafu {
err_msg: format!(
"The length of the args is not correct, expect exactly two, have: {}",
columns.len()
),
}
);
let arg0 = &columns[0];
let arg1 = &columns[1];
let size = arg0.len();
let mut result = Float32VectorBuilder::with_capacity(size);
if size == 0 {
return Ok(result.to_vector());
}
let arg0_const = parse_if_constant_string(arg0)?;
let arg1_const = parse_if_constant_string(arg1)?;
for i in 0..size {
let vec0 = match arg0_const.as_ref() {
Some(a) => Some(Cow::Borrowed(a.as_slice())),
None => as_vector(arg0.get_ref(i))?,
};
let vec1 = match arg1_const.as_ref() {
Some(b) => Some(Cow::Borrowed(b.as_slice())),
None => as_vector(arg1.get_ref(i))?,
};
if let (Some(vec0), Some(vec1)) = (vec0, vec1) {
ensure!(
vec0.len() == vec1.len(),
InvalidFuncArgsSnafu {
err_msg: format!(
"The length of the vectors must match to calculate distance, have: {} vs {}",
vec0.len(),
vec1.len()
),
}
);
// Safe: the vector lengths were verified equal above.
let d = $similarity_method(vec0.as_ref(), vec1.as_ref());
result.push(Some(d));
} else {
result.push_null();
}
}
return Ok(result.to_vector());
}
}
impl Display for $StructName {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}", $display_name.to_ascii_uppercase())
}
}
}
}
define_distance_function!(CosDistanceFunction, "vec_cos_distance", cos::cos);
define_distance_function!(L2SqDistanceFunction, "vec_l2sq_distance", l2sq::l2sq);
define_distance_function!(DotProductFunction, "vec_dot_product", dot::dot);
/// Parses the vector value once up front if the argument is a constant string vector.
fn parse_if_constant_string(arg: &Arc<dyn Vector>) -> Result<Option<Vec<f32>>> {
if !arg.is_const() {
return Ok(None);
}
if arg.data_type() != ConcreteDataType::string_datatype() {
return Ok(None);
}
arg.get_ref(0)
.as_string()
.unwrap() // Safe: checked if it is a string
.map(parse_f32_vector_from_string)
.transpose()
}
/// Convert a value to a vector value.
/// Supported data types are binary and string.
fn as_vector(arg: ValueRef<'_>) -> Result<Option<Cow<'_, [f32]>>> {
match arg.data_type() {
ConcreteDataType::Binary(_) => arg
.as_binary()
.unwrap() // Safe: checked if it is a binary
.map(binary_as_vector)
.transpose(),
ConcreteDataType::String(_) => arg
.as_string()
.unwrap() // Safe: checked if it is a string
.map(|s| Ok(Cow::Owned(parse_f32_vector_from_string(s)?)))
.transpose(),
ConcreteDataType::Null(_) => Ok(None),
_ => InvalidFuncArgsSnafu {
err_msg: format!("Unsupported data type: {:?}", arg.data_type()),
}
.fail(),
}
}
/// Convert a u8 slice to a vector value.
fn binary_as_vector(bytes: &[u8]) -> Result<Cow<'_, [f32]>> {
if bytes.len() % std::mem::size_of::<f32>() != 0 {
return InvalidFuncArgsSnafu {
err_msg: format!("Invalid binary length of vector: {}", bytes.len()),
}
.fail();
}
if cfg!(target_endian = "little") {
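// Fast path: on little-endian targets the buffer already has the in-memory
// layout of `[f32]`, and the length check above guarantees a whole number of
// elements. Note this reinterpretation also assumes the buffer is suitably
// aligned for `f32` reads, which `from_raw_parts` requires.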
Ok(unsafe {
let vec = std::slice::from_raw_parts(
bytes.as_ptr() as *const f32,
bytes.len() / std::mem::size_of::<f32>(),
);
Cow::Borrowed(vec)
})
} else {
let v = bytes
.chunks_exact(std::mem::size_of::<f32>())
.map(|chunk| f32::from_le_bytes(chunk.try_into().unwrap()))
.collect::<Vec<f32>>();
Ok(Cow::Owned(v))
}
}
/// Parse a string to a vector value.
/// Valid inputs are strings like "[1.0, 2.0, 3.0]".
fn parse_f32_vector_from_string(s: &str) -> Result<Vec<f32>> {
let trimmed = s.trim();
if !trimmed.starts_with('[') || !trimmed.ends_with(']') {
return InvalidFuncArgsSnafu {
err_msg: format!(
"Failed to parse {s} to Vector value: not properly enclosed in brackets"
),
}
.fail();
}
let content = trimmed[1..trimmed.len() - 1].trim();
if content.is_empty() {
return Ok(Vec::new());
}
content
.split(',')
.map(|s| s.trim().parse::<f32>())
.collect::<std::result::Result<_, _>>()
.map_err(|e| {
InvalidFuncArgsSnafu {
err_msg: format!("Failed to parse {s} to Vector value: {e}"),
}
.build()
})
}
#[cfg(test)]
mod tests {
use std::sync::Arc;
use datatypes::vectors::{BinaryVector, ConstantVector, StringVector};
use super::*;
#[test]
fn test_distance_string_string() {
let funcs = [
Box::new(CosDistanceFunction {}) as Box<dyn Function>,
Box::new(L2SqDistanceFunction {}) as Box<dyn Function>,
Box::new(DotProductFunction {}) as Box<dyn Function>,
];
for func in funcs {
let vec1 = Arc::new(StringVector::from(vec![
Some("[0.0, 1.0]"),
Some("[1.0, 0.0]"),
None,
Some("[1.0, 0.0]"),
])) as VectorRef;
let vec2 = Arc::new(StringVector::from(vec![
Some("[0.0, 1.0]"),
Some("[0.0, 1.0]"),
Some("[0.0, 1.0]"),
None,
])) as VectorRef;
let result = func
.eval(FunctionContext::default(), &[vec1.clone(), vec2.clone()])
.unwrap();
assert!(!result.get(0).is_null());
assert!(!result.get(1).is_null());
assert!(result.get(2).is_null());
assert!(result.get(3).is_null());
let result = func
.eval(FunctionContext::default(), &[vec2, vec1])
.unwrap();
assert!(!result.get(0).is_null());
assert!(!result.get(1).is_null());
assert!(result.get(2).is_null());
assert!(result.get(3).is_null());
}
}
#[test]
fn test_distance_binary_binary() {
let funcs = [
Box::new(CosDistanceFunction {}) as Box<dyn Function>,
Box::new(L2SqDistanceFunction {}) as Box<dyn Function>,
Box::new(DotProductFunction {}) as Box<dyn Function>,
];
for func in funcs {
let vec1 = Arc::new(BinaryVector::from(vec![
Some(vec![0, 0, 0, 0, 0, 0, 128, 63]),
Some(vec![0, 0, 128, 63, 0, 0, 0, 0]),
None,
Some(vec![0, 0, 128, 63, 0, 0, 0, 0]),
])) as VectorRef;
let vec2 = Arc::new(BinaryVector::from(vec![
// [0.0, 1.0]
Some(vec![0, 0, 0, 0, 0, 0, 128, 63]),
Some(vec![0, 0, 0, 0, 0, 0, 128, 63]),
Some(vec![0, 0, 0, 0, 0, 0, 128, 63]),
None,
])) as VectorRef;
let result = func
.eval(FunctionContext::default(), &[vec1.clone(), vec2.clone()])
.unwrap();
assert!(!result.get(0).is_null());
assert!(!result.get(1).is_null());
assert!(result.get(2).is_null());
assert!(result.get(3).is_null());
let result = func
.eval(FunctionContext::default(), &[vec2, vec1])
.unwrap();
assert!(!result.get(0).is_null());
assert!(!result.get(1).is_null());
assert!(result.get(2).is_null());
assert!(result.get(3).is_null());
}
}
#[test]
fn test_distance_string_binary() {
let funcs = [
Box::new(CosDistanceFunction {}) as Box<dyn Function>,
Box::new(L2SqDistanceFunction {}) as Box<dyn Function>,
Box::new(DotProductFunction {}) as Box<dyn Function>,
];
for func in funcs {
let vec1 = Arc::new(StringVector::from(vec![
Some("[0.0, 1.0]"),
Some("[1.0, 0.0]"),
None,
Some("[1.0, 0.0]"),
])) as VectorRef;
let vec2 = Arc::new(BinaryVector::from(vec![
// [0.0, 1.0]
Some(vec![0, 0, 0, 0, 0, 0, 128, 63]),
Some(vec![0, 0, 0, 0, 0, 0, 128, 63]),
Some(vec![0, 0, 0, 0, 0, 0, 128, 63]),
None,
])) as VectorRef;
let result = func
.eval(FunctionContext::default(), &[vec1.clone(), vec2.clone()])
.unwrap();
assert!(!result.get(0).is_null());
assert!(!result.get(1).is_null());
assert!(result.get(2).is_null());
assert!(result.get(3).is_null());
let result = func
.eval(FunctionContext::default(), &[vec2, vec1])
.unwrap();
assert!(!result.get(0).is_null());
assert!(!result.get(1).is_null());
assert!(result.get(2).is_null());
assert!(result.get(3).is_null());
}
}
#[test]
fn test_distance_const_string() {
let funcs = [
Box::new(CosDistanceFunction {}) as Box<dyn Function>,
Box::new(L2SqDistanceFunction {}) as Box<dyn Function>,
Box::new(DotProductFunction {}) as Box<dyn Function>,
];
for func in funcs {
let const_str = Arc::new(ConstantVector::new(
Arc::new(StringVector::from(vec!["[0.0, 1.0]"])),
4,
));
let vec1 = Arc::new(StringVector::from(vec![
Some("[0.0, 1.0]"),
Some("[1.0, 0.0]"),
None,
Some("[1.0, 0.0]"),
])) as VectorRef;
let vec2 = Arc::new(BinaryVector::from(vec![
// [0.0, 1.0]
Some(vec![0, 0, 0, 0, 0, 0, 128, 63]),
Some(vec![0, 0, 0, 0, 0, 0, 128, 63]),
Some(vec![0, 0, 0, 0, 0, 0, 128, 63]),
None,
])) as VectorRef;
let result = func
.eval(
FunctionContext::default(),
&[const_str.clone(), vec1.clone()],
)
.unwrap();
assert!(!result.get(0).is_null());
assert!(!result.get(1).is_null());
assert!(result.get(2).is_null());
assert!(!result.get(3).is_null());
let result = func
.eval(
FunctionContext::default(),
&[vec1.clone(), const_str.clone()],
)
.unwrap();
assert!(!result.get(0).is_null());
assert!(!result.get(1).is_null());
assert!(result.get(2).is_null());
assert!(!result.get(3).is_null());
let result = func
.eval(
FunctionContext::default(),
&[const_str.clone(), vec2.clone()],
)
.unwrap();
assert!(!result.get(0).is_null());
assert!(!result.get(1).is_null());
assert!(!result.get(2).is_null());
assert!(result.get(3).is_null());
let result = func
.eval(
FunctionContext::default(),
&[vec2.clone(), const_str.clone()],
)
.unwrap();
assert!(!result.get(0).is_null());
assert!(!result.get(1).is_null());
assert!(!result.get(2).is_null());
assert!(result.get(3).is_null());
}
}
#[test]
fn test_invalid_vector_length() {
let funcs = [
Box::new(CosDistanceFunction {}) as Box<dyn Function>,
Box::new(L2SqDistanceFunction {}) as Box<dyn Function>,
Box::new(DotProductFunction {}) as Box<dyn Function>,
];
for func in funcs {
let vec1 = Arc::new(StringVector::from(vec!["[1.0]"])) as VectorRef;
let vec2 = Arc::new(StringVector::from(vec!["[1.0, 1.0]"])) as VectorRef;
let result = func.eval(FunctionContext::default(), &[vec1, vec2]);
assert!(result.is_err());
let vec1 = Arc::new(BinaryVector::from(vec![vec![0, 0, 128, 63]])) as VectorRef;
let vec2 =
Arc::new(BinaryVector::from(vec![vec![0, 0, 128, 63, 0, 0, 0, 64]])) as VectorRef;
let result = func.eval(FunctionContext::default(), &[vec1, vec2]);
assert!(result.is_err());
}
}
#[test]
fn test_parse_vector_from_string() {
let result = parse_f32_vector_from_string("[1.0, 2.0, 3.0]").unwrap();
assert_eq!(result, vec![1.0, 2.0, 3.0]);
let result = parse_f32_vector_from_string("[]").unwrap();
assert_eq!(result, Vec::<f32>::new());
let result = parse_f32_vector_from_string("[1.0, a, 3.0]");
assert!(result.is_err());
}
#[test]
fn test_binary_as_vector() {
let bytes = [0, 0, 128, 63];
let result = binary_as_vector(&bytes).unwrap();
assert_eq!(result.as_ref(), &[1.0]);
let invalid_bytes = [0, 0, 128];
let result = binary_as_vector(&invalid_bytes);
assert!(result.is_err());
}
}
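
Both input paths above reduce to the same two decoders; a condensed std-only sketch with error handling collapsed to `Option` (not the actual API):

fn parse_vec_str(s: &str) -> Option<Vec<f32>> {
    // Same bracket rules as `parse_f32_vector_from_string`.
    let inner = s.trim().strip_prefix('[')?.strip_suffix(']')?.trim();
    if inner.is_empty() {
        return Some(Vec::new());
    }
    inner
        .split(',')
        .map(|p| p.trim().parse::<f32>().ok())
        .collect()
}

fn parse_vec_bytes(bytes: &[u8]) -> Option<Vec<f32>> {
    // Same length rule as `binary_as_vector`, always decoding little-endian.
    if bytes.len() % 4 != 0 {
        return None;
    }
    Some(
        bytes
            .chunks_exact(4)
            .map(|c| f32::from_le_bytes(c.try_into().unwrap()))
            .collect(),
    )
}

fn main() {
    assert_eq!(parse_vec_str("[0.0, 1.0]"), Some(vec![0.0, 1.0]));
    // [0, 0, 128, 63] is 1.0f32 in little-endian, as in the binary tests above.
    assert_eq!(parse_vec_bytes(&[0, 0, 128, 63]), Some(vec![1.0]));
}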

View File

@@ -1,87 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use nalgebra::DVectorView;
/// Calculates the cosine distance between two vectors.
///
/// **Note:** The caller must ensure that the two vectors have the same length.
pub fn cos(lhs: &[f32], rhs: &[f32]) -> f32 {
let lhs_vec = DVectorView::from_slice(lhs, lhs.len());
let rhs_vec = DVectorView::from_slice(rhs, rhs.len());
let dot_product = lhs_vec.dot(&rhs_vec);
let lhs_norm = lhs_vec.norm();
let rhs_norm = rhs_vec.norm();
if dot_product.abs() < f32::EPSILON
|| lhs_norm.abs() < f32::EPSILON
|| rhs_norm.abs() < f32::EPSILON
{
return 1.0;
}
let cos_similar = dot_product / (lhs_norm * rhs_norm);
let res = 1.0 - cos_similar;
if res.abs() < f32::EPSILON {
0.0
} else {
res
}
}
#[cfg(test)]
mod tests {
use approx::assert_relative_eq;
use super::*;
#[test]
fn test_cos_scalar() {
let lhs = vec![1.0, 2.0, 3.0];
let rhs = vec![1.0, 2.0, 3.0];
assert_relative_eq!(cos(&lhs, &rhs), 0.0, epsilon = 1e-2);
let lhs = vec![1.0, 2.0, 3.0];
let rhs = vec![4.0, 5.0, 6.0];
assert_relative_eq!(cos(&lhs, &rhs), 0.025, epsilon = 1e-2);
let lhs = vec![1.0, 2.0, 3.0];
let rhs = vec![7.0, 8.0, 9.0];
assert_relative_eq!(cos(&lhs, &rhs), 0.04, epsilon = 1e-2);
let lhs = vec![0.0, 0.0, 0.0];
let rhs = vec![1.0, 2.0, 3.0];
assert_relative_eq!(cos(&lhs, &rhs), 1.0, epsilon = 1e-2);
let lhs = vec![0.0, 0.0, 0.0];
let rhs = vec![4.0, 5.0, 6.0];
assert_relative_eq!(cos(&lhs, &rhs), 1.0, epsilon = 1e-2);
let lhs = vec![0.0, 0.0, 0.0];
let rhs = vec![7.0, 8.0, 9.0];
assert_relative_eq!(cos(&lhs, &rhs), 1.0, epsilon = 1e-2);
let lhs = vec![7.0, 8.0, 9.0];
let rhs = vec![1.0, 2.0, 3.0];
assert_relative_eq!(cos(&lhs, &rhs), 0.04, epsilon = 1e-2);
let lhs = vec![7.0, 8.0, 9.0];
let rhs = vec![4.0, 5.0, 6.0];
assert_relative_eq!(cos(&lhs, &rhs), 0.0, epsilon = 1e-2);
let lhs = vec![7.0, 8.0, 9.0];
let rhs = vec![7.0, 8.0, 9.0];
assert_relative_eq!(cos(&lhs, &rhs), 0.0, epsilon = 1e-2);
}
}
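
For reference, the same cosine distance without nalgebra, making the formula explicit: d(a, b) = 1 - (a . b) / (|a| * |b|), with zero-norm inputs mapped to 1.0 (a simplified sketch of the function above, under the same equal-length precondition):

fn cos_distance(a: &[f32], b: &[f32]) -> f32 {
    let dot: f32 = a.iter().zip(b).map(|(x, y)| x * y).sum();
    let norm_a = a.iter().map(|x| x * x).sum::<f32>().sqrt();
    let norm_b = b.iter().map(|x| x * x).sum::<f32>().sqrt();
    if norm_a < f32::EPSILON || norm_b < f32::EPSILON {
        // Degenerate (zero) input: treated as maximally distant.
        return 1.0;
    }
    1.0 - dot / (norm_a * norm_b)
}

fn main() {
    // [1,2,3] vs [4,5,6]: dot = 32, norms are sqrt(14) and sqrt(77),
    // so d = 1 - 32/32.83 = 0.0254, matching the 0.025 test expectation.
    let d = cos_distance(&[1.0, 2.0, 3.0], &[4.0, 5.0, 6.0]);
    assert!((d - 0.025).abs() < 1e-2);
}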

View File

@@ -1,71 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use nalgebra::DVectorView;
/// Calculates the dot product of two vectors.
///
/// **Note:** The caller must ensure that the two vectors have the same length.
pub fn dot(lhs: &[f32], rhs: &[f32]) -> f32 {
let lhs = DVectorView::from_slice(lhs, lhs.len());
let rhs = DVectorView::from_slice(rhs, rhs.len());
lhs.dot(&rhs)
}
#[cfg(test)]
mod tests {
use approx::assert_relative_eq;
use super::*;
#[test]
fn test_dot_scalar() {
let lhs = vec![1.0, 2.0, 3.0];
let rhs = vec![1.0, 2.0, 3.0];
assert_relative_eq!(dot(&lhs, &rhs), 14.0, epsilon = 1e-2);
let lhs = vec![1.0, 2.0, 3.0];
let rhs = vec![4.0, 5.0, 6.0];
assert_relative_eq!(dot(&lhs, &rhs), 32.0, epsilon = 1e-2);
let lhs = vec![1.0, 2.0, 3.0];
let rhs = vec![7.0, 8.0, 9.0];
assert_relative_eq!(dot(&lhs, &rhs), 50.0, epsilon = 1e-2);
let lhs = vec![0.0, 0.0, 0.0];
let rhs = vec![1.0, 2.0, 3.0];
assert_relative_eq!(dot(&lhs, &rhs), 0.0, epsilon = 1e-2);
let lhs = vec![0.0, 0.0, 0.0];
let rhs = vec![4.0, 5.0, 6.0];
assert_relative_eq!(dot(&lhs, &rhs), 0.0, epsilon = 1e-2);
let lhs = vec![0.0, 0.0, 0.0];
let rhs = vec![7.0, 8.0, 9.0];
assert_relative_eq!(dot(&lhs, &rhs), 0.0, epsilon = 1e-2);
let lhs = vec![7.0, 8.0, 9.0];
let rhs = vec![1.0, 2.0, 3.0];
assert_relative_eq!(dot(&lhs, &rhs), 50.0, epsilon = 1e-2);
let lhs = vec![7.0, 8.0, 9.0];
let rhs = vec![4.0, 5.0, 6.0];
assert_relative_eq!(dot(&lhs, &rhs), 122.0, epsilon = 1e-2);
let lhs = vec![7.0, 8.0, 9.0];
let rhs = vec![7.0, 8.0, 9.0];
assert_relative_eq!(dot(&lhs, &rhs), 194.0, epsilon = 1e-2);
}
}
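
The plain-loop equivalent of `dot`, with the first test value worked out (illustrative only):

fn dot_plain(a: &[f32], b: &[f32]) -> f32 {
    a.iter().zip(b).map(|(x, y)| x * y).sum()
}

fn main() {
    // [1,2,3] . [1,2,3] = 1 + 4 + 9 = 14, as the first assertion expects.
    assert_eq!(dot_plain(&[1.0, 2.0, 3.0], &[1.0, 2.0, 3.0]), 14.0);
}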

View File

@@ -1,71 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use nalgebra::DVectorView;
/// Calculates the squared L2 distance between two vectors.
///
/// **Note:** The caller must ensure that the two vectors have the same length.
pub fn l2sq(lhs: &[f32], rhs: &[f32]) -> f32 {
let lhs = DVectorView::from_slice(lhs, lhs.len());
let rhs = DVectorView::from_slice(rhs, rhs.len());
(lhs - rhs).norm_squared()
}
#[cfg(test)]
mod tests {
use approx::assert_relative_eq;
use super::*;
#[test]
fn test_l2sq_scalar() {
let lhs = vec![1.0, 2.0, 3.0];
let rhs = vec![1.0, 2.0, 3.0];
assert_relative_eq!(l2sq(&lhs, &rhs), 0.0, epsilon = 1e-2);
let lhs = vec![1.0, 2.0, 3.0];
let rhs = vec![4.0, 5.0, 6.0];
assert_relative_eq!(l2sq(&lhs, &rhs), 27.0, epsilon = 1e-2);
let lhs = vec![1.0, 2.0, 3.0];
let rhs = vec![7.0, 8.0, 9.0];
assert_relative_eq!(l2sq(&lhs, &rhs), 108.0, epsilon = 1e-2);
let lhs = vec![0.0, 0.0, 0.0];
let rhs = vec![1.0, 2.0, 3.0];
assert_relative_eq!(l2sq(&lhs, &rhs), 14.0, epsilon = 1e-2);
let lhs = vec![0.0, 0.0, 0.0];
let rhs = vec![4.0, 5.0, 6.0];
assert_relative_eq!(l2sq(&lhs, &rhs), 77.0, epsilon = 1e-2);
let lhs = vec![0.0, 0.0, 0.0];
let rhs = vec![7.0, 8.0, 9.0];
assert_relative_eq!(l2sq(&lhs, &rhs), 194.0, epsilon = 1e-2);
let lhs = vec![7.0, 8.0, 9.0];
let rhs = vec![1.0, 2.0, 3.0];
assert_relative_eq!(l2sq(&lhs, &rhs), 108.0, epsilon = 1e-2);
let lhs = vec![7.0, 8.0, 9.0];
let rhs = vec![4.0, 5.0, 6.0];
assert_relative_eq!(l2sq(&lhs, &rhs), 27.0, epsilon = 1e-2);
let lhs = vec![7.0, 8.0, 9.0];
let rhs = vec![7.0, 8.0, 9.0];
assert_relative_eq!(l2sq(&lhs, &rhs), 0.0, epsilon = 1e-2);
}
}
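
And the expanded form of the squared L2 distance: the sum of squared element differences (a sketch under the same equal-length precondition):

fn l2sq_plain(a: &[f32], b: &[f32]) -> f32 {
    a.iter().zip(b).map(|(x, y)| (x - y) * (x - y)).sum()
}

fn main() {
    // [1,2,3] vs [4,5,6]: three squared differences of 9 each, so 27,
    // matching the second test case above.
    assert_eq!(l2sq_plain(&[1.0, 2.0, 3.0], &[4.0, 5.0, 6.0]), 27.0);
}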

View File

@@ -14,30 +14,28 @@
use api::helper::ColumnDataTypeWrapper;
use api::v1::add_column_location::LocationType;
use api::v1::alter_table_expr::Kind;
use api::v1::column_def::as_fulltext_option;
use api::v1::alter_expr::Kind;
use api::v1::{
column_def, AddColumnLocation as Location, AlterTableExpr, Analyzer, CreateTableExpr,
DropColumns, ModifyColumnTypes, RenameTable, SemanticType,
column_def, AddColumnLocation as Location, AlterExpr, ChangeColumnTypes, CreateTableExpr,
DropColumns, RenameTable, SemanticType,
};
use common_query::AddColumnLocation;
use datatypes::schema::{ColumnSchema, FulltextOptions, RawSchema};
use datatypes::schema::{ColumnSchema, RawSchema};
use snafu::{ensure, OptionExt, ResultExt};
use store_api::region_request::{SetRegionOption, UnsetRegionOption};
use store_api::region_request::ChangeOption;
use table::metadata::TableId;
use table::requests::{AddColumnRequest, AlterKind, AlterTableRequest, ModifyColumnTypeRequest};
use table::requests::{AddColumnRequest, AlterKind, AlterTableRequest, ChangeColumnTypeRequest};
use crate::error::{
InvalidColumnDefSnafu, InvalidSetFulltextOptionRequestSnafu, InvalidSetTableOptionRequestSnafu,
InvalidUnsetTableOptionRequestSnafu, MissingFieldSnafu, MissingTimestampColumnSnafu, Result,
UnknownLocationTypeSnafu,
InvalidChangeTableOptionRequestSnafu, InvalidColumnDefSnafu, MissingFieldSnafu,
MissingTimestampColumnSnafu, Result, UnknownLocationTypeSnafu,
};
const LOCATION_TYPE_FIRST: i32 = LocationType::First as i32;
const LOCATION_TYPE_AFTER: i32 = LocationType::After as i32;
/// Convert an [`AlterTableExpr`] to an [`AlterTableRequest`]
pub fn alter_expr_to_request(table_id: TableId, expr: AlterTableExpr) -> Result<AlterTableRequest> {
/// Convert an [`AlterExpr`] to an [`AlterTableRequest`]
pub fn alter_expr_to_request(table_id: TableId, expr: AlterExpr) -> Result<AlterTableRequest> {
let catalog_name = expr.catalog_name;
let schema_name = expr.schema_name;
let kind = expr.kind.context(MissingFieldSnafu { field: "kind" })?;
@@ -68,25 +66,25 @@ pub fn alter_expr_to_request(table_id: TableId, expr: AlterTableExpr) -> Result<
columns: add_column_requests,
}
}
Kind::ModifyColumnTypes(ModifyColumnTypes {
modify_column_types,
Kind::ChangeColumnTypes(ChangeColumnTypes {
change_column_types,
}) => {
let modify_column_type_requests = modify_column_types
let change_column_type_requests = change_column_types
.into_iter()
.map(|cct| {
let target_type =
ColumnDataTypeWrapper::new(cct.target_type(), cct.target_type_extension)
.into();
Ok(ModifyColumnTypeRequest {
Ok(ChangeColumnTypeRequest {
column_name: cct.column_name,
target_type,
})
})
.collect::<Result<Vec<_>>>()?;
AlterKind::ModifyColumnTypes {
columns: modify_column_type_requests,
AlterKind::ChangeColumnTypes {
columns: change_column_type_requests,
}
}
Kind::DropColumns(DropColumns { drop_columns }) => AlterKind::DropColumns {
@@ -95,36 +93,14 @@ pub fn alter_expr_to_request(table_id: TableId, expr: AlterTableExpr) -> Result<
Kind::RenameTable(RenameTable { new_table_name }) => {
AlterKind::RenameTable { new_table_name }
}
Kind::SetTableOptions(api::v1::SetTableOptions { table_options }) => {
AlterKind::SetTableOptions {
options: table_options
.iter()
.map(SetRegionOption::try_from)
.collect::<std::result::Result<Vec<_>, _>>()
.context(InvalidSetTableOptionRequestSnafu)?,
}
}
Kind::UnsetTableOptions(api::v1::UnsetTableOptions { keys }) => {
AlterKind::UnsetTableOptions {
keys: keys
.iter()
.map(|key| UnsetRegionOption::try_from(key.as_str()))
.collect::<std::result::Result<Vec<_>, _>>()
.context(InvalidUnsetTableOptionRequestSnafu)?,
}
}
Kind::SetColumnFulltext(c) => AlterKind::SetColumnFulltext {
column_name: c.column_name,
options: FulltextOptions {
enable: c.enable,
analyzer: as_fulltext_option(
Analyzer::try_from(c.analyzer).context(InvalidSetFulltextOptionRequestSnafu)?,
),
case_sensitive: c.case_sensitive,
},
},
Kind::UnsetColumnFulltext(c) => AlterKind::UnsetColumnFulltext {
column_name: c.column_name,
Kind::ChangeTableOptions(api::v1::ChangeTableOptions {
change_table_options,
}) => AlterKind::ChangeTableOptions {
options: change_table_options
.iter()
.map(ChangeOption::try_from)
.collect::<std::result::Result<Vec<_>, _>>()
.context(InvalidChangeTableOptionRequestSnafu)?,
},
};
@@ -194,7 +170,7 @@ fn parse_location(location: Option<Location>) -> Result<Option<AddColumnLocation
#[cfg(test)]
mod tests {
use api::v1::{
AddColumn, AddColumns, ColumnDataType, ColumnDef, DropColumn, ModifyColumnType,
AddColumn, AddColumns, ChangeColumnType, ColumnDataType, ColumnDef, DropColumn,
SemanticType,
};
use datatypes::prelude::ConcreteDataType;
@@ -203,7 +179,7 @@ mod tests {
#[test]
fn test_alter_expr_to_request() {
let expr = AlterTableExpr {
let expr = AlterExpr {
catalog_name: String::default(),
schema_name: String::default(),
table_name: "monitor".to_string(),
@@ -244,7 +220,7 @@ mod tests {
#[test]
fn test_alter_expr_with_location_to_request() {
let expr = AlterTableExpr {
let expr = AlterExpr {
catalog_name: String::default(),
schema_name: String::default(),
table_name: "monitor".to_string(),
@@ -320,14 +296,14 @@ mod tests {
}
#[test]
fn test_modify_column_type_expr() {
let expr = AlterTableExpr {
fn test_change_column_type_expr() {
let expr = AlterExpr {
catalog_name: "test_catalog".to_string(),
schema_name: "test_schema".to_string(),
table_name: "monitor".to_string(),
kind: Some(Kind::ModifyColumnTypes(ModifyColumnTypes {
modify_column_types: vec![ModifyColumnType {
kind: Some(Kind::ChangeColumnTypes(ChangeColumnTypes {
change_column_types: vec![ChangeColumnType {
column_name: "mem_usage".to_string(),
target_type: ColumnDataType::String as i32,
target_type_extension: None,
@@ -340,22 +316,22 @@ mod tests {
assert_eq!(alter_request.schema_name, "test_schema");
assert_eq!("monitor".to_string(), alter_request.table_name);
let mut modify_column_types = match alter_request.alter_kind {
AlterKind::ModifyColumnTypes { columns } => columns,
let mut change_column_types = match alter_request.alter_kind {
AlterKind::ChangeColumnTypes { columns } => columns,
_ => unreachable!(),
};
let modify_column_type = modify_column_types.pop().unwrap();
assert_eq!("mem_usage", modify_column_type.column_name);
let change_column_type = change_column_types.pop().unwrap();
assert_eq!("mem_usage", change_column_type.column_name);
assert_eq!(
ConcreteDataType::string_datatype(),
modify_column_type.target_type
change_column_type.target_type
);
}
#[test]
fn test_drop_column_expr() {
let expr = AlterTableExpr {
let expr = AlterExpr {
catalog_name: "test_catalog".to_string(),
schema_name: "test_schema".to_string(),
table_name: "monitor".to_string(),

View File

@@ -120,25 +120,11 @@ pub enum Error {
location: Location,
},
#[snafu(display("Invalid set table option request"))]
InvalidSetTableOptionRequest {
#[snafu(display("Invalid change table option request"))]
InvalidChangeTableOptionRequest {
#[snafu(source)]
error: MetadataError,
},
#[snafu(display("Invalid unset table option request"))]
InvalidUnsetTableOptionRequest {
#[snafu(source)]
error: MetadataError,
},
#[snafu(display("Invalid set fulltext option request"))]
InvalidSetFulltextOptionRequest {
#[snafu(implicit)]
location: Location,
#[snafu(source)]
error: prost::DecodeError,
},
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -162,9 +148,7 @@ impl ErrorExt for Error {
Error::UnknownColumnDataType { .. } | Error::InvalidFulltextColumnType { .. } => {
StatusCode::InvalidArguments
}
Error::InvalidSetTableOptionRequest { .. }
| Error::InvalidUnsetTableOptionRequest { .. }
| Error::InvalidSetFulltextOptionRequest { .. } => StatusCode::InvalidArguments,
Error::InvalidChangeTableOptionRequest { .. } => StatusCode::InvalidArguments,
}
}

View File

@@ -14,11 +14,10 @@
use std::collections::HashSet;
use api::v1::column_data_type_extension::TypeExt;
use api::v1::column_def::contains_fulltext;
use api::v1::{
AddColumn, AddColumns, Column, ColumnDataType, ColumnDataTypeExtension, ColumnDef,
ColumnOptions, ColumnSchema, CreateTableExpr, JsonTypeExtension, SemanticType,
ColumnOptions, ColumnSchema, CreateTableExpr, SemanticType,
};
use datatypes::schema::Schema;
use snafu::{ensure, OptionExt, ResultExt};
@@ -26,9 +25,8 @@ use table::metadata::TableId;
use table::table_reference::TableReference;
use crate::error::{
self, DuplicatedColumnNameSnafu, DuplicatedTimestampColumnSnafu,
InvalidFulltextColumnTypeSnafu, MissingTimestampColumnSnafu, Result,
UnknownColumnDataTypeSnafu,
DuplicatedColumnNameSnafu, DuplicatedTimestampColumnSnafu, InvalidFulltextColumnTypeSnafu,
MissingTimestampColumnSnafu, Result, UnknownColumnDataTypeSnafu,
};
pub struct ColumnExpr<'a> {
pub column_name: &'a str,
@@ -74,28 +72,6 @@ impl<'a> From<&'a ColumnSchema> for ColumnExpr<'a> {
}
}
fn infer_column_datatype(
datatype: i32,
datatype_extension: &Option<ColumnDataTypeExtension>,
) -> Result<ColumnDataType> {
let column_type =
ColumnDataType::try_from(datatype).context(UnknownColumnDataTypeSnafu { datatype })?;
if matches!(&column_type, ColumnDataType::Binary) {
if let Some(ext) = datatype_extension {
let type_ext = ext
.type_ext
.as_ref()
.context(error::MissingFieldSnafu { field: "type_ext" })?;
if *type_ext == TypeExt::JsonType(JsonTypeExtension::JsonBinary.into()) {
return Ok(ColumnDataType::Json);
}
}
}
Ok(column_type)
}
pub fn build_create_table_expr(
table_id: Option<TableId>,
table_name: &TableReference<'_>,
@@ -148,7 +124,8 @@ pub fn build_create_table_expr(
_ => {}
}
let column_type = infer_column_datatype(datatype, datatype_extension)?;
let column_type =
ColumnDataType::try_from(datatype).context(UnknownColumnDataTypeSnafu { datatype })?;
ensure!(
!contains_fulltext(options) || column_type == ColumnDataType::String,

View File

@@ -218,12 +218,6 @@ pub fn values(arrays: &[VectorRef]) -> Result<Values> {
Decimal128Vector,
decimal128_values,
|x| { convert_to_pb_decimal128(x) }
),
(
ConcreteDataType::Vector(_),
BinaryVector,
binary_values,
|x| { x.into() }
)
)
}

View File

@@ -39,7 +39,7 @@ derive_builder.workspace = true
etcd-client.workspace = true
futures.workspace = true
futures-util.workspace = true
hex.workspace = true
hex = { version = "0.4" }
humantime-serde.workspace = true
itertools.workspace = true
lazy_static.workspace = true

View File

@@ -77,7 +77,7 @@ pub struct RegionStat {
pub rcus: i64,
/// The write capacity units during this period
pub wcus: i64,
/// Approximate disk bytes of this region, including sst, index, manifest and wal
/// Approximate bytes of this region
pub approximate_bytes: u64,
/// The engine name.
pub engine: String,

View File

@@ -32,7 +32,6 @@ use crate::rpc::ddl::{SubmitDdlTaskRequest, SubmitDdlTaskResponse};
use crate::rpc::procedure::{MigrateRegionRequest, MigrateRegionResponse, ProcedureStateResponse};
use crate::{ClusterId, DatanodeId};
pub mod alter_database;
pub mod alter_logical_tables;
pub mod alter_table;
pub mod create_database;

View File

@@ -1,248 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use async_trait::async_trait;
use common_procedure::error::{FromJsonSnafu, Result as ProcedureResult, ToJsonSnafu};
use common_procedure::{Context as ProcedureContext, LockKey, Procedure, Status};
use common_telemetry::tracing::info;
use serde::{Deserialize, Serialize};
use snafu::{ensure, ResultExt};
use strum::AsRefStr;
use super::utils::handle_retry_error;
use crate::cache_invalidator::Context;
use crate::ddl::DdlContext;
use crate::error::{Result, SchemaNotFoundSnafu};
use crate::instruction::CacheIdent;
use crate::key::schema_name::{SchemaName, SchemaNameKey, SchemaNameValue};
use crate::key::DeserializedValueWithBytes;
use crate::lock_key::{CatalogLock, SchemaLock};
use crate::rpc::ddl::UnsetDatabaseOption::{self};
use crate::rpc::ddl::{AlterDatabaseKind, AlterDatabaseTask, SetDatabaseOption};
use crate::ClusterId;
pub struct AlterDatabaseProcedure {
pub context: DdlContext,
pub data: AlterDatabaseData,
}
fn build_new_schema_value(
mut value: SchemaNameValue,
alter_kind: &AlterDatabaseKind,
) -> Result<SchemaNameValue> {
match alter_kind {
AlterDatabaseKind::SetDatabaseOptions(options) => {
for option in options.0.iter() {
match option {
SetDatabaseOption::Ttl(ttl) => {
if ttl.is_zero() {
value.ttl = None;
} else {
value.ttl = Some(*ttl);
}
}
}
}
}
AlterDatabaseKind::UnsetDatabaseOptions(keys) => {
for key in keys.0.iter() {
match key {
UnsetDatabaseOption::Ttl => value.ttl = None,
}
}
}
}
Ok(value)
}
impl AlterDatabaseProcedure {
pub const TYPE_NAME: &'static str = "metasrv-procedure::AlterDatabase";
pub fn new(
cluster_id: ClusterId,
task: AlterDatabaseTask,
context: DdlContext,
) -> Result<Self> {
Ok(Self {
context,
data: AlterDatabaseData::new(task, cluster_id)?,
})
}
pub fn from_json(json: &str, context: DdlContext) -> ProcedureResult<Self> {
let data = serde_json::from_str(json).context(FromJsonSnafu)?;
Ok(Self { context, data })
}
pub async fn on_prepare(&mut self) -> Result<Status> {
let value = self
.context
.table_metadata_manager
.schema_manager()
.get(SchemaNameKey::new(self.data.catalog(), self.data.schema()))
.await?;
ensure!(
value.is_some(),
SchemaNotFoundSnafu {
table_schema: self.data.schema(),
}
);
self.data.schema_value = value;
self.data.state = AlterDatabaseState::UpdateMetadata;
Ok(Status::executing(true))
}
pub async fn on_update_metadata(&mut self) -> Result<Status> {
let schema_name = SchemaNameKey::new(self.data.catalog(), self.data.schema());
// Safety: schema_value is not None.
let current_schema_value = self.data.schema_value.as_ref().unwrap();
let new_schema_value = build_new_schema_value(
current_schema_value.get_inner_ref().clone(),
&self.data.kind,
)?;
self.context
.table_metadata_manager
.schema_manager()
.update(schema_name, current_schema_value, &new_schema_value)
.await?;
info!("Updated database metadata for schema {schema_name}");
self.data.state = AlterDatabaseState::InvalidateSchemaCache;
Ok(Status::executing(true))
}
pub async fn on_invalidate_schema_cache(&mut self) -> Result<Status> {
let cache_invalidator = &self.context.cache_invalidator;
cache_invalidator
.invalidate(
&Context::default(),
&[CacheIdent::SchemaName(SchemaName {
catalog_name: self.data.catalog().to_string(),
schema_name: self.data.schema().to_string(),
})],
)
.await?;
Ok(Status::done())
}
}
#[async_trait]
impl Procedure for AlterDatabaseProcedure {
fn type_name(&self) -> &str {
Self::TYPE_NAME
}
async fn execute(&mut self, _ctx: &ProcedureContext) -> ProcedureResult<Status> {
match self.data.state {
AlterDatabaseState::Prepare => self.on_prepare().await,
AlterDatabaseState::UpdateMetadata => self.on_update_metadata().await,
AlterDatabaseState::InvalidateSchemaCache => self.on_invalidate_schema_cache().await,
}
.map_err(handle_retry_error)
}
fn dump(&self) -> ProcedureResult<String> {
serde_json::to_string(&self.data).context(ToJsonSnafu)
}
fn lock_key(&self) -> LockKey {
let catalog = self.data.catalog();
let schema = self.data.schema();
let lock_key = vec![
CatalogLock::Read(catalog).into(),
SchemaLock::write(catalog, schema).into(),
];
LockKey::new(lock_key)
}
}
#[derive(Debug, Serialize, Deserialize, AsRefStr)]
enum AlterDatabaseState {
Prepare,
UpdateMetadata,
InvalidateSchemaCache,
}
/// The data of alter database procedure.
#[derive(Debug, Serialize, Deserialize)]
pub struct AlterDatabaseData {
cluster_id: ClusterId,
state: AlterDatabaseState,
kind: AlterDatabaseKind,
catalog_name: String,
schema_name: String,
schema_value: Option<DeserializedValueWithBytes<SchemaNameValue>>,
}
impl AlterDatabaseData {
pub fn new(task: AlterDatabaseTask, cluster_id: ClusterId) -> Result<Self> {
Ok(Self {
cluster_id,
state: AlterDatabaseState::Prepare,
kind: AlterDatabaseKind::try_from(task.alter_expr.kind.unwrap())?,
catalog_name: task.alter_expr.catalog_name,
schema_name: task.alter_expr.schema_name,
schema_value: None,
})
}
pub fn catalog(&self) -> &str {
&self.catalog_name
}
pub fn schema(&self) -> &str {
&self.schema_name
}
}
#[cfg(test)]
mod tests {
use std::time::Duration;
use crate::ddl::alter_database::build_new_schema_value;
use crate::key::schema_name::SchemaNameValue;
use crate::rpc::ddl::{
AlterDatabaseKind, SetDatabaseOption, SetDatabaseOptions, UnsetDatabaseOption,
UnsetDatabaseOptions,
};
#[test]
fn test_build_new_schema_value() {
let set_ttl = AlterDatabaseKind::SetDatabaseOptions(SetDatabaseOptions(vec![
SetDatabaseOption::Ttl(Duration::from_secs(10)),
]));
let current_schema_value = SchemaNameValue::default();
let new_schema_value =
build_new_schema_value(current_schema_value.clone(), &set_ttl).unwrap();
assert_eq!(new_schema_value.ttl, Some(Duration::from_secs(10)));
let unset_ttl_alter_kind =
AlterDatabaseKind::UnsetDatabaseOptions(UnsetDatabaseOptions(vec![
UnsetDatabaseOption::Ttl,
]));
let new_schema_value =
build_new_schema_value(current_schema_value, &unset_ttl_alter_kind).unwrap();
assert_eq!(new_schema_value.ttl, None);
}
}
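
The TTL branch of `build_new_schema_value` reduces to a small rule: a zero TTL on set clears the stored value, any non-zero TTL replaces it, and unset always clears it. A standalone sketch of that rule (function names are illustrative):

use std::time::Duration;

fn set_ttl(ttl: Duration) -> Option<Duration> {
    // Zero acts as "clear", matching the `ttl.is_zero()` branch above.
    if ttl.is_zero() {
        None
    } else {
        Some(ttl)
    }
}

fn unset_ttl() -> Option<Duration> {
    None
}

fn main() {
    assert_eq!(set_ttl(Duration::from_secs(10)), Some(Duration::from_secs(10)));
    assert_eq!(set_ttl(Duration::ZERO), None);
    assert_eq!(unset_ttl(), None);
}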

View File

@@ -14,7 +14,7 @@
use std::collections::HashSet;
use api::v1::alter_table_expr::Kind;
use api::v1::alter_expr::Kind;
use snafu::{ensure, OptionExt};
use crate::ddl::alter_logical_tables::AlterLogicalTablesProcedure;

View File

@@ -13,7 +13,7 @@
// limitations under the License.
use api::v1;
use api::v1::alter_table_expr::Kind;
use api::v1::alter_expr::Kind;
use api::v1::region::{
alter_request, region_request, AddColumn, AddColumns, AlterRequest, AlterRequests,
RegionColumnDef, RegionRequest, RegionRequestHeader,

View File

@@ -19,7 +19,7 @@ mod update_metadata;
use std::vec;
use api::v1::alter_table_expr::Kind;
use api::v1::alter_expr::Kind;
use api::v1::RenameTable;
use async_trait::async_trait;
use common_error::ext::ErrorExt;

View File

@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use api::v1::alter_table_expr::Kind;
use api::v1::alter_expr::Kind;
use api::v1::RenameTable;
use common_catalog::format_full_table_name;
use snafu::ensure;

View File

@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use api::v1::alter_table_expr::Kind;
use api::v1::alter_expr::Kind;
use api::v1::region::region_request::Body;
use api::v1::region::{
alter_request, AddColumn, AddColumns, AlterRequest, DropColumn, DropColumns, RegionColumnDef,
@@ -91,7 +91,7 @@ fn create_proto_alter_kind(
add_columns,
})))
}
Kind::ModifyColumnTypes(x) => Ok(Some(alter_request::Kind::ModifyColumnTypes(x.clone()))),
Kind::ChangeColumnTypes(x) => Ok(Some(alter_request::Kind::ChangeColumnTypes(x.clone()))),
Kind::DropColumns(x) => {
let drop_columns = x
.drop_columns
@@ -106,12 +106,7 @@ fn create_proto_alter_kind(
})))
}
Kind::RenameTable(_) => Ok(None),
Kind::SetTableOptions(v) => Ok(Some(alter_request::Kind::SetTableOptions(v.clone()))),
Kind::UnsetTableOptions(v) => Ok(Some(alter_request::Kind::UnsetTableOptions(v.clone()))),
Kind::SetColumnFulltext(v) => Ok(Some(alter_request::Kind::SetColumnFulltext(v.clone()))),
Kind::UnsetColumnFulltext(v) => {
Ok(Some(alter_request::Kind::UnsetColumnFulltext(v.clone())))
}
Kind::ChangeTableOptions(v) => Ok(Some(alter_request::Kind::ChangeTableOptions(v.clone()))),
}
}
@@ -121,12 +116,12 @@ mod tests {
use std::sync::Arc;
use api::v1::add_column_location::LocationType;
use api::v1::alter_table_expr::Kind;
use api::v1::alter_expr::Kind;
use api::v1::region::region_request::Body;
use api::v1::region::RegionColumnDef;
use api::v1::{
region, AddColumn, AddColumnLocation, AddColumns, AlterTableExpr, ColumnDataType,
ColumnDef as PbColumnDef, ModifyColumnType, ModifyColumnTypes, SemanticType,
region, AddColumn, AddColumnLocation, AddColumns, AlterExpr, ChangeColumnType,
ChangeColumnTypes, ColumnDataType, ColumnDef as PbColumnDef, SemanticType,
};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use store_api::storage::{RegionId, TableId};
@@ -215,7 +210,7 @@ mod tests {
prepare_ddl_context().await;
let task = AlterTableTask {
alter_table: AlterTableExpr {
alter_table: AlterExpr {
catalog_name: DEFAULT_CATALOG_NAME.to_string(),
schema_name: DEFAULT_SCHEMA_NAME.to_string(),
table_name,
@@ -282,12 +277,12 @@ mod tests {
prepare_ddl_context().await;
let task = AlterTableTask {
alter_table: AlterTableExpr {
alter_table: AlterExpr {
catalog_name: DEFAULT_CATALOG_NAME.to_string(),
schema_name: DEFAULT_SCHEMA_NAME.to_string(),
table_name,
kind: Some(Kind::ModifyColumnTypes(ModifyColumnTypes {
modify_column_types: vec![ModifyColumnType {
kind: Some(Kind::ChangeColumnTypes(ChangeColumnTypes {
change_column_types: vec![ChangeColumnType {
column_name: "cpu".to_string(),
target_type: ColumnDataType::String as i32,
target_type_extension: None,
@@ -308,9 +303,9 @@ mod tests {
assert_eq!(alter_region_request.schema_version, 1);
assert_eq!(
alter_region_request.kind,
Some(region::alter_request::Kind::ModifyColumnTypes(
ModifyColumnTypes {
modify_column_types: vec![ModifyColumnType {
Some(region::alter_request::Kind::ChangeColumnTypes(
ChangeColumnTypes {
change_column_types: vec![ChangeColumnType {
column_name: "cpu".to_string(),
target_type: ColumnDataType::String as i32,
target_type_extension: None,

View File

@@ -52,11 +52,8 @@ impl AlterTableProcedure {
new_info.name = new_table_name.to_string();
}
AlterKind::DropColumns { .. }
| AlterKind::ModifyColumnTypes { .. }
| AlterKind::SetTableOptions { .. }
| AlterKind::UnsetTableOptions { .. }
| AlterKind::SetColumnFulltext { .. }
| AlterKind::UnsetColumnFulltext { .. } => {}
| AlterKind::ChangeColumnTypes { .. }
| AlterKind::ChangeTableOptions { .. } => {}
}
Ok(new_info)

View File

@@ -28,7 +28,6 @@ use common_procedure::{
use common_telemetry::info;
use common_telemetry::tracing_context::TracingContext;
use futures::future::join_all;
use futures::TryStreamExt;
use itertools::Itertools;
use serde::{Deserialize, Serialize};
use snafu::{ensure, ResultExt};
@@ -44,7 +43,7 @@ use crate::instruction::{CacheIdent, CreateFlow};
use crate::key::flow::flow_info::FlowInfoValue;
use crate::key::flow::flow_route::FlowRouteValue;
use crate::key::table_name::TableNameKey;
use crate::key::{DeserializedValueWithBytes, FlowId, FlowPartitionId};
use crate::key::{FlowId, FlowPartitionId};
use crate::lock_key::{CatalogLock, FlowNameLock, TableNameLock};
use crate::peer::Peer;
use crate::rpc::ddl::{CreateFlowTask, QueryContext};
@@ -76,7 +75,6 @@ impl CreateFlowProcedure {
source_table_ids: vec![],
query_context,
state: CreateFlowState::Prepare,
prev_flow_info_value: None,
},
}
}
@@ -92,7 +90,6 @@ impl CreateFlowProcedure {
let flow_name = &self.data.task.flow_name;
let sink_table_name = &self.data.task.sink_table_name;
let create_if_not_exists = self.data.task.create_if_not_exists;
let or_replace = self.data.task.or_replace;
let flow_name_value = self
.context
@@ -101,56 +98,16 @@ impl CreateFlowProcedure {
.get(catalog_name, flow_name)
.await?;
if create_if_not_exists && or_replace {
// this is forbidden because it is unclear what that combination should mean
return error::UnsupportedSnafu {
operation: "Create flow with both `IF NOT EXISTS` and `OR REPLACE`".to_string(),
}
.fail();
}
if let Some(value) = flow_name_value {
ensure!(
create_if_not_exists || or_replace,
create_if_not_exists,
error::FlowAlreadyExistsSnafu {
flow_name: format_full_flow_name(catalog_name, flow_name),
}
);
let flow_id = value.flow_id();
if create_if_not_exists {
info!("Flow already exists, flow_id: {}", flow_id);
return Ok(Status::done_with_output(flow_id));
}
let flow_id = value.flow_id();
let peers = self
.context
.flow_metadata_manager
.flow_route_manager()
.routes(flow_id)
.map_ok(|(_, value)| value.peer)
.try_collect::<Vec<_>>()
.await?;
self.data.flow_id = Some(flow_id);
self.data.peers = peers;
info!("Replacing flow, flow_id: {}", flow_id);
let flow_info_value = self
.context
.flow_metadata_manager
.flow_info_manager()
.get_raw(flow_id)
.await?;
ensure!(
flow_info_value.is_some(),
error::FlowNotFoundSnafu {
flow_name: format_full_flow_name(catalog_name, flow_name),
}
);
self.data.prev_flow_info_value = flow_info_value;
return Ok(Status::done_with_output(flow_id));
}
// Ensures sink table doesn't exist.
@@ -171,9 +128,7 @@ impl CreateFlowProcedure {
}
self.collect_source_tables().await?;
if self.data.flow_id.is_none() {
self.allocate_flow_id().await?;
}
self.allocate_flow_id().await?;
self.data.state = CreateFlowState::CreateFlows;
Ok(Status::executing(true))
@@ -198,10 +153,7 @@ impl CreateFlowProcedure {
.map_err(add_peer_context_if_needed(peer.clone()))
});
}
info!(
"Creating flow({:?}) on flownodes with peers={:?}",
self.data.flow_id, self.data.peers
);
join_all(create_flow)
.await
.into_iter()
@@ -218,29 +170,18 @@ impl CreateFlowProcedure {
async fn on_create_metadata(&mut self) -> Result<Status> {
// Safety: The flow id must be allocated.
let flow_id = self.data.flow_id.unwrap();
// TODO(weny): Support `or_replace`.
let (flow_info, flow_routes) = (&self.data).into();
if let Some(prev_flow_value) = self.data.prev_flow_info_value.as_ref()
&& self.data.task.or_replace
{
self.context
.flow_metadata_manager
.update_flow_metadata(flow_id, prev_flow_value, &flow_info, flow_routes)
.await?;
info!("Replaced flow metadata for flow {flow_id}");
} else {
self.context
.flow_metadata_manager
.create_flow_metadata(flow_id, flow_info, flow_routes)
.await?;
info!("Created flow metadata for flow {flow_id}");
}
self.context
.flow_metadata_manager
.create_flow_metadata(flow_id, flow_info, flow_routes)
.await?;
info!("Created flow metadata for flow {flow_id}");
self.data.state = CreateFlowState::InvalidateFlowCache;
Ok(Status::executing(true))
}
async fn on_broadcast(&mut self) -> Result<Status> {
debug_assert!(self.data.state == CreateFlowState::InvalidateFlowCache);
// Safety: The flow id must be allocated.
let flow_id = self.data.flow_id.unwrap();
let ctx = Context {
@@ -251,13 +192,10 @@ impl CreateFlowProcedure {
.cache_invalidator
.invalidate(
&ctx,
&[
CacheIdent::CreateFlow(CreateFlow {
source_table_ids: self.data.source_table_ids.clone(),
flownodes: self.data.peers.clone(),
}),
CacheIdent::FlowId(flow_id),
],
&[CacheIdent::CreateFlow(CreateFlow {
source_table_ids: self.data.source_table_ids.clone(),
flownodes: self.data.peers.clone(),
})],
)
.await?;
@@ -332,9 +270,6 @@ pub struct CreateFlowData {
pub(crate) peers: Vec<Peer>,
pub(crate) source_table_ids: Vec<TableId>,
pub(crate) query_context: QueryContext,
/// Used to verify that the previous value is consistent when the flow metadata needs to be updated.
/// Only set when `or_replace` is true.
pub(crate) prev_flow_info_value: Option<DeserializedValueWithBytes<FlowInfoValue>>,
}
impl From<&CreateFlowData> for CreateRequest {
@@ -349,9 +284,8 @@ impl From<&CreateFlowData> for CreateRequest {
.map(|table_id| api::v1::TableId { id: *table_id })
.collect_vec(),
sink_table_name: Some(value.task.sink_table_name.clone().into()),
// Always true to ensure idempotency in case of retry
// Always true
create_if_not_exists: true,
or_replace: value.task.or_replace,
expire_after: value.task.expire_after.map(|value| ExpireAfter { value }),
comment: value.task.comment.clone(),
sql: value.task.sql.clone(),

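The `or_replace` handling removed by this diff amounts to a decision table over (flow exists, IF NOT EXISTS, OR REPLACE). A hypothetical standalone encoding of the removed logic:

#[derive(Debug, PartialEq)]
enum Action {
    Error,          // invalid flag combination, or conflicting existing flow
    ReturnExisting, // IF NOT EXISTS and the flow is already there
    Replace,        // OR REPLACE and the flow is already there
    Create,         // no existing flow: create it
}

fn decide(exists: bool, if_not_exists: bool, or_replace: bool) -> Action {
    match (exists, if_not_exists, or_replace) {
        // Both flags together are rejected up front as ambiguous.
        (_, true, true) => Action::Error,
        (false, _, _) => Action::Create,
        (true, true, false) => Action::ReturnExisting,
        (true, false, true) => Action::Replace,
        (true, false, false) => Action::Error, // flow already exists
    }
}

fn main() {
    assert_eq!(decide(true, false, true), Action::Replace);
    assert_eq!(decide(true, true, false), Action::ReturnExisting);
    assert_eq!(decide(false, false, false), Action::Create);
}
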
View File

@@ -12,8 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use api::v1::alter_table_expr::Kind;
use api::v1::{AddColumn, AddColumns, AlterTableExpr, ColumnDef, RenameTable};
use api::v1::alter_expr::Kind;
use api::v1::{AddColumn, AddColumns, AlterExpr, ColumnDef, RenameTable};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use derive_builder::Builder;
@@ -32,7 +32,7 @@ pub struct TestAlterTableExpr {
new_table_name: Option<String>,
}
impl From<TestAlterTableExpr> for AlterTableExpr {
impl From<TestAlterTableExpr> for AlterExpr {
fn from(value: TestAlterTableExpr) -> Self {
if let Some(new_table_name) = value.new_table_name {
Self {

View File

@@ -16,11 +16,11 @@ use std::assert_matches::assert_matches;
use std::collections::HashMap;
use std::sync::Arc;
use api::v1::alter_table_expr::Kind;
use api::v1::alter_expr::Kind;
use api::v1::region::{region_request, RegionRequest};
use api::v1::{
AddColumn, AddColumns, AlterTableExpr, ColumnDataType, ColumnDef as PbColumnDef, DropColumn,
DropColumns, SemanticType, SetTableOptions,
AddColumn, AddColumns, AlterExpr, ChangeTableOption, ChangeTableOptions, ColumnDataType,
ColumnDef as PbColumnDef, DropColumn, DropColumns, SemanticType,
};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_error::ext::ErrorExt;
@@ -133,7 +133,7 @@ async fn test_on_submit_alter_request() {
.unwrap();
let alter_table_task = AlterTableTask {
alter_table: AlterTableExpr {
alter_table: AlterExpr {
catalog_name: DEFAULT_CATALOG_NAME.to_string(),
schema_name: DEFAULT_SCHEMA_NAME.to_string(),
table_name: table_name.to_string(),
@@ -219,7 +219,7 @@ async fn test_on_submit_alter_request_with_outdated_request() {
.unwrap();
let alter_table_task = AlterTableTask {
alter_table: AlterTableExpr {
alter_table: AlterExpr {
catalog_name: DEFAULT_CATALOG_NAME.to_string(),
schema_name: DEFAULT_SCHEMA_NAME.to_string(),
table_name: table_name.to_string(),
@@ -316,7 +316,7 @@ async fn test_on_update_metadata_add_columns() {
.unwrap();
let task = AlterTableTask {
alter_table: AlterTableExpr {
alter_table: AlterExpr {
catalog_name: DEFAULT_CATALOG_NAME.to_string(),
schema_name: DEFAULT_SCHEMA_NAME.to_string(),
table_name: table_name.to_string(),
@@ -385,12 +385,12 @@ async fn test_on_update_table_options() {
.unwrap();
let task = AlterTableTask {
alter_table: AlterTableExpr {
alter_table: AlterExpr {
catalog_name: DEFAULT_CATALOG_NAME.to_string(),
schema_name: DEFAULT_SCHEMA_NAME.to_string(),
table_name: table_name.to_string(),
kind: Some(Kind::SetTableOptions(SetTableOptions {
table_options: vec![api::v1::Option {
kind: Some(Kind::ChangeTableOptions(ChangeTableOptions {
change_table_options: vec![ChangeTableOption {
key: TTL_KEY.to_string(),
value: "1d".to_string(),
}],

View File

@@ -24,7 +24,6 @@ use derive_builder::Builder;
use snafu::{ensure, OptionExt, ResultExt};
use store_api::storage::TableId;
use crate::ddl::alter_database::AlterDatabaseProcedure;
use crate::ddl::alter_logical_tables::AlterLogicalTablesProcedure;
use crate::ddl::alter_table::AlterTableProcedure;
use crate::ddl::create_database::CreateDatabaseProcedure;
@@ -48,13 +47,12 @@ use crate::key::table_info::TableInfoValue;
use crate::key::table_name::TableNameKey;
use crate::key::{DeserializedValueWithBytes, TableMetadataManagerRef};
use crate::rpc::ddl::DdlTask::{
AlterDatabase, AlterLogicalTables, AlterTable, CreateDatabase, CreateFlow, CreateLogicalTables,
CreateTable, CreateView, DropDatabase, DropFlow, DropLogicalTables, DropTable, DropView,
TruncateTable,
AlterLogicalTables, AlterTable, CreateDatabase, CreateFlow, CreateLogicalTables, CreateTable,
CreateView, DropDatabase, DropFlow, DropLogicalTables, DropTable, DropView, TruncateTable,
};
use crate::rpc::ddl::{
AlterDatabaseTask, AlterTableTask, CreateDatabaseTask, CreateFlowTask, CreateTableTask,
CreateViewTask, DropDatabaseTask, DropFlowTask, DropTableTask, DropViewTask, QueryContext,
AlterTableTask, CreateDatabaseTask, CreateFlowTask, CreateTableTask, CreateViewTask,
DropDatabaseTask, DropFlowTask, DropTableTask, DropViewTask, QueryContext,
SubmitDdlTaskRequest, SubmitDdlTaskResponse, TruncateTableTask,
};
use crate::rpc::procedure;
@@ -131,7 +129,6 @@ impl DdlManager {
CreateFlowProcedure,
AlterTableProcedure,
AlterLogicalTablesProcedure,
AlterDatabaseProcedure,
DropTableProcedure,
DropFlowProcedure,
TruncateTableProcedure,
@@ -297,18 +294,6 @@ impl DdlManager {
self.submit_procedure(procedure_with_id).await
}
pub async fn submit_alter_database(
&self,
cluster_id: ClusterId,
alter_database_task: AlterDatabaseTask,
) -> Result<(ProcedureId, Option<Output>)> {
let context = self.create_context();
let procedure = AlterDatabaseProcedure::new(cluster_id, alter_database_task, context)?;
let procedure_with_id = ProcedureWithId::with_random_id(Box::new(procedure));
self.submit_procedure(procedure_with_id).await
}
/// Submits and executes a create flow task.
#[tracing::instrument(skip_all)]
pub async fn submit_create_flow_task(
@@ -608,28 +593,6 @@ async fn handle_drop_database_task(
})
}
async fn handle_alter_database_task(
ddl_manager: &DdlManager,
cluster_id: ClusterId,
alter_database_task: AlterDatabaseTask,
) -> Result<SubmitDdlTaskResponse> {
let (id, _) = ddl_manager
.submit_alter_database(cluster_id, alter_database_task.clone())
.await?;
let procedure_id = id.to_string();
info!(
"Database {}.{} is altered via procedure_id {id:?}",
alter_database_task.catalog(),
alter_database_task.schema()
);
Ok(SubmitDdlTaskResponse {
key: procedure_id.into(),
..Default::default()
})
}
async fn handle_drop_flow_task(
ddl_manager: &DdlManager,
cluster_id: ClusterId,
@@ -692,17 +655,10 @@ async fn handle_create_flow_task(
procedure_id: &procedure_id,
err_msg: "downcast to `u32`",
})?);
if !create_flow_task.or_replace {
info!(
"Flow {}.{}({flow_id}) is created via procedure_id {id:?}",
create_flow_task.catalog_name, create_flow_task.flow_name,
);
} else {
info!(
"Flow {}.{}({flow_id}) is replaced via procedure_id {id:?}",
create_flow_task.catalog_name, create_flow_task.flow_name,
);
}
info!(
"Flow {}.{}({flow_id}) is created via procedure_id {id:?}",
create_flow_task.catalog_name, create_flow_task.flow_name,
);
Ok(SubmitDdlTaskResponse {
key: procedure_id.into(),
@@ -816,9 +772,6 @@ impl ProcedureExecutor for DdlManager {
DropDatabase(drop_database_task) => {
handle_drop_database_task(self, cluster_id, drop_database_task).await
}
AlterDatabase(alter_database_task) => {
handle_alter_database_task(self, cluster_id, alter_database_task).await
}
CreateFlow(create_flow_task) => {
handle_create_flow_task(
self,

View File

@@ -593,21 +593,6 @@ pub enum Error {
location: Location,
},
#[snafu(display("Invalid set database option, key: {}, value: {}", key, value))]
InvalidSetDatabaseOption {
key: String,
value: String,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Invalid unset database option, key: {}", key))]
InvalidUnsetDatabaseOption {
key: String,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Invalid prefix: {}, key: {}", prefix, key))]
MismatchPrefix {
prefix: String,
@@ -745,9 +730,7 @@ impl ErrorExt for Error {
| AlterLogicalTablesInvalidArguments { .. }
| CreateLogicalTablesInvalidArguments { .. }
| MismatchPrefix { .. }
| TlsConfig { .. }
| InvalidSetDatabaseOption { .. }
| InvalidUnsetDatabaseOption { .. } => StatusCode::InvalidArguments,
| TlsConfig { .. } => StatusCode::InvalidArguments,
FlowNotFound { .. } => StatusCode::FlowNotFound,
FlowRouteNotFound { .. } => StatusCode::Unexpected,

View File

@@ -90,9 +90,7 @@
pub mod catalog_name;
pub mod datanode_table;
pub mod flow;
pub mod maintenance;
pub mod node_address;
mod schema_metadata_manager;
pub mod schema_name;
pub mod table_info;
pub mod table_name;
@@ -118,7 +116,6 @@ use flow::flow_route::FlowRouteValue;
use flow::table_flow::TableFlowValue;
use lazy_static::lazy_static;
use regex::Regex;
pub use schema_metadata_manager::{SchemaMetadataManager, SchemaMetadataManagerRef};
use serde::de::DeserializeOwned;
use serde::{Deserialize, Serialize};
use snafu::{ensure, OptionExt, ResultExt};
@@ -565,13 +562,13 @@ impl TableMetadataManager {
let mut set = TxnOpGetResponseSet::from(&mut r.responses);
let remote_table_info = on_create_table_info_failure(&mut set)?
.context(error::UnexpectedSnafu {
err_msg: "Reads the empty table info in comparing operation of creating table metadata",
err_msg: "Reads the empty table info during the create table metadata",
})?
.into_inner();
let remote_view_info = on_create_view_info_failure(&mut set)?
.context(error::UnexpectedSnafu {
err_msg: "Reads the empty view info in comparing operation of creating view metadata",
err_msg: "Reads the empty view info during the create view info",
})?
.into_inner();
@@ -644,13 +641,13 @@ impl TableMetadataManager {
let mut set = TxnOpGetResponseSet::from(&mut r.responses);
let remote_table_info = on_create_table_info_failure(&mut set)?
.context(error::UnexpectedSnafu {
err_msg: "Reads the empty table info in comparing operation of creating table metadata",
err_msg: "Reads the empty table info during the create table metadata",
})?
.into_inner();
let remote_table_route = on_create_table_route_failure(&mut set)?
.context(error::UnexpectedSnafu {
err_msg: "Reads the empty table route in comparing operation of creating table metadata",
err_msg: "Reads the empty table route during the create table metadata",
})?
.into_inner();
@@ -731,13 +728,13 @@ impl TableMetadataManager {
for on_failure in on_failures {
let remote_table_info = (on_failure.on_create_table_info_failure)(&mut set)?
.context(error::UnexpectedSnafu {
err_msg: "Reads the empty table info in comparing operation of creating table metadata",
err_msg: "Reads the empty table info during the create table metadata",
})?
.into_inner();
let remote_table_route = (on_failure.on_create_table_route_failure)(&mut set)?
.context(error::UnexpectedSnafu {
err_msg: "Reads the empty table route in comparing operation of creating table metadata",
err_msg: "Reads the empty table route during the create table metadata",
})?
.into_inner();
@@ -915,7 +912,7 @@ impl TableMetadataManager {
let mut set = TxnOpGetResponseSet::from(&mut r.responses);
let remote_table_info = on_update_table_info_failure(&mut set)?
.context(error::UnexpectedSnafu {
err_msg: "Reads the empty table info in comparing operation of the rename table metadata",
err_msg: "Reads the empty table info during the rename table metadata",
})?
.into_inner();
@@ -961,7 +958,7 @@ impl TableMetadataManager {
let mut set = TxnOpGetResponseSet::from(&mut r.responses);
let remote_table_info = on_update_table_info_failure(&mut set)?
.context(error::UnexpectedSnafu {
err_msg: "Reads the empty table info in comparing operation of the updating table info",
err_msg: "Reads the empty table info during the updating table info",
})?
.into_inner();
@@ -1012,7 +1009,7 @@ impl TableMetadataManager {
let mut set = TxnOpGetResponseSet::from(&mut r.responses);
let remote_view_info = on_update_view_info_failure(&mut set)?
.context(error::UnexpectedSnafu {
err_msg: "Reads the empty view info in comparing operation of the updating view info",
err_msg: "Reads the empty view info during the updating view info",
})?
.into_inner();
@@ -1069,7 +1066,7 @@ impl TableMetadataManager {
for on_failure in on_failures {
let remote_table_info = (on_failure.on_update_table_info_failure)(&mut set)?
.context(error::UnexpectedSnafu {
err_msg: "Reads the empty table info in comparing operation of the updating table info",
err_msg: "Reads the empty table info during the updating table info",
})?
.into_inner();
@@ -1121,7 +1118,7 @@ impl TableMetadataManager {
let mut set = TxnOpGetResponseSet::from(&mut r.responses);
let remote_table_route = on_update_table_route_failure(&mut set)?
.context(error::UnexpectedSnafu {
err_msg: "Reads the empty table route in comparing operation of the updating table route",
err_msg: "Reads the empty table route during the updating table route",
})?
.into_inner();
@@ -1173,7 +1170,7 @@ impl TableMetadataManager {
let mut set = TxnOpGetResponseSet::from(&mut r.responses);
let remote_table_route = on_update_table_route_failure(&mut set)?
.context(error::UnexpectedSnafu {
err_msg: "Reads the empty table route in comparing operation of the updating leader region status",
err_msg: "Reads the empty table route during the updating leader region status",
})?
.into_inner();
@@ -1261,8 +1258,7 @@ impl_metadata_value! {
FlowNameValue,
FlowRouteValue,
TableFlowValue,
NodeAddressValue,
SchemaNameValue
NodeAddressValue
}
impl_optional_metadata_value! {

View File

@@ -38,7 +38,7 @@ use crate::key::flow::flow_name::FlowNameManager;
use crate::key::flow::flownode_flow::FlownodeFlowManager;
pub use crate::key::flow::table_flow::{TableFlowManager, TableFlowManagerRef};
use crate::key::txn_helper::TxnOpGetResponseSet;
use crate::key::{DeserializedValueWithBytes, FlowId, MetadataKey};
use crate::key::{FlowId, MetadataKey};
use crate::kv_backend::txn::Txn;
use crate::kv_backend::KvBackendRef;
use crate::rpc::store::BatchDeleteRequest;
@@ -197,7 +197,7 @@ impl FlowMetadataManager {
on_create_flow_flow_name_failure(&mut set)?.with_context(|| {
error::UnexpectedSnafu {
err_msg: format!(
"Reads the empty flow name in comparing operation of the creating flow, flow_id: {flow_id}"
"Reads the empty flow name during the creating flow, flow_id: {flow_id}"
),
}
})?;
@@ -220,7 +220,7 @@ impl FlowMetadataManager {
let remote_flow =
on_create_flow_failure(&mut set)?.with_context(|| error::UnexpectedSnafu {
err_msg: format!(
"Reads the empty flow in comparing operation of creating flow, flow_id: {flow_id}"
"Reads the empty flow during the creating flow, flow_id: {flow_id}"
),
})?;
let op_name = "creating flow";
@@ -230,102 +230,6 @@ impl FlowMetadataManager {
Ok(())
}
/// Updates metadata for a flow, returning an error if the old metadata does not exist.
pub async fn update_flow_metadata(
&self,
flow_id: FlowId,
current_flow_info: &DeserializedValueWithBytes<FlowInfoValue>,
new_flow_info: &FlowInfoValue,
flow_routes: Vec<(FlowPartitionId, FlowRouteValue)>,
) -> Result<()> {
let (create_flow_flow_name_txn, on_create_flow_flow_name_failure) =
self.flow_name_manager.build_update_txn(
&new_flow_info.catalog_name,
&new_flow_info.flow_name,
flow_id,
)?;
let (create_flow_txn, on_create_flow_failure) =
self.flow_info_manager
.build_update_txn(flow_id, current_flow_info, new_flow_info)?;
let create_flow_routes_txn = self
.flow_route_manager
.build_create_txn(flow_id, flow_routes.clone())?;
let create_flownode_flow_txn = self
.flownode_flow_manager
.build_create_txn(flow_id, new_flow_info.flownode_ids().clone());
let create_table_flow_txn = self.table_flow_manager.build_create_txn(
flow_id,
flow_routes
.into_iter()
.map(|(partition_id, route)| (partition_id, TableFlowValue { peer: route.peer }))
.collect(),
new_flow_info.source_table_ids(),
)?;
let txn = Txn::merge_all(vec![
create_flow_flow_name_txn,
create_flow_txn,
create_flow_routes_txn,
create_flownode_flow_txn,
create_table_flow_txn,
]);
info!(
"Creating flow {}.{}({}), with {} txn operations",
new_flow_info.catalog_name,
new_flow_info.flow_name,
flow_id,
txn.max_operations()
);
let mut resp = self.kv_backend.txn(txn).await?;
if !resp.succeeded {
let mut set = TxnOpGetResponseSet::from(&mut resp.responses);
let remote_flow_flow_name =
on_create_flow_flow_name_failure(&mut set)?.with_context(|| {
error::UnexpectedSnafu {
err_msg: format!(
"Reads the empty flow name in comparing operation of the updating flow, flow_id: {flow_id}"
),
}
})?;
if remote_flow_flow_name.flow_id() != flow_id {
info!(
"Trying to updating flow {}.{}({}), but flow({}) already exists with a different flow id",
new_flow_info.catalog_name,
new_flow_info.flow_name,
flow_id,
remote_flow_flow_name.flow_id()
);
return error::UnexpectedSnafu {
err_msg: format!(
"Reads different flow id when updating flow({2}.{3}), prev flow id = {0}, updating with flow id = {1}",
remote_flow_flow_name.flow_id(),
flow_id,
new_flow_info.catalog_name,
new_flow_info.flow_name,
),
}.fail();
}
let remote_flow =
on_create_flow_failure(&mut set)?.with_context(|| error::UnexpectedSnafu {
err_msg: format!(
"Reads the empty flow in comparing operation of the updating flow, flow_id: {flow_id}"
),
})?;
let op_name = "updating flow";
ensure_values!(*remote_flow, new_flow_info.clone(), op_name);
}
Ok(())
}
fn flow_metadata_keys(&self, flow_id: FlowId, flow_value: &FlowInfoValue) -> Vec<Vec<u8>> {
let source_table_ids = flow_value.source_table_ids();
let mut keys =
@@ -656,222 +560,4 @@ mod tests {
// Ensures all keys are deleted
assert!(mem_kv.is_empty())
}
#[tokio::test]
async fn test_update_flow_metadata() {
let mem_kv = Arc::new(MemoryKvBackend::default());
let flow_metadata_manager = FlowMetadataManager::new(mem_kv.clone());
let flow_id = 10;
let flow_value = test_flow_info_value(
"flow",
[(0, 1u64), (1, 2u64)].into(),
vec![1024, 1025, 1026],
);
let flow_routes = vec![
(
1u32,
FlowRouteValue {
peer: Peer::empty(1),
},
),
(
2,
FlowRouteValue {
peer: Peer::empty(2),
},
),
];
flow_metadata_manager
.create_flow_metadata(flow_id, flow_value.clone(), flow_routes.clone())
.await
.unwrap();
let new_flow_value = {
let mut tmp = flow_value.clone();
tmp.raw_sql = "new".to_string();
tmp
};
// Then update the flow's metadata with the new value.
flow_metadata_manager
.update_flow_metadata(
flow_id,
&DeserializedValueWithBytes::from_inner(flow_value.clone()),
&new_flow_value,
flow_routes.clone(),
)
.await
.unwrap();
let got = flow_metadata_manager
.flow_info_manager()
.get(flow_id)
.await
.unwrap()
.unwrap();
let routes = flow_metadata_manager
.flow_route_manager()
.routes(flow_id)
.try_collect::<Vec<_>>()
.await
.unwrap();
assert_eq!(
routes,
vec![
(
FlowRouteKey::new(flow_id, 1),
FlowRouteValue {
peer: Peer::empty(1),
},
),
(
FlowRouteKey::new(flow_id, 2),
FlowRouteValue {
peer: Peer::empty(2),
},
),
]
);
assert_eq!(got, new_flow_value);
let flows = flow_metadata_manager
.flownode_flow_manager()
.flows(1)
.try_collect::<Vec<_>>()
.await
.unwrap();
assert_eq!(flows, vec![(flow_id, 0)]);
for table_id in [1024, 1025, 1026] {
let nodes = flow_metadata_manager
.table_flow_manager()
.flows(table_id)
.try_collect::<Vec<_>>()
.await
.unwrap();
assert_eq!(
nodes,
vec![
(
TableFlowKey::new(table_id, 1, flow_id, 1),
TableFlowValue {
peer: Peer::empty(1)
}
),
(
TableFlowKey::new(table_id, 2, flow_id, 2),
TableFlowValue {
peer: Peer::empty(2)
}
)
]
);
}
}
#[tokio::test]
async fn test_update_flow_metadata_flow_replace_diff_id_err() {
let mem_kv = Arc::new(MemoryKvBackend::default());
let flow_metadata_manager = FlowMetadataManager::new(mem_kv);
let flow_id = 10;
let flow_value = test_flow_info_value("flow", [(0, 1u64)].into(), vec![1024, 1025, 1026]);
let flow_routes = vec![
(
1u32,
FlowRouteValue {
peer: Peer::empty(1),
},
),
(
2,
FlowRouteValue {
peer: Peer::empty(2),
},
),
];
flow_metadata_manager
.create_flow_metadata(flow_id, flow_value.clone(), flow_routes.clone())
.await
.unwrap();
// update again with same flow id
flow_metadata_manager
.update_flow_metadata(
flow_id,
&DeserializedValueWithBytes::from_inner(flow_value.clone()),
&flow_value,
flow_routes.clone(),
)
.await
.unwrap();
// update again with wrong flow id, expected error
let err = flow_metadata_manager
.update_flow_metadata(
flow_id + 1,
&DeserializedValueWithBytes::from_inner(flow_value.clone()),
&flow_value,
flow_routes,
)
.await
.unwrap_err();
assert_matches!(err, error::Error::Unexpected { .. });
assert!(err
.to_string()
.contains("Reads different flow id when updating flow"));
}
#[tokio::test]
async fn test_update_flow_metadata_unexpected_err_prev_value_diff() {
let mem_kv = Arc::new(MemoryKvBackend::default());
let flow_metadata_manager = FlowMetadataManager::new(mem_kv);
let flow_id = 10;
let catalog_name = "greptime";
let flow_value = test_flow_info_value("flow", [(0, 1u64)].into(), vec![1024, 1025, 1026]);
let flow_routes = vec![
(
1u32,
FlowRouteValue {
peer: Peer::empty(1),
},
),
(
2,
FlowRouteValue {
peer: Peer::empty(2),
},
),
];
flow_metadata_manager
.create_flow_metadata(flow_id, flow_value.clone(), flow_routes.clone())
.await
.unwrap();
// Updates with a previous value whose bytes don't match the stored ones; expected to fail.
let another_sink_table_name = TableName {
catalog_name: catalog_name.to_string(),
schema_name: "my_schema".to_string(),
table_name: "another_sink_table".to_string(),
};
let flow_value = FlowInfoValue {
catalog_name: "greptime".to_string(),
flow_name: "flow".to_string(),
source_table_ids: vec![1024, 1025, 1026],
sink_table_name: another_sink_table_name,
flownode_ids: [(0, 1u64)].into(),
raw_sql: "raw".to_string(),
expire_after: Some(300),
comment: "hi".to_string(),
options: Default::default(),
};
let err = flow_metadata_manager
.update_flow_metadata(
flow_id,
&DeserializedValueWithBytes::from_inner(flow_value.clone()),
&flow_value,
flow_routes.clone(),
)
.await
.unwrap_err();
assert!(
err.to_string().contains("Reads the different value"),
"error: {:?}",
err
);
}
}

View File

@@ -26,7 +26,7 @@ use crate::error::{self, Result};
use crate::key::flow::FlowScoped;
use crate::key::txn_helper::TxnOpGetResponseSet;
use crate::key::{DeserializedValueWithBytes, FlowId, FlowPartitionId, MetadataKey, MetadataValue};
use crate::kv_backend::txn::{Compare, CompareOp, Txn, TxnOp};
use crate::kv_backend::txn::Txn;
use crate::kv_backend::KvBackendRef;
use crate::FlownodeId;
@@ -196,19 +196,6 @@ impl FlowInfoManager {
.transpose()
}
/// Returns the [FlowInfoValue] with original bytes of specified `flow_id`.
pub async fn get_raw(
&self,
flow_id: FlowId,
) -> Result<Option<DeserializedValueWithBytes<FlowInfoValue>>> {
let key = FlowInfoKey::new(flow_id).to_bytes();
self.kv_backend
.get(&key)
.await?
.map(|x| DeserializedValueWithBytes::from_inner_slice(&x.value))
.transpose()
}
/// Builds a create flow transaction.
/// It is expected that the `__flow/info/{flow_id}` key is not occupied.
/// Otherwise, the transaction will retrieve the existing value.
@@ -228,36 +215,6 @@ impl FlowInfoManager {
TxnOpGetResponseSet::decode_with(TxnOpGetResponseSet::filter(key)),
))
}
/// Builds an update flow transaction.
/// It is expected that `__flow/info/{flow_id}` is already occupied and equal to `current_flow_value`;
/// the new value may be identical, so a replace succeeds even when nothing changes.
/// Otherwise, the transaction will retrieve the existing value and fail.
pub(crate) fn build_update_txn(
&self,
flow_id: FlowId,
current_flow_value: &DeserializedValueWithBytes<FlowInfoValue>,
new_flow_value: &FlowInfoValue,
) -> Result<(
Txn,
impl FnOnce(&mut TxnOpGetResponseSet) -> FlowInfoDecodeResult,
)> {
let key = FlowInfoKey::new(flow_id).to_bytes();
let raw_value = new_flow_value.try_as_raw_value()?;
let prev_value = current_flow_value.get_raw_bytes();
let txn = Txn::new()
.when(vec![
Compare::new(key.clone(), CompareOp::NotEqual, None),
Compare::new(key.clone(), CompareOp::Equal, Some(prev_value)),
])
.and_then(vec![TxnOp::Put(key.clone(), raw_value)])
.or_else(vec![TxnOp::Get(key.clone())]);
Ok((
txn,
TxnOpGetResponseSet::decode_with(TxnOpGetResponseSet::filter(key)),
))
}
}
#[cfg(test)]
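The removed build_update_txn above is a compare-and-put: the transaction succeeds only when the key already exists and still holds the caller's snapshot bytes. A minimal sketch of the same shape using the Txn/Compare/TxnOp API shown in this diff; key, prev_bytes, new_bytes, and kv_backend are hypothetical bindings:

// Sketch only: CAS-style update, assuming the txn API from the diff above.
let key = b"__flow/info/42".to_vec();
let txn = Txn::new()
    .when(vec![
        // The key must exist...
        Compare::new(key.clone(), CompareOp::NotEqual, None),
        // ...and still hold the bytes read earlier.
        Compare::new(key.clone(), CompareOp::Equal, Some(prev_bytes)),
    ])
    // On success, write the new value.
    .and_then(vec![TxnOp::Put(key.clone(), new_bytes)])
    // On failure, fetch the current value so the caller can report the conflict.
    .or_else(vec![TxnOp::Get(key)]);
let resp = kv_backend.txn(txn).await?;
// resp.succeeded tells whether the compare passed; on failure, resp.responses holds the Get result.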

View File

@@ -26,7 +26,7 @@ use crate::key::txn_helper::TxnOpGetResponseSet;
use crate::key::{
BytesAdapter, DeserializedValueWithBytes, FlowId, MetadataKey, MetadataValue, NAME_PATTERN,
};
use crate::kv_backend::txn::{Compare, CompareOp, Txn, TxnOp};
use crate::kv_backend::txn::Txn;
use crate::kv_backend::KvBackendRef;
use crate::range_stream::{PaginationStream, DEFAULT_PAGE_SIZE};
use crate::rpc::store::RangeRequest;
@@ -237,37 +237,6 @@ impl FlowNameManager {
TxnOpGetResponseSet::decode_with(TxnOpGetResponseSet::filter(raw_key)),
))
}
/// Builds an update flow name transaction, which changes neither the name nor the id; it only
/// checks that they match the stored values.
/// It's expected that the `__flow/name/{catalog}/{flow_name}` key is already occupied and that
/// both the flow name and the flow id are the same.
/// Otherwise, the transaction will retrieve the existing value (and fail).
pub fn build_update_txn(
&self,
catalog_name: &str,
flow_name: &str,
flow_id: FlowId,
) -> Result<(
Txn,
impl FnOnce(&mut TxnOpGetResponseSet) -> FlowNameDecodeResult,
)> {
let key = FlowNameKey::new(catalog_name, flow_name);
let raw_key = key.to_bytes();
let flow_flow_name_value = FlowNameValue::new(flow_id);
let raw_value = flow_flow_name_value.try_as_raw_value()?;
let txn = Txn::new()
.when(vec![Compare::new(
raw_key.clone(),
CompareOp::Equal,
Some(raw_value),
)])
.or_else(vec![TxnOp::Get(raw_key.clone())]);
Ok((
txn,
TxnOpGetResponseSet::decode_with(TxnOpGetResponseSet::filter(raw_key)),
))
}
}
#[cfg(test)]
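Unlike the flow-info transaction, the removed name transaction above has no and_then branch: nothing is written on success; it only asserts that the stored name still maps to the expected id. A condensed sketch of that verify-only shape, with raw_key and raw_value as hypothetical bindings:

// Sketch only: verify-only txn; succeeds (and writes nothing) when the stored value matches.
let txn = Txn::new()
    .when(vec![Compare::new(raw_key.clone(), CompareOp::Equal, Some(raw_value))])
    .or_else(vec![TxnOp::Get(raw_key)]);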

View File

@@ -1,86 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
use crate::error::Result;
use crate::key::MAINTENANCE_KEY;
use crate::kv_backend::KvBackendRef;
use crate::rpc::store::PutRequest;
pub type MaintenanceModeManagerRef = Arc<MaintenanceModeManager>;
/// The maintenance mode manager.
///
/// Used to enable or disable maintenance mode.
#[derive(Clone)]
pub struct MaintenanceModeManager {
kv_backend: KvBackendRef,
}
impl MaintenanceModeManager {
pub fn new(kv_backend: KvBackendRef) -> Self {
Self { kv_backend }
}
/// Enables maintenance mode.
pub async fn set_maintenance_mode(&self) -> Result<()> {
let req = PutRequest {
key: Vec::from(MAINTENANCE_KEY),
value: vec![],
prev_kv: false,
};
self.kv_backend.put(req).await?;
Ok(())
}
/// Disables maintenance mode.
pub async fn unset_maintenance_mode(&self) -> Result<()> {
self.kv_backend
.delete(MAINTENANCE_KEY.as_bytes(), false)
.await?;
Ok(())
}
/// Returns true if maintenance mode is enabled.
pub async fn maintenance_mode(&self) -> Result<bool> {
self.kv_backend.exists(MAINTENANCE_KEY.as_bytes()).await
}
}
#[cfg(test)]
mod tests {
use std::sync::Arc;
use crate::key::maintenance::MaintenanceModeManager;
use crate::kv_backend::memory::MemoryKvBackend;
#[tokio::test]
async fn test_maintenance_mode_manager() {
let maintenance_mode_manager = Arc::new(MaintenanceModeManager::new(Arc::new(
MemoryKvBackend::new(),
)));
assert!(!maintenance_mode_manager.maintenance_mode().await.unwrap());
maintenance_mode_manager
.set_maintenance_mode()
.await
.unwrap();
assert!(maintenance_mode_manager.maintenance_mode().await.unwrap());
maintenance_mode_manager
.unset_maintenance_mode()
.await
.unwrap();
assert!(!maintenance_mode_manager.maintenance_mode().await.unwrap());
}
}

View File

@@ -1,125 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Schema-level metadata manager.
use std::sync::Arc;
use snafu::OptionExt;
use store_api::storage::TableId;
use crate::error::TableInfoNotFoundSnafu;
use crate::key::schema_name::{SchemaManager, SchemaNameKey};
use crate::key::table_info::{TableInfoManager, TableInfoManagerRef};
use crate::kv_backend::KvBackendRef;
use crate::{error, SchemaOptions};
pub type SchemaMetadataManagerRef = Arc<SchemaMetadataManager>;
pub struct SchemaMetadataManager {
table_info_manager: TableInfoManagerRef,
schema_manager: SchemaManager,
#[cfg(any(test, feature = "testing"))]
kv_backend: KvBackendRef,
}
impl SchemaMetadataManager {
/// Creates a new schema metadata manager.
#[cfg(not(any(test, feature = "testing")))]
pub fn new(kv_backend: KvBackendRef) -> Self {
let table_info_manager = Arc::new(TableInfoManager::new(kv_backend.clone()));
let schema_manager = SchemaManager::new(kv_backend);
Self {
table_info_manager,
schema_manager,
}
}
/// Creates a new schema metadata manager.
#[cfg(any(test, feature = "testing"))]
pub fn new(kv_backend: KvBackendRef) -> Self {
let table_info_manager = Arc::new(TableInfoManager::new(kv_backend.clone()));
let schema_manager = SchemaManager::new(kv_backend.clone());
Self {
table_info_manager,
schema_manager,
kv_backend,
}
}
/// Gets schema options by table id.
pub async fn get_schema_options_by_table_id(
&self,
table_id: TableId,
) -> error::Result<Option<SchemaOptions>> {
let table_info = self
.table_info_manager
.get(table_id)
.await?
.with_context(|| TableInfoNotFoundSnafu {
table: format!("table id: {}", table_id),
})?;
let key = SchemaNameKey::new(
&table_info.table_info.catalog_name,
&table_info.table_info.schema_name,
);
self.schema_manager
.get(key)
.await
.map(|v| v.map(|v| v.into_inner()))
}
#[cfg(any(test, feature = "testing"))]
pub async fn register_region_table_info(
&self,
table_id: TableId,
table_name: &str,
schema_name: &str,
catalog_name: &str,
schema_value: Option<crate::key::schema_name::SchemaNameValue>,
) {
use table::metadata::{RawTableInfo, TableType};
let value = crate::key::table_info::TableInfoValue::new(RawTableInfo {
ident: Default::default(),
name: table_name.to_string(),
desc: None,
catalog_name: catalog_name.to_string(),
schema_name: schema_name.to_string(),
meta: Default::default(),
table_type: TableType::Base,
});
let (txn, _) = self
.table_info_manager
.build_create_txn(table_id, &value)
.unwrap();
let resp = self.kv_backend.txn(txn).await.unwrap();
assert!(resp.succeeded, "Failed to create table metadata");
let key = SchemaNameKey {
catalog: catalog_name,
schema: schema_name,
};
self.schema_manager
.create(key, schema_value, false)
.await
.expect("Failed to create schema metadata");
common_telemetry::info!(
"Register table: {}, id: {}, schema: {}, catalog: {}",
table_name,
table_id,
schema_name,
catalog_name
);
}
}
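For context, a hedged sketch of how the removed SchemaMetadataManager chains the two lookups above (table id -> table info -> schema key -> schema options); kv_backend and the table id are placeholders:

// Sketch only: resolve schema options (e.g. TTL) for a region's table.
let manager = SchemaMetadataManager::new(kv_backend.clone());
if let Some(opts) = manager.get_schema_options_by_table_id(1024).await? {
    // SchemaOptions is an alias of SchemaNameValue, so the TTL is available here.
    common_telemetry::info!("schema ttl: {:?}", opts.ttl);
}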

View File

@@ -21,14 +21,10 @@ use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use futures::stream::BoxStream;
use humantime_serde::re::humantime;
use serde::{Deserialize, Serialize};
use snafu::{ensure, OptionExt, ResultExt};
use snafu::{OptionExt, ResultExt};
use super::txn_helper::TxnOpGetResponseSet;
use super::DeserializedValueWithBytes;
use crate::ensure_values;
use crate::error::{self, Error, InvalidMetadataSnafu, ParseOptionSnafu, Result};
use crate::key::{MetadataKey, SCHEMA_NAME_KEY_PATTERN, SCHEMA_NAME_KEY_PREFIX};
use crate::kv_backend::txn::Txn;
use crate::kv_backend::KvBackendRef;
use crate::range_stream::{PaginationStream, DEFAULT_PAGE_SIZE};
use crate::rpc::store::RangeRequest;
@@ -175,8 +171,6 @@ pub struct SchemaManager {
kv_backend: KvBackendRef,
}
pub type SchemaNameDecodeResult = Result<Option<DeserializedValueWithBytes<SchemaNameValue>>>;
impl SchemaManager {
pub fn new(kv_backend: KvBackendRef) -> Self {
Self { kv_backend }
@@ -210,15 +204,11 @@ impl SchemaManager {
self.kv_backend.exists(&raw_key).await
}
pub async fn get(
&self,
schema: SchemaNameKey<'_>,
) -> Result<Option<DeserializedValueWithBytes<SchemaNameValue>>> {
pub async fn get(&self, schema: SchemaNameKey<'_>) -> Result<Option<SchemaNameValue>> {
let raw_key = schema.to_bytes();
self.kv_backend
.get(&raw_key)
.await?
.map(|x| DeserializedValueWithBytes::from_inner_slice(&x.value))
let value = self.kv_backend.get(&raw_key).await?;
value
.and_then(|v| SchemaNameValue::try_from_raw_value(v.value.as_ref()).transpose())
.transpose()
}
@@ -230,54 +220,6 @@ impl SchemaManager {
Ok(())
}
pub(crate) fn build_update_txn(
&self,
schema: SchemaNameKey<'_>,
current_schema_value: &DeserializedValueWithBytes<SchemaNameValue>,
new_schema_value: &SchemaNameValue,
) -> Result<(
Txn,
impl FnOnce(&mut TxnOpGetResponseSet) -> SchemaNameDecodeResult,
)> {
let raw_key = schema.to_bytes();
let raw_value = current_schema_value.get_raw_bytes();
let new_raw_value: Vec<u8> = new_schema_value.try_as_raw_value()?;
let txn = Txn::compare_and_put(raw_key.clone(), raw_value, new_raw_value);
Ok((
txn,
TxnOpGetResponseSet::decode_with(TxnOpGetResponseSet::filter(raw_key)),
))
}
/// Updates a [SchemaNameKey].
pub async fn update(
&self,
schema: SchemaNameKey<'_>,
current_schema_value: &DeserializedValueWithBytes<SchemaNameValue>,
new_schema_value: &SchemaNameValue,
) -> Result<()> {
let (txn, on_failure) =
self.build_update_txn(schema, current_schema_value, new_schema_value)?;
let mut r = self.kv_backend.txn(txn).await?;
if !r.succeeded {
let mut set = TxnOpGetResponseSet::from(&mut r.responses);
let remote_schema_value = on_failure(&mut set)?
.context(error::UnexpectedSnafu {
err_msg:
"Reads the empty schema name value in comparing operation of updating schema name value",
})?
.into_inner();
let op_name = "the updating schema name value";
ensure_values!(&remote_schema_value, new_schema_value, op_name);
}
Ok(())
}
/// Returns a schema stream that lists all schemas belonging to the target `catalog`.
pub fn schema_names(&self, catalog: &str) -> BoxStream<'static, Result<String>> {
let start_key = SchemaNameKey::range_start_key(catalog);
@@ -364,42 +306,4 @@ mod tests {
assert!(!manager.exists(wrong_schema_key).await.unwrap());
}
#[tokio::test]
async fn test_update_schema_value() {
let manager = SchemaManager::new(Arc::new(MemoryKvBackend::default()));
let schema_key = SchemaNameKey::new("my-catalog", "my-schema");
manager.create(schema_key, None, false).await.unwrap();
let current_schema_value = manager.get(schema_key).await.unwrap().unwrap();
let new_schema_value = SchemaNameValue {
ttl: Some(Duration::from_secs(10)),
};
manager
.update(schema_key, &current_schema_value, &new_schema_value)
.await
.unwrap();
// Update with the same value, should be ok
manager
.update(schema_key, &current_schema_value, &new_schema_value)
.await
.unwrap();
let new_schema_value = SchemaNameValue {
ttl: Some(Duration::from_secs(40)),
};
let incorrect_schema_value = SchemaNameValue {
ttl: Some(Duration::from_secs(20)),
}
.try_as_raw_value()
.unwrap();
let incorrect_schema_value =
DeserializedValueWithBytes::from_inner_slice(&incorrect_schema_value).unwrap();
manager
.update(schema_key, &incorrect_schema_value, &new_schema_value)
.await
.unwrap_err();
}
}

View File

@@ -134,7 +134,6 @@ impl TableInfoValue {
}
pub type TableInfoManagerRef = Arc<TableInfoManager>;
#[derive(Clone)]
pub struct TableInfoManager {
kv_backend: KvBackendRef,

View File

@@ -54,7 +54,4 @@ pub type DatanodeId = u64;
/// The id of the flownode.
pub type FlownodeId = u64;
/// Schema options.
pub type SchemaOptions = key::schema_name::SchemaNameValue;
pub use instruction::RegionIdent;

View File

@@ -14,29 +14,25 @@
use std::collections::{HashMap, HashSet};
use std::result;
use std::time::Duration;
use api::v1::alter_database_expr::Kind as PbAlterDatabaseKind;
use api::v1::meta::ddl_task_request::Task;
use api::v1::meta::{
AlterDatabaseTask as PbAlterDatabaseTask, AlterTableTask as PbAlterTableTask,
AlterTableTasks as PbAlterTableTasks, CreateDatabaseTask as PbCreateDatabaseTask,
CreateFlowTask as PbCreateFlowTask, CreateTableTask as PbCreateTableTask,
CreateTableTasks as PbCreateTableTasks, CreateViewTask as PbCreateViewTask,
DdlTaskRequest as PbDdlTaskRequest, DdlTaskResponse as PbDdlTaskResponse,
DropDatabaseTask as PbDropDatabaseTask, DropFlowTask as PbDropFlowTask,
DropTableTask as PbDropTableTask, DropTableTasks as PbDropTableTasks,
DropViewTask as PbDropViewTask, Partition, ProcedureId,
AlterTableTask as PbAlterTableTask, AlterTableTasks as PbAlterTableTasks,
CreateDatabaseTask as PbCreateDatabaseTask, CreateFlowTask as PbCreateFlowTask,
CreateTableTask as PbCreateTableTask, CreateTableTasks as PbCreateTableTasks,
CreateViewTask as PbCreateViewTask, DdlTaskRequest as PbDdlTaskRequest,
DdlTaskResponse as PbDdlTaskResponse, DropDatabaseTask as PbDropDatabaseTask,
DropFlowTask as PbDropFlowTask, DropTableTask as PbDropTableTask,
DropTableTasks as PbDropTableTasks, DropViewTask as PbDropViewTask, Partition, ProcedureId,
TruncateTableTask as PbTruncateTableTask,
};
use api::v1::{
AlterDatabaseExpr, AlterTableExpr, CreateDatabaseExpr, CreateFlowExpr, CreateTableExpr,
CreateViewExpr, DropDatabaseExpr, DropFlowExpr, DropTableExpr, DropViewExpr, ExpireAfter,
Option as PbOption, QueryContext as PbQueryContext, TruncateTableExpr,
AlterExpr, CreateDatabaseExpr, CreateFlowExpr, CreateTableExpr, CreateViewExpr,
DropDatabaseExpr, DropFlowExpr, DropTableExpr, DropViewExpr, ExpireAfter,
QueryContext as PbQueryContext, TruncateTableExpr,
};
use base64::engine::general_purpose;
use base64::Engine as _;
use humantime_serde::re::humantime;
use prost::Message;
use serde::{Deserialize, Serialize};
use serde_with::{serde_as, DefaultOnNull};
@@ -46,7 +42,7 @@ use table::metadata::{RawTableInfo, TableId};
use table::table_name::TableName;
use table::table_reference::TableReference;
use crate::error::{self, InvalidSetDatabaseOptionSnafu, InvalidUnsetDatabaseOptionSnafu, Result};
use crate::error::{self, Result};
use crate::key::FlowId;
/// DDL tasks
@@ -61,7 +57,6 @@ pub enum DdlTask {
AlterLogicalTables(Vec<AlterTableTask>),
CreateDatabase(CreateDatabaseTask),
DropDatabase(DropDatabaseTask),
AlterDatabase(AlterDatabaseTask),
CreateFlow(CreateFlowTask),
DropFlow(DropFlowTask),
CreateView(CreateViewTask),
@@ -104,7 +99,7 @@ impl DdlTask {
}
/// Creates a [`DdlTask`] to alter several logical tables.
pub fn new_alter_logical_tables(table_data: Vec<AlterTableExpr>) -> Self {
pub fn new_alter_logical_tables(table_data: Vec<AlterExpr>) -> Self {
DdlTask::AlterLogicalTables(
table_data
.into_iter()
@@ -154,13 +149,8 @@ impl DdlTask {
})
}
/// Creates a [`DdlTask`] to alter a database.
pub fn new_alter_database(alter_expr: AlterDatabaseExpr) -> Self {
DdlTask::AlterDatabase(AlterDatabaseTask { alter_expr })
}
/// Creates a [`DdlTask`] to alter a table.
pub fn new_alter_table(alter_table: AlterTableExpr) -> Self {
pub fn new_alter_table(alter_table: AlterExpr) -> Self {
DdlTask::AlterTable(AlterTableTask { alter_table })
}
@@ -233,9 +223,6 @@ impl TryFrom<Task> for DdlTask {
Task::DropDatabaseTask(drop_database) => {
Ok(DdlTask::DropDatabase(drop_database.try_into()?))
}
Task::AlterDatabaseTask(alter_database) => {
Ok(DdlTask::AlterDatabase(alter_database.try_into()?))
}
Task::CreateFlowTask(create_flow) => Ok(DdlTask::CreateFlow(create_flow.try_into()?)),
Task::DropFlowTask(drop_flow) => Ok(DdlTask::DropFlow(drop_flow.try_into()?)),
Task::CreateViewTask(create_view) => Ok(DdlTask::CreateView(create_view.try_into()?)),
@@ -285,7 +272,6 @@ impl TryFrom<SubmitDdlTaskRequest> for PbDdlTaskRequest {
}
DdlTask::CreateDatabase(task) => Task::CreateDatabaseTask(task.try_into()?),
DdlTask::DropDatabase(task) => Task::DropDatabaseTask(task.try_into()?),
DdlTask::AlterDatabase(task) => Task::AlterDatabaseTask(task.try_into()?),
DdlTask::CreateFlow(task) => Task::CreateFlowTask(task.into()),
DdlTask::DropFlow(task) => Task::DropFlowTask(task.into()),
DdlTask::CreateView(task) => Task::CreateViewTask(task.try_into()?),
@@ -694,8 +680,7 @@ impl<'de> Deserialize<'de> for CreateTableTask {
#[derive(Debug, PartialEq, Clone)]
pub struct AlterTableTask {
// TODO(CookiePieWw): Replace proto struct with user-defined struct
pub alter_table: AlterTableExpr,
pub alter_table: AlterExpr,
}
impl AlterTableTask {
@@ -947,125 +932,6 @@ impl TryFrom<DropDatabaseTask> for PbDropDatabaseTask {
}
}
#[derive(Debug, PartialEq, Clone)]
pub struct AlterDatabaseTask {
pub alter_expr: AlterDatabaseExpr,
}
impl TryFrom<AlterDatabaseTask> for PbAlterDatabaseTask {
type Error = error::Error;
fn try_from(task: AlterDatabaseTask) -> Result<Self> {
Ok(PbAlterDatabaseTask {
task: Some(task.alter_expr),
})
}
}
impl TryFrom<PbAlterDatabaseTask> for AlterDatabaseTask {
type Error = error::Error;
fn try_from(pb: PbAlterDatabaseTask) -> Result<Self> {
let alter_expr = pb.task.context(error::InvalidProtoMsgSnafu {
err_msg: "expected alter database",
})?;
Ok(AlterDatabaseTask { alter_expr })
}
}
impl TryFrom<PbAlterDatabaseKind> for AlterDatabaseKind {
type Error = error::Error;
fn try_from(pb: PbAlterDatabaseKind) -> Result<Self> {
match pb {
PbAlterDatabaseKind::SetDatabaseOptions(options) => {
Ok(AlterDatabaseKind::SetDatabaseOptions(SetDatabaseOptions(
options
.set_database_options
.into_iter()
.map(SetDatabaseOption::try_from)
.collect::<Result<Vec<_>>>()?,
)))
}
PbAlterDatabaseKind::UnsetDatabaseOptions(options) => Ok(
AlterDatabaseKind::UnsetDatabaseOptions(UnsetDatabaseOptions(
options
.keys
.iter()
.map(|key| UnsetDatabaseOption::try_from(key.as_str()))
.collect::<Result<Vec<_>>>()?,
)),
),
}
}
}
const TTL_KEY: &str = "ttl";
impl TryFrom<PbOption> for SetDatabaseOption {
type Error = error::Error;
fn try_from(PbOption { key, value }: PbOption) -> Result<Self> {
match key.to_ascii_lowercase().as_str() {
TTL_KEY => {
let ttl = if value.is_empty() {
Duration::from_secs(0)
} else {
humantime::parse_duration(&value)
.map_err(|_| InvalidSetDatabaseOptionSnafu { key, value }.build())?
};
Ok(SetDatabaseOption::Ttl(ttl))
}
_ => InvalidSetDatabaseOptionSnafu { key, value }.fail(),
}
}
}
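The removed conversion lowercases the key, treats an empty value as a zero TTL, and otherwise parses a humantime string. An illustration of the mapping; the PbOption values are made up:

// Sketch only: behavior of the removed SetDatabaseOption::try_from.
use std::time::Duration;
let set = SetDatabaseOption::try_from(PbOption { key: "TTL".into(), value: "1h".into() }).unwrap();
assert_eq!(set, SetDatabaseOption::Ttl(Duration::from_secs(3600)));
// An empty value resets the TTL to zero instead of erroring.
let zeroed = SetDatabaseOption::try_from(PbOption { key: "ttl".into(), value: "".into() }).unwrap();
assert_eq!(zeroed, SetDatabaseOption::Ttl(Duration::from_secs(0)));
// Unknown keys fail with InvalidSetDatabaseOption.
assert!(SetDatabaseOption::try_from(PbOption { key: "retention".into(), value: "1d".into() }).is_err());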
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
pub enum SetDatabaseOption {
Ttl(Duration),
}
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
pub enum UnsetDatabaseOption {
Ttl,
}
impl TryFrom<&str> for UnsetDatabaseOption {
type Error = error::Error;
fn try_from(key: &str) -> Result<Self> {
match key.to_ascii_lowercase().as_str() {
TTL_KEY => Ok(UnsetDatabaseOption::Ttl),
_ => InvalidUnsetDatabaseOptionSnafu { key }.fail(),
}
}
}
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
pub struct SetDatabaseOptions(pub Vec<SetDatabaseOption>);
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
pub struct UnsetDatabaseOptions(pub Vec<UnsetDatabaseOption>);
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
pub enum AlterDatabaseKind {
SetDatabaseOptions(SetDatabaseOptions),
UnsetDatabaseOptions(UnsetDatabaseOptions),
}
impl AlterDatabaseTask {
pub fn catalog(&self) -> &str {
&self.alter_expr.catalog_name
}
pub fn schema(&self) -> &str {
&self.alter_expr.schema_name
}
}
/// Create flow
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CreateFlowTask {
@@ -1252,7 +1118,7 @@ impl From<QueryContext> for PbQueryContext {
mod tests {
use std::sync::Arc;
use api::v1::{AlterTableExpr, ColumnDef, CreateTableExpr, SemanticType};
use api::v1::{AlterExpr, ColumnDef, CreateTableExpr, SemanticType};
use datatypes::schema::{ColumnSchema, RawSchema, SchemaBuilder};
use store_api::metric_engine_consts::METRIC_ENGINE_NAME;
use store_api::storage::ConcreteDataType;
@@ -1280,7 +1146,7 @@ mod tests {
#[test]
fn test_basic_ser_de_alter_table_task() {
let task = AlterTableTask {
alter_table: AlterTableExpr::default(),
alter_table: AlterExpr::default(),
};
let output = serde_json::to_vec(&task).unwrap();

View File

@@ -1,13 +0,0 @@
[package]
name = "common-options"
version.workspace = true
edition.workspace = true
license.workspace = true
[dependencies]
common-grpc.workspace = true
humantime-serde.workspace = true
serde.workspace = true
[lints]
workspace = true

View File

@@ -245,14 +245,6 @@ pub enum Error {
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Invalid vector string: {}", vec_str))]
InvalidVectorString {
vec_str: String,
source: DataTypeError,
#[snafu(implicit)]
location: Location,
},
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -281,8 +273,7 @@ impl ErrorExt for Error {
| Error::IntoVector { source, .. }
| Error::FromScalarValue { source, .. }
| Error::ConvertArrowSchema { source, .. }
| Error::FromArrowArray { source, .. }
| Error::InvalidVectorString { source, .. } => source.status_code(),
| Error::FromArrowArray { source, .. } => source.status_code(),
Error::MissingTableMutationHandler { .. }
| Error::MissingProcedureServiceHandler { .. }

View File

@@ -20,7 +20,6 @@ pin-project.workspace = true
serde.workspace = true
serde_json.workspace = true
snafu.workspace = true
tokio.workspace = true
[dev-dependencies]
tokio.workspace = true

View File

@@ -161,13 +161,6 @@ pub enum Error {
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Stream timeout"))]
StreamTimeout {
#[snafu(implicit)]
location: Location,
#[snafu(source)]
error: tokio::time::error::Elapsed,
},
}
impl ErrorExt for Error {
@@ -197,8 +190,6 @@ impl ErrorExt for Error {
Error::SchemaConversion { source, .. } | Error::CastVector { source, .. } => {
source.status_code()
}
Error::StreamTimeout { .. } => StatusCode::Cancelled,
}
}

View File

@@ -192,6 +192,7 @@ pub fn init_global_logging(
if opts.log_format == LogFormat::Json {
Some(
Layer::new()
.with_thread_ids(true)
.json()
.with_writer(writer)
.with_ansi(atty::is(atty::Stream::Stdout))
@@ -200,6 +201,7 @@ pub fn init_global_logging(
} else {
Some(
Layer::new()
.with_thread_ids(true)
.with_writer(writer)
.with_ansi(atty::is(atty::Stream::Stdout))
.boxed(),
@@ -228,13 +230,20 @@ pub fn init_global_logging(
if opts.log_format == LogFormat::Json {
Some(
Layer::new()
.with_thread_ids(true)
.json()
.with_writer(writer)
.with_ansi(false)
.boxed(),
)
} else {
Some(Layer::new().with_writer(writer).with_ansi(false).boxed())
Some(
Layer::new()
.with_thread_ids(true)
.with_writer(writer)
.with_ansi(false)
.boxed(),
)
}
} else {
None
@@ -260,6 +269,7 @@ pub fn init_global_logging(
Some(
Layer::new()
.json()
.with_thread_ids(true)
.with_writer(writer)
.with_ansi(false)
.with_filter(filter::LevelFilter::ERROR)
@@ -268,6 +278,7 @@ pub fn init_global_logging(
} else {
Some(
Layer::new()
.with_thread_ids(true)
.with_writer(writer)
.with_ansi(false)
.with_filter(filter::LevelFilter::ERROR)

View File

@@ -427,8 +427,7 @@ mod test {
common_telemetry::init_default_ut_logging();
let mut region_server = mock_region_server();
let mut engine_env = TestEnv::with_prefix("region-alive-keeper");
let engine = engine_env.create_engine(MitoConfig::default()).await;
let engine = Arc::new(engine);
let engine = Arc::new(engine_env.create_engine(MitoConfig::default()).await);
region_server.register_engine(engine.clone());
let alive_keeper = Arc::new(RegionAliveKeeper::new(region_server.clone(), 100));

View File

@@ -14,8 +14,6 @@
//! Datanode configurations
use core::time::Duration;
use common_base::readable_size::ReadableSize;
use common_base::secrets::{ExposeSecret, SecretString};
use common_config::Configurable;
@@ -32,7 +30,7 @@ use servers::heartbeat_options::HeartbeatOptions;
use servers::http::HttpOptions;
use servers::Mode;
pub const DEFAULT_OBJECT_STORE_CACHE_SIZE: ReadableSize = ReadableSize::gb(1);
pub const DEFAULT_OBJECT_STORE_CACHE_SIZE: ReadableSize = ReadableSize::mb(256);
/// Default data home in file storage
const DEFAULT_DATA_HOME: &str = "/tmp/greptimedb";
@@ -114,38 +112,6 @@ pub struct ObjectStorageCacheConfig {
pub cache_capacity: Option<ReadableSize>,
}
/// The HTTP client options for the storage.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
#[serde(default)]
pub struct HttpClientConfig {
/// The maximum idle connection per host allowed in the pool.
pub(crate) pool_max_idle_per_host: u32,
/// The timeout for only the connect phase of an HTTP client.
#[serde(with = "humantime_serde")]
pub(crate) connect_timeout: Duration,
/// The total request timeout, applied from when the request starts connecting until the response body has finished.
/// Also considered a total deadline.
#[serde(with = "humantime_serde")]
pub(crate) timeout: Duration,
/// How long idle sockets are kept alive in the connection pool.
#[serde(with = "humantime_serde")]
pub(crate) pool_idle_timeout: Duration,
}
impl Default for HttpClientConfig {
fn default() -> Self {
Self {
pool_max_idle_per_host: 1024,
connect_timeout: Duration::from_secs(30),
timeout: Duration::from_secs(30),
pool_idle_timeout: Duration::from_secs(90),
}
}
}
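Since every field has a default and the struct derives Deserialize with #[serde(default)], partial overrides compose naturally. A sketch using struct-update syntax; the values are illustrative, and because the fields are pub(crate) this assumes crate-internal code:

// Sketch only: override two knobs, keep the remaining defaults.
use std::time::Duration;
let http_client = HttpClientConfig {
    pool_max_idle_per_host: 2048,
    connect_timeout: Duration::from_secs(10),
    ..HttpClientConfig::default()
};
assert_eq!(http_client.timeout, Duration::from_secs(30));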
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(default)]
pub struct S3Config {
@@ -160,7 +126,6 @@ pub struct S3Config {
pub region: Option<String>,
#[serde(flatten)]
pub cache: ObjectStorageCacheConfig,
pub http_client: HttpClientConfig,
}
impl PartialEq for S3Config {
@@ -173,7 +138,6 @@ impl PartialEq for S3Config {
&& self.endpoint == other.endpoint
&& self.region == other.region
&& self.cache == other.cache
&& self.http_client == other.http_client
}
}
@@ -190,7 +154,6 @@ pub struct OssConfig {
pub endpoint: String,
#[serde(flatten)]
pub cache: ObjectStorageCacheConfig,
pub http_client: HttpClientConfig,
}
impl PartialEq for OssConfig {
@@ -202,7 +165,6 @@ impl PartialEq for OssConfig {
&& self.access_key_secret.expose_secret() == other.access_key_secret.expose_secret()
&& self.endpoint == other.endpoint
&& self.cache == other.cache
&& self.http_client == other.http_client
}
}
@@ -220,7 +182,6 @@ pub struct AzblobConfig {
pub sas_token: Option<String>,
#[serde(flatten)]
pub cache: ObjectStorageCacheConfig,
pub http_client: HttpClientConfig,
}
impl PartialEq for AzblobConfig {
@@ -233,7 +194,6 @@ impl PartialEq for AzblobConfig {
&& self.endpoint == other.endpoint
&& self.sas_token == other.sas_token
&& self.cache == other.cache
&& self.http_client == other.http_client
}
}
@@ -251,7 +211,6 @@ pub struct GcsConfig {
pub endpoint: String,
#[serde(flatten)]
pub cache: ObjectStorageCacheConfig,
pub http_client: HttpClientConfig,
}
impl PartialEq for GcsConfig {
@@ -264,7 +223,6 @@ impl PartialEq for GcsConfig {
&& self.credential.expose_secret() == other.credential.expose_secret()
&& self.endpoint == other.endpoint
&& self.cache == other.cache
&& self.http_client == other.http_client
}
}
@@ -279,7 +237,6 @@ impl Default for S3Config {
endpoint: Option::default(),
region: Option::default(),
cache: ObjectStorageCacheConfig::default(),
http_client: HttpClientConfig::default(),
}
}
}
@@ -294,7 +251,6 @@ impl Default for OssConfig {
access_key_secret: SecretString::from(String::default()),
endpoint: String::default(),
cache: ObjectStorageCacheConfig::default(),
http_client: HttpClientConfig::default(),
}
}
}
@@ -310,7 +266,6 @@ impl Default for AzblobConfig {
endpoint: String::default(),
sas_token: Option::default(),
cache: ObjectStorageCacheConfig::default(),
http_client: HttpClientConfig::default(),
}
}
}
@@ -326,7 +281,6 @@ impl Default for GcsConfig {
credential: SecretString::from(String::default()),
endpoint: String::default(),
cache: ObjectStorageCacheConfig::default(),
http_client: HttpClientConfig::default(),
}
}
}

View File

@@ -18,13 +18,11 @@ use std::path::Path;
use std::sync::Arc;
use std::time::Duration;
use catalog::kvbackend::CachedKvBackendBuilder;
use catalog::memory::MemoryCatalogManager;
use common_base::Plugins;
use common_error::ext::BoxedError;
use common_greptimedb_telemetry::GreptimeDBTelemetryTask;
use common_meta::key::datanode_table::{DatanodeTableManager, DatanodeTableValue};
use common_meta::key::{SchemaMetadataManager, SchemaMetadataManagerRef};
use common_meta::kv_backend::KvBackendRef;
use common_meta::wal_options_allocator::prepare_wal_options;
pub use common_procedure::options::ProcedureConfig;
@@ -209,13 +207,7 @@ impl DatanodeBuilder {
(Box::new(NoopRegionServerEventListener) as _, None)
};
let cached_kv_backend = Arc::new(CachedKvBackendBuilder::new(kv_backend.clone()).build());
let schema_metadata_manager =
Arc::new(SchemaMetadataManager::new(cached_kv_backend.clone()));
let region_server = self
.new_region_server(schema_metadata_manager, region_event_listener)
.await?;
let region_server = self.new_region_server(region_event_listener).await?;
let datanode_table_manager = DatanodeTableManager::new(kv_backend.clone());
let table_values = datanode_table_manager
@@ -243,15 +235,7 @@ impl DatanodeBuilder {
}
let heartbeat_task = if let Some(meta_client) = meta_client {
Some(
HeartbeatTask::try_new(
&self.opts,
region_server.clone(),
meta_client,
cached_kv_backend,
)
.await?,
)
Some(HeartbeatTask::try_new(&self.opts, region_server.clone(), meta_client).await?)
} else {
None
};
@@ -328,7 +312,6 @@ impl DatanodeBuilder {
async fn new_region_server(
&self,
schema_metadata_manager: SchemaMetadataManagerRef,
event_listener: RegionServerEventListenerRef,
) -> Result<RegionServer> {
let opts: &DatanodeOptions = &self.opts;
@@ -357,13 +340,8 @@ impl DatanodeBuilder {
);
let object_store_manager = Self::build_object_store_manager(&opts.storage).await?;
let engines = Self::build_store_engines(
opts,
object_store_manager,
schema_metadata_manager,
self.plugins.clone(),
)
.await?;
let engines =
Self::build_store_engines(opts, object_store_manager, self.plugins.clone()).await?;
for engine in engines {
region_server.register_engine(engine);
}
@@ -377,7 +355,6 @@ impl DatanodeBuilder {
async fn build_store_engines(
opts: &DatanodeOptions,
object_store_manager: ObjectStoreManagerRef,
schema_metadata_manager: SchemaMetadataManagerRef,
plugins: Plugins,
) -> Result<Vec<RegionEngineRef>> {
let mut engines = vec![];
@@ -388,7 +365,6 @@ impl DatanodeBuilder {
opts,
object_store_manager.clone(),
config.clone(),
schema_metadata_manager.clone(),
plugins.clone(),
)
.await?;
@@ -414,7 +390,6 @@ impl DatanodeBuilder {
opts: &DatanodeOptions,
object_store_manager: ObjectStoreManagerRef,
config: MitoConfig,
schema_metadata_manager: SchemaMetadataManagerRef,
plugins: Plugins,
) -> Result<MitoEngine> {
let mito_engine = match &opts.wal {
@@ -424,7 +399,6 @@ impl DatanodeBuilder {
Self::build_raft_engine_log_store(&opts.storage.data_home, raft_engine_config)
.await?,
object_store_manager,
schema_metadata_manager,
plugins,
)
.await
@@ -455,7 +429,6 @@ impl DatanodeBuilder {
config,
Self::build_kafka_log_store(kafka_config, global_index_collector).await?,
object_store_manager,
schema_metadata_manager,
plugins,
)
.await

View File

@@ -18,7 +18,6 @@ use std::sync::Arc;
use std::time::Duration;
use api::v1::meta::{HeartbeatRequest, NodeInfo, Peer, RegionRole, RegionStat};
use catalog::kvbackend::CachedKvBackend;
use common_meta::datanode::REGION_STATISTIC_KEY;
use common_meta::distributed_time_constants::META_KEEP_ALIVE_INTERVAL_SECS;
use common_meta::heartbeat::handler::parse_mailbox_message::ParseMailboxMessageHandler;
@@ -40,7 +39,6 @@ use crate::alive_keeper::RegionAliveKeeper;
use crate::config::DatanodeOptions;
use crate::error::{self, MetaClientInitSnafu, Result};
use crate::event_listener::RegionServerEventReceiver;
use crate::heartbeat::handler::cache_invalidator::InvalidateSchemaCacheHandler;
use crate::metrics::{self, HEARTBEAT_RECV_COUNT, HEARTBEAT_SENT_COUNT};
use crate::region_server::RegionServer;
@@ -72,7 +70,6 @@ impl HeartbeatTask {
opts: &DatanodeOptions,
region_server: RegionServer,
meta_client: MetaClientRef,
cache_kv_backend: Arc<CachedKvBackend>,
) -> Result<Self> {
let region_alive_keeper = Arc::new(RegionAliveKeeper::new(
region_server.clone(),
@@ -82,7 +79,6 @@ impl HeartbeatTask {
region_alive_keeper.clone(),
Arc::new(ParseMailboxMessageHandler),
Arc::new(RegionHeartbeatResponseHandler::new(region_server.clone())),
Arc::new(InvalidateSchemaCacheHandler::new(cache_kv_backend)),
]));
Ok(Self {

View File

@@ -24,7 +24,6 @@ use futures::future::BoxFuture;
use snafu::OptionExt;
use store_api::storage::RegionId;
pub(crate) mod cache_invalidator;
mod close_region;
mod downgrade_region;
mod open_region;
@@ -135,7 +134,7 @@ impl HeartbeatResponseHandler for RegionHeartbeatResponseHandler {
}
});
Ok(HandleControl::Continue)
Ok(HandleControl::Done)
}
}
@@ -286,7 +285,7 @@ mod tests {
let mut ctx = heartbeat_env.create_handler_ctx((meta, instruction));
let control = heartbeat_handler.handle(&mut ctx).await.unwrap();
assert_matches!(control, HandleControl::Continue);
assert_matches!(control, HandleControl::Done);
let (_, reply) = heartbeat_env.receiver.recv().await.unwrap();
@@ -341,7 +340,7 @@ mod tests {
let mut ctx = heartbeat_env.create_handler_ctx((meta, instruction));
let control = heartbeat_handler.handle(&mut ctx).await.unwrap();
assert_matches!(control, HandleControl::Continue);
assert_matches!(control, HandleControl::Done);
let (_, reply) = heartbeat_env.receiver.recv().await.unwrap();
@@ -374,7 +373,7 @@ mod tests {
let mut ctx = heartbeat_env.create_handler_ctx((meta, instruction));
let control = heartbeat_handler.handle(&mut ctx).await.unwrap();
assert_matches!(control, HandleControl::Continue);
assert_matches!(control, HandleControl::Done);
let (_, reply) = heartbeat_env.receiver.recv().await.unwrap();
@@ -421,7 +420,7 @@ mod tests {
let mut ctx = heartbeat_env.create_handler_ctx((meta, instruction));
let control = heartbeat_handler.handle(&mut ctx).await.unwrap();
assert_matches!(control, HandleControl::Continue);
assert_matches!(control, HandleControl::Done);
let (_, reply) = heartbeat_env.receiver.recv().await.unwrap();
@@ -443,7 +442,7 @@ mod tests {
});
let mut ctx = heartbeat_env.create_handler_ctx((meta, instruction));
let control = heartbeat_handler.handle(&mut ctx).await.unwrap();
assert_matches!(control, HandleControl::Continue);
assert_matches!(control, HandleControl::Done);
let (_, reply) = heartbeat_env.receiver.recv().await.unwrap();

View File

@@ -1,167 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Schema cache invalidator handler
use std::sync::Arc;
use async_trait::async_trait;
use catalog::kvbackend::CachedKvBackend;
use common_meta::cache_invalidator::KvCacheInvalidator;
use common_meta::heartbeat::handler::{
HandleControl, HeartbeatResponseHandler, HeartbeatResponseHandlerContext,
};
use common_meta::instruction::{CacheIdent, Instruction};
use common_meta::key::schema_name::SchemaNameKey;
use common_meta::key::MetadataKey;
use common_telemetry::debug;
#[derive(Clone)]
pub(crate) struct InvalidateSchemaCacheHandler {
cached_kv_backend: Arc<CachedKvBackend>,
}
#[async_trait]
impl HeartbeatResponseHandler for InvalidateSchemaCacheHandler {
fn is_acceptable(&self, ctx: &HeartbeatResponseHandlerContext) -> bool {
matches!(
ctx.incoming_message.as_ref(),
Some((_, Instruction::InvalidateCaches(_)))
)
}
async fn handle(
&self,
ctx: &mut HeartbeatResponseHandlerContext,
) -> common_meta::error::Result<HandleControl> {
let Some((_, Instruction::InvalidateCaches(caches))) = ctx.incoming_message.take() else {
unreachable!("InvalidateSchemaCacheHandler: should be guarded by 'is_acceptable'")
};
debug!(
"InvalidateSchemaCacheHandler: invalidating caches: {:?}",
caches
);
for cache in caches {
let CacheIdent::SchemaName(schema_name) = cache else {
continue;
};
let key: SchemaNameKey = (&schema_name).into();
let key_bytes = key.to_bytes();
// invalidate cache
self.cached_kv_backend.invalidate_key(&key_bytes).await;
}
Ok(HandleControl::Done)
}
}
impl InvalidateSchemaCacheHandler {
pub fn new(cached_kv_backend: Arc<CachedKvBackend>) -> Self {
Self { cached_kv_backend }
}
}
#[cfg(test)]
mod tests {
use std::sync::Arc;
use std::time::Duration;
use api::v1::meta::HeartbeatResponse;
use catalog::kvbackend::CachedKvBackendBuilder;
use common_meta::heartbeat::handler::{
HandlerGroupExecutor, HeartbeatResponseHandlerContext, HeartbeatResponseHandlerExecutor,
};
use common_meta::heartbeat::mailbox::{HeartbeatMailbox, MessageMeta};
use common_meta::instruction::{CacheIdent, Instruction};
use common_meta::key::schema_name::{SchemaName, SchemaNameKey, SchemaNameValue};
use common_meta::key::{MetadataKey, SchemaMetadataManager};
use common_meta::kv_backend::memory::MemoryKvBackend;
use common_meta::kv_backend::KvBackend;
use common_meta::rpc::store::PutRequest;
use crate::heartbeat::handler::cache_invalidator::InvalidateSchemaCacheHandler;
#[tokio::test]
async fn test_invalidate_schema_cache_handler() {
let inner_kv = Arc::new(MemoryKvBackend::default());
let cached_kv = Arc::new(CachedKvBackendBuilder::new(inner_kv.clone()).build());
let schema_metadata_manager = SchemaMetadataManager::new(cached_kv.clone());
let schema_name = "test_schema";
let catalog_name = "test_catalog";
schema_metadata_manager
.register_region_table_info(
1,
"test_table",
schema_name,
catalog_name,
Some(SchemaNameValue {
ttl: Some(Duration::from_secs(1)),
}),
)
.await;
schema_metadata_manager
.get_schema_options_by_table_id(1)
.await
.unwrap();
let schema_key = SchemaNameKey::new(catalog_name, schema_name).to_bytes();
let new_schema_value = SchemaNameValue {
ttl: Some(Duration::from_secs(3)),
}
.try_as_raw_value()
.unwrap();
inner_kv
.put(PutRequest {
key: schema_key.clone(),
value: new_schema_value,
prev_kv: false,
})
.await
.unwrap();
let executor = Arc::new(HandlerGroupExecutor::new(vec![Arc::new(
InvalidateSchemaCacheHandler::new(cached_kv),
)]));
let (tx, _) = tokio::sync::mpsc::channel(8);
let mailbox = Arc::new(HeartbeatMailbox::new(tx));
// Invalidates the cached schema key via an InvalidateCaches instruction.
let response = HeartbeatResponse::default();
let mut ctx: HeartbeatResponseHandlerContext =
HeartbeatResponseHandlerContext::new(mailbox, response);
ctx.incoming_message = Some((
MessageMeta::new_test(1, "hi", "foo", "bar"),
Instruction::InvalidateCaches(vec![CacheIdent::SchemaName(SchemaName {
catalog_name: catalog_name.to_string(),
schema_name: schema_name.to_string(),
})]),
));
executor.handle(ctx).await.unwrap();
assert_eq!(
Some(Duration::from_secs(3)),
SchemaNameValue::try_from_raw_value(
&inner_kv.get(&schema_key).await.unwrap().unwrap().value
)
.unwrap()
.unwrap()
.ttl
);
}
}

View File

@@ -74,10 +74,6 @@ impl HandlerContext {
// Ignores flush request
if !writable {
warn!(
"Region: {region_id} is not writable, flush_timeout: {:?}",
flush_timeout
);
return self.downgrade_to_follower_gracefully(region_id).await;
}

View File

@@ -1355,7 +1355,7 @@ mod tests {
}
#[tokio::test]
async fn test_region_server_parallelism() {
async fn test_region_server_parallism() {
let p = RegionServerParallelism::from_opts(2, Duration::from_millis(1)).unwrap();
let first_query = p.acquire().await;
assert!(first_query.is_ok());

View File

@@ -32,7 +32,7 @@ use object_store::util::{join_dir, normalize_dir, with_instrument_layers};
use object_store::{Access, Error, HttpClient, ObjectStore, ObjectStoreBuilder};
use snafu::prelude::*;
use crate::config::{HttpClientConfig, ObjectStoreConfig, DEFAULT_OBJECT_STORE_CACHE_SIZE};
use crate::config::{ObjectStoreConfig, DEFAULT_OBJECT_STORE_CACHE_SIZE};
use crate::error::{self, Result};
pub(crate) async fn new_raw_object_store(
@@ -177,7 +177,7 @@ pub(crate) fn clean_temp_dir(dir: &str) -> Result<()> {
Ok(())
}
pub(crate) fn build_http_client(config: &HttpClientConfig) -> Result<HttpClient> {
pub(crate) fn build_http_client() -> Result<HttpClient> {
let http_builder = {
let mut builder = reqwest::ClientBuilder::new();
@@ -186,28 +186,25 @@ pub(crate) fn build_http_client(config: &HttpClientConfig) -> Result<HttpClient>
let pool_max_idle_per_host = env::var("_GREPTIMEDB_HTTP_POOL_MAX_IDLE_PER_HOST")
.ok()
.and_then(|v| v.parse::<usize>().ok())
.inspect(|_| warn!("'_GREPTIMEDB_HTTP_POOL_MAX_IDLE_PER_HOST' might be deprecated in the future. Please set it in the config file instead."))
.unwrap_or(config.pool_max_idle_per_host as usize);
.unwrap_or(usize::MAX);
builder = builder.pool_max_idle_per_host(pool_max_idle_per_host);
// Connect timeout defaults to 30s.
let connect_timeout = env::var("_GREPTIMEDB_HTTP_CONNECT_TIMEOUT")
.ok()
.and_then(|v| v.parse::<u64>().ok().map(Duration::from_secs))
.inspect(|_| warn!("'_GREPTIMEDB_HTTP_CONNECT_TIMEOUT' might be deprecated in the future. Please set it in the config file instead."))
.unwrap_or(config.connect_timeout);
builder = builder.connect_timeout(connect_timeout);
.and_then(|v| v.parse::<u64>().ok())
.unwrap_or(30);
builder = builder.connect_timeout(Duration::from_secs(connect_timeout));
// Pool connection idle timeout defaults to 90s.
let idle_timeout = env::var("_GREPTIMEDB_HTTP_POOL_IDLE_TIMEOUT")
.ok()
.and_then(|v| v.parse::<u64>().ok().map(Duration::from_secs))
.inspect(|_| warn!("'_GREPTIMEDB_HTTP_POOL_IDLE_TIMEOUT' might be deprecated in the future. Please set it in the config file instead."))
.unwrap_or(config.pool_idle_timeout);
.and_then(|v| v.parse::<u64>().ok())
.unwrap_or(90);
builder = builder.pool_idle_timeout(idle_timeout);
builder = builder.pool_idle_timeout(Duration::from_secs(idle_timeout));
builder.timeout(config.timeout)
builder
};
HttpClient::build(http_builder).context(error::InitBackendSnafu)

View File

@@ -30,15 +30,13 @@ pub(crate) async fn new_azblob_object_store(azblob_config: &AzblobConfig) -> Res
azblob_config.container, &root
);
let client = build_http_client(&azblob_config.http_client)?;
let mut builder = Azblob::default()
.root(&root)
.container(&azblob_config.container)
.endpoint(&azblob_config.endpoint)
.account_name(azblob_config.account_name.expose_secret())
.account_key(azblob_config.account_key.expose_secret())
.http_client(client);
.http_client(build_http_client()?);
if let Some(token) = &azblob_config.sas_token {
builder = builder.sas_token(token);

View File

@@ -29,8 +29,6 @@ pub(crate) async fn new_gcs_object_store(gcs_config: &GcsConfig) -> Result<Objec
gcs_config.bucket, &root
);
let client = build_http_client(&gcs_config.http_client);
let builder = Gcs::default()
.root(&root)
.bucket(&gcs_config.bucket)
@@ -38,7 +36,7 @@ pub(crate) async fn new_gcs_object_store(gcs_config: &GcsConfig) -> Result<Objec
.credential_path(gcs_config.credential_path.expose_secret())
.credential(gcs_config.credential.expose_secret())
.endpoint(&gcs_config.endpoint)
-.http_client(client?);
+.http_client(build_http_client()?);
Ok(ObjectStore::new(builder)
.context(error::InitBackendSnafu)?
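
Each of these constructors wraps lower-level failures with snafu's .context(...). A minimal sketch of that pattern, assuming the snafu crate; the Error enum here is a hypothetical stand-in for the real crate::error module:

use snafu::{ResultExt, Snafu};

// Hypothetical error type; the real InitBackend variant lives in crate::error.
#[derive(Debug, Snafu)]
enum Error {
    #[snafu(display("Failed to init backend"))]
    InitBackend { source: std::io::Error },
}

type Result<T> = std::result::Result<T, Error>;

fn open_backend(path: &str) -> Result<Vec<u8>> {
    // `InitBackendSnafu` is the context selector snafu derives for the
    // variant, the same shape as `error::InitBackendSnafu` in the diff.
    std::fs::read(path).context(InitBackendSnafu)
}

fn main() {
    if let Err(e) = open_backend("/definitely/missing") {
        println!("error: {e}");
    }
}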


@@ -29,15 +29,13 @@ pub(crate) async fn new_oss_object_store(oss_config: &OssConfig) -> Result<Objec
oss_config.bucket, &root
);
-let client = build_http_client(&oss_config.http_client)?;
let builder = Oss::default()
.root(&root)
.bucket(&oss_config.bucket)
.endpoint(&oss_config.endpoint)
.access_key_id(oss_config.access_key_id.expose_secret())
.access_key_secret(oss_config.access_key_secret.expose_secret())
-.http_client(client);
+.http_client(build_http_client()?);
Ok(ObjectStore::new(builder)
.context(error::InitBackendSnafu)?


@@ -30,14 +30,12 @@ pub(crate) async fn new_s3_object_store(s3_config: &S3Config) -> Result<ObjectSt
s3_config.bucket, &root
);
-let client = build_http_client(&s3_config.http_client)?;
let mut builder = S3::default()
.root(&root)
.bucket(&s3_config.bucket)
.access_key_id(s3_config.access_key_id.expose_secret())
.secret_access_key(s3_config.secret_access_key.expose_secret())
-.http_client(client);
+.http_client(build_http_client()?);
if s3_config.endpoint.is_some() {
builder = builder.endpoint(s3_config.endpoint.as_ref().unwrap());
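
The same shape repeats across the azblob, gcs, oss, and s3 diffs: required settings are chained on a consume-and-return builder, and optional ones (sas_token, endpoint) are applied by reassigning. A minimal sketch with a hypothetical builder, not the real opendal S3 type:

// Hypothetical builder mirroring the consume-and-return setters above.
#[derive(Debug, Default)]
struct FakeS3Builder {
    bucket: String,
    endpoint: Option<String>,
}

impl FakeS3Builder {
    fn bucket(mut self, bucket: &str) -> Self {
        self.bucket = bucket.to_string();
        self
    }

    fn endpoint(mut self, endpoint: &str) -> Self {
        self.endpoint = Some(endpoint.to_string());
        self
    }
}

fn main() {
    let configured_endpoint: Option<String> = Some("http://localhost:9000".into());

    // Required settings are chained; optional ones are applied by
    // reassigning the moved builder, as in the endpoint branch above.
    let mut builder = FakeS3Builder::default().bucket("greptime");
    if let Some(endpoint) = &configured_endpoint {
        builder = builder.endpoint(endpoint);
    }
    println!("{builder:?}");
}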


@@ -33,5 +33,3 @@ paste = "1.0"
serde.workspace = true
serde_json.workspace = true
snafu.workspace = true
-sqlparser.workspace = true
-sqlparser_derive = "0.1"


@@ -36,7 +36,7 @@ use crate::types::{
IntervalDayTimeType, IntervalMonthDayNanoType, IntervalType, IntervalYearMonthType, JsonType,
ListType, NullType, StringType, TimeMillisecondType, TimeType, TimestampMicrosecondType,
TimestampMillisecondType, TimestampNanosecondType, TimestampSecondType, TimestampType,
-UInt16Type, UInt32Type, UInt64Type, UInt8Type, VectorType,
+UInt16Type, UInt32Type, UInt64Type, UInt8Type,
};
use crate::value::Value;
use crate::vectors::MutableVector;
@@ -84,9 +84,6 @@ pub enum ConcreteDataType {
// JSON type:
Json(JsonType),
-// Vector type:
-Vector(VectorType),
}
impl fmt::Display for ConcreteDataType {
@@ -135,7 +132,6 @@ impl fmt::Display for ConcreteDataType {
ConcreteDataType::List(v) => write!(f, "{}", v.name()),
ConcreteDataType::Dictionary(v) => write!(f, "{}", v.name()),
ConcreteDataType::Json(v) => write!(f, "{}", v.name()),
-ConcreteDataType::Vector(v) => write!(f, "{}", v.name()),
}
}
}
@@ -171,7 +167,6 @@ impl ConcreteDataType {
| ConcreteDataType::Decimal128(_)
| ConcreteDataType::Binary(_)
| ConcreteDataType::Json(_)
-| ConcreteDataType::Vector(_)
)
}
@@ -230,10 +225,6 @@ impl ConcreteDataType {
matches!(self, ConcreteDataType::Json(_))
}
-pub fn is_vector(&self) -> bool {
-matches!(self, ConcreteDataType::Vector(_))
-}
pub fn numerics() -> Vec<ConcreteDataType> {
vec![
ConcreteDataType::int8_datatype(),
@@ -343,20 +334,6 @@ impl ConcreteDataType {
}
}
-pub fn as_json(&self) -> Option<JsonType> {
-match self {
-ConcreteDataType::Json(j) => Some(*j),
-_ => None,
-}
-}
-pub fn as_vector(&self) -> Option<VectorType> {
-match self {
-ConcreteDataType::Vector(v) => Some(*v),
-_ => None,
-}
-}
/// Checks if the data type can cast to another data type.
pub fn can_arrow_type_cast_to(&self, to_type: &ConcreteDataType) -> bool {
let array = arrow_array::new_empty_array(&self.as_arrow_type());
@@ -587,14 +564,6 @@ impl ConcreteDataType {
pub fn decimal128_default_datatype() -> ConcreteDataType {
Self::decimal128_datatype(DECIMAL128_MAX_PRECISION, DECIMAL_DEFAULT_SCALE)
}
-pub fn vector_datatype(dim: u32) -> ConcreteDataType {
-ConcreteDataType::Vector(VectorType::new(dim))
-}
-pub fn vector_default_datatype() -> ConcreteDataType {
-Self::vector_datatype(0)
-}
}
/// Data type abstraction.
@@ -788,7 +757,6 @@ mod tests {
assert!(ConcreteDataType::duration_microsecond_datatype().is_stringifiable());
assert!(ConcreteDataType::duration_nanosecond_datatype().is_stringifiable());
assert!(ConcreteDataType::decimal128_datatype(10, 2).is_stringifiable());
-assert!(ConcreteDataType::vector_default_datatype().is_stringifiable());
}
#[test]
@@ -941,9 +909,5 @@ mod tests {
.to_string(),
"Dictionary<Int32, String>"
);
-assert_eq!(
-ConcreteDataType::vector_datatype(3).to_string(),
-"Vector(3)"
-);
}
}
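
The removed code follows one consistent pattern: a parameterized enum variant plus a constructor, a predicate, an accessor, and a Display arm. A minimal self-contained sketch of that pattern, with illustrative stand-ins (MyType, VectorLike) rather than the real ConcreteDataType:

use std::fmt;

#[derive(Clone, Copy, Debug, PartialEq)]
struct VectorLike {
    dim: u32,
}

#[derive(Clone, Copy, Debug, PartialEq)]
enum MyType {
    Int64,
    Vector(VectorLike),
}

impl MyType {
    // Constructor, mirroring vector_datatype(dim).
    fn vector_datatype(dim: u32) -> Self {
        MyType::Vector(VectorLike { dim })
    }

    // Predicate, mirroring is_vector().
    fn is_vector(&self) -> bool {
        matches!(self, MyType::Vector(_))
    }

    // Accessor, mirroring as_vector().
    fn as_vector(&self) -> Option<VectorLike> {
        match self {
            MyType::Vector(v) => Some(*v),
            _ => None,
        }
    }
}

impl fmt::Display for MyType {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            MyType::Int64 => write!(f, "Int64"),
            MyType::Vector(v) => write!(f, "Vector({})", v.dim),
        }
    }
}

fn main() {
    let t = MyType::vector_datatype(3);
    assert!(t.is_vector());
    assert_eq!(t.as_vector(), Some(VectorLike { dim: 3 }));
    assert_eq!(t.to_string(), "Vector(3)");
    assert_eq!(MyType::Int64.to_string(), "Int64");
}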

Some files were not shown because too many files have changed in this diff.