Mirror of https://github.com/GreptimeTeam/greptimedb.git, synced 2026-01-03 20:02:54 +00:00
Compare commits
75 Commits
Comparing `build/add-…` with `v0.11.0`
Commit SHA1s:

03a28320d6, ce86ba3425, 2fcb95f50a, 1b642ea6a9, b35221ccb6,
bac7e7bac9, 903da8f4cb, c0f498b00c, 19373d806d, 3133f3fb4e,
8b944268da, dc83b0aa15, 2b699e735c, 7a3d6f2bd5, f9ebb58a12,
c732016fa0, 01a308fe6b, cf0c84bed1, 66c0445974, 7d8b256942,
5092f5f451, ff4c153d4b, a51853846a, 51c6eafb16, 5bdea1a755,
bcadce3988, 0f116c8501, c049ce6ab1, 6308e86e21, 36263830bb,
d931389a4c, 8bdef776b3, 91e933517a, a617e0dbef, 6130c70b63,
fae141ad0a, 57f31d14c8, 1cd6abb61f, e3927ea6f7, a6571d3392,
1255638e84, 1578c004b0, 5f8d849981, 3029b47a89, 14d997e2d1,
0aab68c23b, 027284ed1b, 6a958e2c36, db345c92df, 55ced9aa71,
3633f25d0c, 63bbfd04c7, 2f260d8b27, 4d8fe29ea8, dbb3f2d98d,
9926e3bc78, 0dd02e93cf, 73e6bf399d, 4402f638cd, c199604ece,
2b72e66536, 7c135c0ef9, 9289265f54, 485782af51, 4b263ef1cc,
08f59008cc, a2852affeb, cdba7b442f, 42bf7e9965, a70b4d7eba,
408013c22b, 22c8a7656b, 35898f0b2e, 1101e98651, 0089cf1b4f
@@ -8,7 +8,7 @@ inputs:
    default: 2
    description: "Number of Datanode replicas"
  meta-replicas:
-   default: 3
+   default: 1
    description: "Number of Metasrv replicas"
  image-registry:
    default: "docker.io"

@@ -58,7 +58,7 @@ runs:
      --set image.tag=${{ inputs.image-tag }} \
      --set base.podTemplate.main.resources.requests.cpu=50m \
      --set base.podTemplate.main.resources.requests.memory=256Mi \
-     --set base.podTemplate.main.resources.limits.cpu=1000m \
+     --set base.podTemplate.main.resources.limits.cpu=2000m \
      --set base.podTemplate.main.resources.limits.memory=2Gi \
      --set frontend.replicas=${{ inputs.frontend-replicas }} \
      --set datanode.replicas=${{ inputs.datanode-replicas }} \
.github/cargo-blacklist.txt (vendored, new file, 2 lines)

@@ -0,0 +1,2 @@
native-tls
openssl
.github/scripts/check-install-script.sh (vendored, new executable file, 14 lines)

@@ -0,0 +1,14 @@
#!/bin/sh

set -e

# Get the latest version of github.com/GreptimeTeam/greptimedb
VERSION=$(curl -s https://api.github.com/repos/GreptimeTeam/greptimedb/releases/latest | jq -r '.tag_name')

echo "Downloading the latest version: $VERSION"

# Download the install script
curl -fsSL https://raw.githubusercontent.com/greptimeteam/greptimedb/main/scripts/install.sh | sh -s $VERSION

# Execute the `greptime` command
./greptime --version
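The version argument that the check script forwards via `sh -s` can also be used to pin a specific release instead of the latest one; a minimal sketch (the tag shown is only illustrative):

```bash
# Hedged example: pin the install to a known release tag instead of querying the latest release.
# The argument after `sh -s` is forwarded to install.sh, exactly as the check script does with $VERSION.
curl -fsSL https://raw.githubusercontent.com/greptimeteam/greptimedb/main/scripts/install.sh | sh -s v0.11.0

# The binary lands in the current directory.
./greptime --version
```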
.github/workflows/dependency-check.yml (vendored, new file, 36 lines)

@@ -0,0 +1,36 @@
name: Check Dependencies

on:
  push:
    branches:
      - main
  pull_request:
    branches:
      - main

jobs:
  check-dependencies:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up Rust
        uses: actions-rust-lang/setup-rust-toolchain@v1

      - name: Run cargo tree
        run: cargo tree --prefix none > dependencies.txt

      - name: Extract dependency names
        run: awk '{print $1}' dependencies.txt > dependency_names.txt

      - name: Check for blacklisted crates
        run: |
          while read -r dep; do
            if grep -qFx "$dep" dependency_names.txt; then
              echo "Blacklisted crate '$dep' found in dependencies."
              exit 1
            fi
          done < .github/cargo-blacklist.txt
          echo "No blacklisted crates found."
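The same check is easy to reproduce locally before pushing. A minimal sketch that condenses the workflow's loop into a single `grep` call (assuming `cargo` is installed and the command is run from the repository root):

```bash
# Reproduce the CI dependency check locally.
cargo tree --prefix none | awk '{print $1}' | sort -u > /tmp/dependency_names.txt

# -x: whole-line match, -F: fixed strings, -f: read patterns from the blacklist file.
if grep -xFf .github/cargo-blacklist.txt /tmp/dependency_names.txt; then
  echo "Blacklisted crate(s) found in dependencies."
else
  echo "No blacklisted crates found."
fi
```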
.github/workflows/nightly-ci.yml (vendored, 4 lines changed)

@@ -22,6 +22,10 @@ jobs:
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
+
+      - name: Check install.sh
+        run: ./.github/scripts/check-install-script.sh
+
      - name: Run sqlness test
        uses: ./.github/actions/sqlness-test
        with:
.github/workflows/release.yml (vendored, 2 lines changed)

@@ -91,7 +91,7 @@ env:
  # The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nigthly-20230313;
  NIGHTLY_RELEASE_PREFIX: nightly
  # Note: The NEXT_RELEASE_VERSION should be modified manually by every formal release.
- NEXT_RELEASE_VERSION: v0.10.0
+ NEXT_RELEASE_VERSION: v0.11.0

# Permission reference: https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs
permissions:
@@ -17,6 +17,6 @@ repos:
  - id: fmt
  - id: clippy
    args: ["--workspace", "--all-targets", "--all-features", "--", "-D", "warnings"]
-   stages: [push]
+   stages: [pre-push]
  - id: cargo-check
    args: ["--workspace", "--all-targets", "--all-features"]
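For context, newer pre-commit releases renamed the `push` stage to `pre-push`, which is what the change above tracks. A hedged example of installing the hooks so the clippy check actually runs before a push (assuming pre-commit is installed):

```bash
# Install both commit-time and push-time hooks; the clippy hook above only runs at the pre-push stage.
pre-commit install
pre-commit install --hook-type pre-push

# Optionally run everything once against the whole tree.
pre-commit run --all-files
```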
@@ -7,6 +7,8 @@
 * [NiwakaDev](https://github.com/NiwakaDev)
 * [etolbakov](https://github.com/etolbakov)
+* [irenjj](https://github.com/irenjj)
+* [tisonkun](https://github.com/tisonkun)
 * [Lanqing Yang](https://github.com/lyang24)

 ## Team Members (in alphabetical order)

@@ -30,7 +32,6 @@
 * [shuiyisong](https://github.com/shuiyisong)
 * [sunchanglong](https://github.com/sunchanglong)
 * [sunng87](https://github.com/sunng87)
-* [tisonkun](https://github.com/tisonkun)
 * [v0y4g3r](https://github.com/v0y4g3r)
 * [waynexia](https://github.com/waynexia)
 * [xtang](https://github.com/xtang)
Cargo.lock (generated, 780 lines changed): file diff suppressed because it is too large.
@@ -4,6 +4,7 @@ members = [
     "src/auth",
     "src/cache",
     "src/catalog",
+    "src/cli",
     "src/client",
     "src/cmd",
     "src/common/base",

@@ -40,6 +41,7 @@ members = [
     "src/flow",
     "src/frontend",
     "src/index",
+    "src/log-query",
     "src/log-store",
     "src/meta-client",
     "src/meta-srv",

@@ -66,7 +68,7 @@ members = [
 resolver = "2"

 [workspace.package]
-version = "0.9.5"
+version = "0.11.0"
 edition = "2021"
 license = "Apache-2.0"

@@ -122,7 +124,7 @@ etcd-client = "0.13"
 fst = "0.4.7"
 futures = "0.3"
 futures-util = "0.3"
-greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "75c5fb569183bb3d0fa1023df9c2214df722b9b1" }
+greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "a875e976441188028353f7274a46a7e6e065c5d4" }
 hex = "0.4"
 humantime = "2.1"
 humantime-serde = "1.1"

@@ -167,7 +169,6 @@ rstest = "0.21"
 rstest_reuse = "0.7"
 rust_decimal = "1.33"
 rustc-hash = "2.0"
-schemars = "0.8"
 serde = { version = "1.0", features = ["derive"] }
 serde_json = { version = "1.0", features = ["float_roundtrip"] }
 serde_with = "3"

@@ -200,6 +201,7 @@ api = { path = "src/api" }
 auth = { path = "src/auth" }
 cache = { path = "src/cache" }
 catalog = { path = "src/catalog" }
+cli = { path = "src/cli" }
 client = { path = "src/client" }
 cmd = { path = "src/cmd", default-features = false }
 common-base = { path = "src/common/base" }
README.md (62 lines changed)

@@ -56,7 +56,7 @@
 - [Project Status](#project-status)
 - [Join the community](#community)
 - [Contributing](#contributing)
-- [Extension](#extension )
+- [Tools & Extensions](#tools--extensions)
 - [License](#license)
 - [Acknowledgement](#acknowledgement)

@@ -66,31 +66,33 @@

 ## Why GreptimeDB

-Our core developers have been building time-series data platforms for years. Based on our best-practices, GreptimeDB is born to give you:
+Our core developers have been building time-series data platforms for years. Based on our best practices, GreptimeDB was born to give you:

-* **Unified all kinds of time series**
+* **Unified Processing of Metrics, Logs, and Events**

-   GreptimeDB treats all time series as contextual events with timestamp, and thus unifies the processing of metrics, logs, and events. It supports analyzing metrics, logs, and events with SQL and PromQL, and doing streaming with continuous aggregation.
+   GreptimeDB unifies time series data processing by treating all data - whether metrics, logs, or events - as timestamped events with context. Users can analyze this data using either [SQL](https://docs.greptime.com/user-guide/query-data/sql) or [PromQL](https://docs.greptime.com/user-guide/query-data/promql) and leverage stream processing ([Flow](https://docs.greptime.com/user-guide/continuous-aggregation/overview)) to enable continuous aggregation. [Read more](https://docs.greptime.com/user-guide/concepts/data-model).

-* **Cloud-Edge collaboration**
+* **Cloud-native Distributed Database**

-   GreptimeDB can be deployed on ARM architecture-compatible Android/Linux systems as well as cloud environments from various vendors. Both sides run the same software, providing identical APIs and control planes, so your application can run at the edge or on the cloud without modification, and data synchronization also becomes extremely easy and efficient.
-
-* **Cloud-native distributed database**
-
-   By leveraging object storage (S3 and others), separating compute and storage, scaling stateless compute nodes arbitrarily, GreptimeDB implements seamless scalability. It also supports cross-cloud deployment with a built-in unified data access layer over different object storages.
+   Built for [Kubernetes](https://docs.greptime.com/user-guide/deployments/deploy-on-kubernetes/greptimedb-operator-management). GreptimeDB achieves seamless scalability with its [cloud-native architecture](https://docs.greptime.com/user-guide/concepts/architecture) of separated compute and storage, built on object storage (AWS S3, Azure Blob Storage, etc.) while enabling cross-cloud deployment through a unified data access layer.

 * **Performance and Cost-effective**

-   Flexible indexing capabilities and distributed, parallel-processing query engine, tackling high cardinality issues down. Optimized columnar layout for handling time-series data; compacted, compressed, and stored on various storage backends, particularly cloud object storage with 50x cost efficiency.
+   Written in pure Rust for superior performance and reliability. GreptimeDB features a distributed query engine with intelligent indexing to handle high cardinality data efficiently. Its optimized columnar storage achieves 50x cost efficiency on cloud object storage through advanced compression. [Benchmark reports](https://www.greptime.com/blogs/2024-09-09-report-summary).

-* **Compatible with InfluxDB, Prometheus and more protocols**
+* **Cloud-Edge Collaboration**

-   Widely adopted database protocols and APIs, including MySQL, PostgreSQL, and Prometheus Remote Storage, etc. [Read more](https://docs.greptime.com/user-guide/protocols/overview).
+   GreptimeDB seamlessly operates across cloud and edge (ARM/Android/Linux), providing consistent APIs and control plane for unified data management and efficient synchronization. [Learn how to run on Android](https://docs.greptime.com/user-guide/deployments/run-on-android/).
+
+* **Multi-protocol Ingestion, SQL & PromQL Ready**
+
+   Widely adopted database protocols and APIs, including MySQL, PostgreSQL, InfluxDB, OpenTelemetry, Loki and Prometheus, etc. Effortless Adoption & Seamless Migration. [Supported Protocols Overview](https://docs.greptime.com/user-guide/protocols/overview).

 For more detailed info please read [Why GreptimeDB](https://docs.greptime.com/user-guide/concepts/why-greptimedb).
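As a quick illustration of the multi-protocol bullet above (not part of the upstream README), writing and reading a sample point over HTTP might look roughly like this; the endpoint paths and the `public` database are the documented defaults for a local standalone instance, so treat the exact URLs as assumptions:

```shell
# Write one point via the InfluxDB line-protocol endpoint of a local standalone instance.
curl -i -X POST 'http://localhost:4000/v1/influxdb/write?db=public' \
  --data-binary 'monitor,host=host1 cpu=0.5,memory=0.4'

# Read it back with SQL over the HTTP API.
curl -X POST 'http://localhost:4000/v1/sql?db=public' \
  --data-urlencode 'sql=SELECT * FROM monitor'
```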
 ## Try GreptimeDB

-### 1. [GreptimePlay](https://greptime.com/playground)
+### 1. [Live Demo](https://greptime.com/playground)

 Try out the features of GreptimeDB right from your browser.

@@ -109,9 +111,18 @@ docker pull greptime/greptimedb
 Start a GreptimeDB container with:

 ```shell
-docker run --rm --name greptime --net=host greptime/greptimedb standalone start
+docker run -p 127.0.0.1:4000-4003:4000-4003 \
+  -v "$(pwd)/greptimedb:/tmp/greptimedb" \
+  --name greptime --rm \
+  greptime/greptimedb:latest standalone start \
+  --http-addr 0.0.0.0:4000 \
+  --rpc-addr 0.0.0.0:4001 \
+  --mysql-addr 0.0.0.0:4002 \
+  --postgres-addr 0.0.0.0:4003
 ```

+Access the dashboard via `http://localhost:4000/dashboard`.
+
 Read more about [Installation](https://docs.greptime.com/getting-started/installation/overview) on docs.
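As a usage note for the container command above (again not part of the upstream README), the published MySQL and PostgreSQL ports can be exercised with stock clients; a minimal sketch assuming the default ports and no authentication configured:

```shell
# Connect over the MySQL protocol (port 4002 as published above) and run a trivial query.
mysql -h 127.0.0.1 -P 4002 -e "SELECT 1;"

# Or over the PostgreSQL protocol (port 4003), against the default `public` database.
psql -h 127.0.0.1 -p 4003 -d public -c "SELECT 1;"
```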
 ## Getting Started

@@ -141,7 +152,7 @@ Run a standalone server:
 cargo run -- standalone start
 ```

-## Extension
+## Tools & Extensions

 ### Dashboard

@@ -158,14 +169,19 @@ cargo run -- standalone start

 ### Grafana Dashboard

-Our official Grafana dashboard is available at [grafana](grafana/README.md) directory.
+Our official Grafana dashboard for monitoring GreptimeDB is available at [grafana](grafana/README.md) directory.

 ## Project Status

-The current version has not yet reached the standards for General Availability.
-According to our Greptime 2024 Roadmap, we aim to achieve a production-level version with the release of v1.0 by the end of 2024. [Join Us](https://github.com/GreptimeTeam/greptimedb/issues/3412)
+GreptimeDB is currently in Beta. We are targeting GA (General Availability) with v1.0 release by Early 2025.

-We welcome you to test and use GreptimeDB. Some users have already adopted it in their production environments. If you're interested in trying it out, please use the latest stable release available.
+While in Beta, GreptimeDB is already:
+
+* Being used in production by early adopters
+* Actively maintained with regular releases, [about version number](https://docs.greptime.com/nightly/reference/about-greptimedb-version)
+* Suitable for testing and evaluation
+
+For production use, we recommend using the latest stable release.

 ## Community

@@ -184,12 +200,12 @@ In addition, you may:
 - Connect us with [Linkedin](https://www.linkedin.com/company/greptime/)
 - Follow us on [Twitter](https://twitter.com/greptime)

-## Commerial Support
+## Commercial Support

 If you are running GreptimeDB OSS in your organization, we offer additional
-enterprise addons, installation service, training and consulting. [Contact
+enterprise add-ons, installation services, training, and consulting. [Contact
 us](https://greptime.com/contactus) and we will reach out to you with more
-detail of our commerial license.
+detail of our commercial license.

 ## License
@@ -93,7 +93,7 @@
 | `storage` | -- | -- | The data storage options. |
 | `storage.data_home` | String | `/tmp/greptimedb/` | The working home directory. |
 | `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
-| `storage.cache_path` | String | Unset | Cache configuration for object storage such as 'S3' etc. It is recommended to configure it when using object storage for better performance.<br/>The local file cache directory. |
+| `storage.cache_path` | String | Unset | Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance.<br/>A local file directory, defaults to `{data_home}/object_cache/read`. An empty string means disabling. |
 | `storage.cache_capacity` | String | Unset | The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger. |
 | `storage.bucket` | String | Unset | The S3 bucket name.<br/>**It's only used when the storage type is `S3`, `Oss` and `Gcs`**. |
 | `storage.root` | String | Unset | The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.<br/>**It's only used when the storage type is `S3`, `Oss` and `Azblob`**. |

@@ -109,6 +109,11 @@
 | `storage.sas_token` | String | Unset | The sas token of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
 | `storage.endpoint` | String | Unset | The endpoint of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
 | `storage.region` | String | Unset | The region of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
+| `storage.http_client` | -- | -- | The http client options to the storage.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
+| `storage.http_client.pool_max_idle_per_host` | Integer | `1024` | The maximum idle connection per host allowed in the pool. |
+| `storage.http_client.connect_timeout` | String | `30s` | The timeout for only the connect phase of a http client. |
+| `storage.http_client.timeout` | String | `30s` | The total request timeout, applied from when the request starts connecting until the response body has finished.<br/>Also considered a total deadline. |
+| `storage.http_client.pool_idle_timeout` | String | `90s` | The timeout for idle sockets being kept-alive. |
 | `[[region_engine]]` | -- | -- | The region engine options. You can configure multiple region engines. |
 | `region_engine.mito` | -- | -- | The Mito engine options. |
 | `region_engine.mito.num_workers` | Integer | `8` | Number of region workers. |

@@ -126,12 +131,11 @@
 | `region_engine.mito.vector_cache_size` | String | Auto | Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
 | `region_engine.mito.page_cache_size` | String | Auto | Cache size for pages of SST row groups. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/8 of OS memory. |
 | `region_engine.mito.selector_result_cache_size` | String | Auto | Cache size for time series selector (e.g. `last_value()`). Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
-| `region_engine.mito.enable_experimental_write_cache` | Bool | `false` | Whether to enable the experimental write cache. It is recommended to enable it when using object storage for better performance. |
-| `region_engine.mito.experimental_write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}/write_cache`. |
-| `region_engine.mito.experimental_write_cache_size` | String | `1GiB` | Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger. |
+| `region_engine.mito.enable_experimental_write_cache` | Bool | `false` | Whether to enable the experimental write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance. |
+| `region_engine.mito.experimental_write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}/object_cache/write`. |
+| `region_engine.mito.experimental_write_cache_size` | String | `5GiB` | Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger. |
 | `region_engine.mito.experimental_write_cache_ttl` | String | Unset | TTL for write cache. |
 | `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
 | `region_engine.mito.scan_parallelism` | Integer | `0` | Parallelism to scan a region (default: 1/4 of cpu cores).<br/>- `0`: using the default value (1/4 of cpu cores).<br/>- `1`: scan in current thread.<br/>- `n`: scan in parallelism n. |
 | `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
 | `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
 | `region_engine.mito.min_compaction_interval` | String | `0m` | Minimum time interval between two compactions.<br/>To align with the old behavior, the default value is 0 (no restrictions). |

@@ -281,7 +285,7 @@
 | `data_home` | String | `/tmp/metasrv/` | The working home directory. |
 | `bind_addr` | String | `127.0.0.1:3002` | The bind address of metasrv. |
 | `server_addr` | String | `127.0.0.1:3002` | The communication server address for frontend and datanode to connect to metasrv, "127.0.0.1:3002" by default for localhost. |
-| `store_addr` | String | `127.0.0.1:2379` | Store server address default to etcd store. |
+| `store_addrs` | Array | -- | Store server address default to etcd store. |
 | `selector` | String | `round_robin` | Datanode selector type.<br/>- `round_robin` (default value)<br/>- `lease_based`<br/>- `load_based`<br/>For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector". |
 | `use_memory_store` | Bool | `false` | Store data in memory. |
 | `enable_telemetry` | Bool | `true` | Whether to enable greptimedb telemetry. |

@@ -416,7 +420,7 @@
 | `storage` | -- | -- | The data storage options. |
 | `storage.data_home` | String | `/tmp/greptimedb/` | The working home directory. |
 | `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
-| `storage.cache_path` | String | Unset | Cache configuration for object storage such as 'S3' etc. It is recommended to configure it when using object storage for better performance.<br/>The local file cache directory. |
+| `storage.cache_path` | String | Unset | Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance.<br/>A local file directory, defaults to `{data_home}/object_cache/read`. An empty string means disabling. |
 | `storage.cache_capacity` | String | Unset | The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger. |
 | `storage.bucket` | String | Unset | The S3 bucket name.<br/>**It's only used when the storage type is `S3`, `Oss` and `Gcs`**. |
 | `storage.root` | String | Unset | The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.<br/>**It's only used when the storage type is `S3`, `Oss` and `Azblob`**. |

@@ -432,6 +436,11 @@
 | `storage.sas_token` | String | Unset | The sas token of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
 | `storage.endpoint` | String | Unset | The endpoint of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
 | `storage.region` | String | Unset | The region of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
+| `storage.http_client` | -- | -- | The http client options to the storage.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
+| `storage.http_client.pool_max_idle_per_host` | Integer | `1024` | The maximum idle connection per host allowed in the pool. |
+| `storage.http_client.connect_timeout` | String | `30s` | The timeout for only the connect phase of a http client. |
+| `storage.http_client.timeout` | String | `30s` | The total request timeout, applied from when the request starts connecting until the response body has finished.<br/>Also considered a total deadline. |
+| `storage.http_client.pool_idle_timeout` | String | `90s` | The timeout for idle sockets being kept-alive. |
 | `[[region_engine]]` | -- | -- | The region engine options. You can configure multiple region engines. |
 | `region_engine.mito` | -- | -- | The Mito engine options. |
 | `region_engine.mito.num_workers` | Integer | `8` | Number of region workers. |

@@ -449,12 +458,11 @@
 | `region_engine.mito.vector_cache_size` | String | Auto | Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
 | `region_engine.mito.page_cache_size` | String | Auto | Cache size for pages of SST row groups. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/8 of OS memory. |
 | `region_engine.mito.selector_result_cache_size` | String | Auto | Cache size for time series selector (e.g. `last_value()`). Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
-| `region_engine.mito.enable_experimental_write_cache` | Bool | `false` | Whether to enable the experimental write cache. It is recommended to enable it when using object storage for better performance. |
-| `region_engine.mito.experimental_write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}/write_cache`. |
-| `region_engine.mito.experimental_write_cache_size` | String | `1GiB` | Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger. |
+| `region_engine.mito.enable_experimental_write_cache` | Bool | `false` | Whether to enable the experimental write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance. |
+| `region_engine.mito.experimental_write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}/object_cache/write`. |
+| `region_engine.mito.experimental_write_cache_size` | String | `5GiB` | Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger. |
 | `region_engine.mito.experimental_write_cache_ttl` | String | Unset | TTL for write cache. |
 | `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
 | `region_engine.mito.scan_parallelism` | Integer | `0` | Parallelism to scan a region (default: 1/4 of cpu cores).<br/>- `0`: using the default value (1/4 of cpu cores).<br/>- `1`: scan in current thread.<br/>- `n`: scan in parallelism n. |
 | `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
 | `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
 | `region_engine.mito.min_compaction_interval` | String | `0m` | Minimum time interval between two compactions.<br/>To align with the old behavior, the default value is 0 (no restrictions). |
@@ -294,14 +294,14 @@ data_home = "/tmp/greptimedb/"
 ## - `Oss`: the data is stored in the Aliyun OSS.
 type = "File"

-## Cache configuration for object storage such as 'S3' etc. It is recommended to configure it when using object storage for better performance.
-## The local file cache directory.
+## Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance.
+## A local file directory, defaults to `{data_home}/object_cache/read`. An empty string means disabling.
 ## @toml2docs:none-default
-cache_path = "/path/local_cache"
+#+ cache_path = ""

 ## The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger.
 ## @toml2docs:none-default
-cache_capacity = "1GiB"
+cache_capacity = "5GiB"

 ## The S3 bucket name.
 ## **It's only used when the storage type is `S3`, `Oss` and `Gcs`**.

@@ -375,6 +375,23 @@ endpoint = "https://s3.amazonaws.com"
 ## @toml2docs:none-default
 region = "us-west-2"

+## The http client options to the storage.
+## **It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**.
+[storage.http_client]
+
+## The maximum idle connection per host allowed in the pool.
+pool_max_idle_per_host = 1024
+
+## The timeout for only the connect phase of a http client.
+connect_timeout = "30s"
+
+## The total request timeout, applied from when the request starts connecting until the response body has finished.
+## Also considered a total deadline.
+timeout = "30s"
+
+## The timeout for idle sockets being kept-alive.
+pool_idle_timeout = "90s"
+
 # Custom storage options
 # [[storage.providers]]
 # name = "S3"

@@ -459,14 +476,14 @@ auto_flush_interval = "1h"
 ## @toml2docs:none-default="Auto"
 #+ selector_result_cache_size = "512MB"

-## Whether to enable the experimental write cache. It is recommended to enable it when using object storage for better performance.
+## Whether to enable the experimental write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance.
 enable_experimental_write_cache = false

-## File system path for write cache, defaults to `{data_home}/write_cache`.
+## File system path for write cache, defaults to `{data_home}/object_cache/write`.
 experimental_write_cache_path = ""

 ## Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger.
-experimental_write_cache_size = "1GiB"
+experimental_write_cache_size = "5GiB"

 ## TTL for write cache.
 ## @toml2docs:none-default

@@ -475,12 +492,6 @@ experimental_write_cache_ttl = "8h"
 ## Buffer size for SST writing.
 sst_write_buffer_size = "8MB"

-## Parallelism to scan a region (default: 1/4 of cpu cores).
-## - `0`: using the default value (1/4 of cpu cores).
-## - `1`: scan in current thread.
-## - `n`: scan in parallelism n.
-scan_parallelism = 0
-
 ## Capacity of the channel to send data from parallel scan tasks to the main task.
 parallel_scan_channel_size = 32
@@ -8,7 +8,7 @@ bind_addr = "127.0.0.1:3002"
 server_addr = "127.0.0.1:3002"

 ## Store server address default to etcd store.
-store_addr = "127.0.0.1:2379"
+store_addrs = ["127.0.0.1:2379"]

 ## Datanode selector type.
 ## - `round_robin` (default value)
@@ -332,14 +332,14 @@ data_home = "/tmp/greptimedb/"
 ## - `Oss`: the data is stored in the Aliyun OSS.
 type = "File"

-## Cache configuration for object storage such as 'S3' etc. It is recommended to configure it when using object storage for better performance.
-## The local file cache directory.
+## Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance.
+## A local file directory, defaults to `{data_home}/object_cache/read`. An empty string means disabling.
 ## @toml2docs:none-default
-cache_path = "/path/local_cache"
+#+ cache_path = ""

 ## The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger.
 ## @toml2docs:none-default
-cache_capacity = "1GiB"
+cache_capacity = "5GiB"

 ## The S3 bucket name.
 ## **It's only used when the storage type is `S3`, `Oss` and `Gcs`**.

@@ -413,6 +413,23 @@ endpoint = "https://s3.amazonaws.com"
 ## @toml2docs:none-default
 region = "us-west-2"

+## The http client options to the storage.
+## **It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**.
+[storage.http_client]
+
+## The maximum idle connection per host allowed in the pool.
+pool_max_idle_per_host = 1024
+
+## The timeout for only the connect phase of a http client.
+connect_timeout = "30s"
+
+## The total request timeout, applied from when the request starts connecting until the response body has finished.
+## Also considered a total deadline.
+timeout = "30s"
+
+## The timeout for idle sockets being kept-alive.
+pool_idle_timeout = "90s"
+
 # Custom storage options
 # [[storage.providers]]
 # name = "S3"

@@ -497,14 +514,14 @@ auto_flush_interval = "1h"
 ## @toml2docs:none-default="Auto"
 #+ selector_result_cache_size = "512MB"

-## Whether to enable the experimental write cache. It is recommended to enable it when using object storage for better performance.
+## Whether to enable the experimental write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance.
 enable_experimental_write_cache = false

-## File system path for write cache, defaults to `{data_home}/write_cache`.
+## File system path for write cache, defaults to `{data_home}/object_cache/write`.
 experimental_write_cache_path = ""

 ## Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger.
-experimental_write_cache_size = "1GiB"
+experimental_write_cache_size = "5GiB"

 ## TTL for write cache.
 ## @toml2docs:none-default

@@ -513,12 +530,6 @@ experimental_write_cache_ttl = "8h"
 ## Buffer size for SST writing.
 sst_write_buffer_size = "8MB"

-## Parallelism to scan a region (default: 1/4 of cpu cores).
-## - `0`: using the default value (1/4 of cpu cores).
-## - `1`: scan in current thread.
-## - `n`: scan in parallelism n.
-scan_parallelism = 0
-
 ## Capacity of the channel to send data from parallel scan tasks to the main task.
 parallel_scan_channel_size = 32
@@ -3,7 +3,7 @@
 ## HTTP API
 Sample at 99 Hertz, for 5 seconds, output report in [protobuf format](https://github.com/google/pprof/blob/master/proto/profile.proto).
 ```bash
-curl -s '0:4000/debug/prof/cpu' > /tmp/pprof.out
+curl -X POST -s '0:4000/debug/prof/cpu' > /tmp/pprof.out
 ```

 Then you can use `pprof` command with the protobuf file.

@@ -13,10 +13,10 @@ go tool pprof -top /tmp/pprof.out

 Sample at 99 Hertz, for 60 seconds, output report in flamegraph format.
 ```bash
-curl -s '0:4000/debug/prof/cpu?seconds=60&output=flamegraph' > /tmp/pprof.svg
+curl -X POST -s '0:4000/debug/prof/cpu?seconds=60&output=flamegraph' > /tmp/pprof.svg
 ```

 Sample at 49 Hertz, for 10 seconds, output report in text format.
 ```bash
-curl -s '0:4000/debug/prof/cpu?seconds=10&frequency=49&output=text' > /tmp/pprof.txt
+curl -X POST -s '0:4000/debug/prof/cpu?seconds=10&frequency=49&output=text' > /tmp/pprof.txt
 ```
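Once a protobuf profile has been collected as above, pprof's built-in web UI is often more convenient than `-top`; a small optional sketch, assuming the Go toolchain is installed:

```bash
# Open an interactive graph/flamegraph view of the collected CPU profile in the browser.
go tool pprof -http=:8080 /tmp/pprof.out
```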
@@ -23,13 +23,13 @@ curl https://raw.githubusercontent.com/brendangregg/FlameGraph/master/flamegraph
 Start GreptimeDB instance with environment variables:

 ```bash
-MALLOC_CONF=prof:true,lg_prof_interval:28 ./target/debug/greptime standalone start
+MALLOC_CONF=prof:true ./target/debug/greptime standalone start
 ```

 Dump memory profiling data through HTTP API:

 ```bash
-curl localhost:4000/debug/prof/mem > greptime.hprof
+curl -X POST localhost:4000/debug/prof/mem > greptime.hprof
 ```

 You can periodically dump profiling data and compare them to find the delta memory usage.
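A sketch of the "periodically dump" suggestion, using nothing beyond the POST endpoint shown above (the interval and file naming are arbitrary choices):

```bash
# Dump a timestamped heap profile every 5 minutes; successive files can then be compared offline.
while true; do
  curl -X POST -s "localhost:4000/debug/prof/mem" > "greptime-$(date +%Y%m%dT%H%M%S).hprof"
  sleep 300
done
```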
@@ -5,6 +5,13 @@ GreptimeDB's official Grafana dashboard.

 Status notify: we are still working on this config. It's expected to change frequently in the recent days. Please feel free to submit your feedback and/or contribution to this dashboard 🤗

+If you use Helm [chart](https://github.com/GreptimeTeam/helm-charts) to deploy GreptimeDB cluster, you can enable self-monitoring by setting the following values in your Helm chart:
+
+- `monitoring.enabled=true`: Deploys a standalone GreptimeDB instance dedicated to monitoring the cluster;
+- `grafana.enabled=true`: Deploys Grafana and automatically imports the monitoring dashboard;
+
+The standalone GreptimeDB instance will collect metrics from your cluster and the dashboard will be available in the Grafana UI. For detailed deployment instructions, please refer to our [Kubernetes deployment guide](https://docs.greptime.com/nightly/user-guide/deployments/deploy-on-kubernetes/getting-started).
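Applied to a Helm release, the two values called out above look roughly like this; the release name, namespace, and chart reference are placeholders for whatever your deployment actually uses:

```bash
# Hypothetical release/namespace/chart names; only the two --set flags come from the README above.
helm upgrade --install mycluster greptime/greptimedb-cluster \
  --namespace greptimedb \
  --set monitoring.enabled=true \
  --set grafana.enabled=true
```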
 # How to use

 ## `greptimedb.json`

File diff suppressed because it is too large.
@@ -1,4 +1,4 @@
-#!/usr/bin/env bash
+#!/bin/sh

 set -ue

@@ -15,7 +15,7 @@ GITHUB_ORG=GreptimeTeam
 GITHUB_REPO=greptimedb
 BIN=greptime

-function get_os_type() {
+get_os_type() {
   os_type="$(uname -s)"

   case "$os_type" in

@@ -31,7 +31,7 @@ function get_os_type() {
   esac
 }

-function get_arch_type() {
+get_arch_type() {
   arch_type="$(uname -m)"

   case "$arch_type" in

@@ -53,7 +53,7 @@ function get_arch_type() {
   esac
 }

-function download_artifact() {
+download_artifact() {
   if [ -n "${OS_TYPE}" ] && [ -n "${ARCH_TYPE}" ]; then
     # Use the latest stable released version.
     # GitHub API reference: https://docs.github.com/en/rest/releases/releases?apiVersion=2022-11-28#get-the-latest-release.
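A side note on the change above: `#!/usr/bin/env bash` and the `function name()` form are bash-isms, and dropping them lets the script run under a plain POSIX `/bin/sh`. A quick, hedged way to sanity-check that after editing (assuming the script lives at `scripts/install.sh`):

```bash
# Parse the script without executing it; a strict POSIX shell such as dash rejects bash-only syntax.
sh -n scripts/install.sh && echo "syntax OK"
```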
@@ -527,13 +527,14 @@ fn ddl_request_type(request: &DdlRequest) -> &'static str {
     match request.expr {
         Some(Expr::CreateDatabase(_)) => "ddl.create_database",
         Some(Expr::CreateTable(_)) => "ddl.create_table",
-        Some(Expr::Alter(_)) => "ddl.alter",
+        Some(Expr::AlterTable(_)) => "ddl.alter_table",
         Some(Expr::DropTable(_)) => "ddl.drop_table",
         Some(Expr::TruncateTable(_)) => "ddl.truncate_table",
         Some(Expr::CreateFlow(_)) => "ddl.create_flow",
         Some(Expr::DropFlow(_)) => "ddl.drop_flow",
         Some(Expr::CreateView(_)) => "ddl.create_view",
         Some(Expr::DropView(_)) => "ddl.drop_view",
+        Some(Expr::AlterDatabase(_)) => "ddl.alter_database",
         None => "ddl.empty",
     }
 }
src/cache/src/lib.rs (vendored, 62 lines changed)

@@ -19,9 +19,9 @@ use std::time::Duration;

 use catalog::kvbackend::new_table_cache;
 use common_meta::cache::{
-    new_table_flownode_set_cache, new_table_info_cache, new_table_name_cache,
-    new_table_route_cache, new_view_info_cache, CacheRegistry, CacheRegistryBuilder,
-    LayeredCacheRegistryBuilder,
+    new_schema_cache, new_table_flownode_set_cache, new_table_info_cache, new_table_name_cache,
+    new_table_route_cache, new_table_schema_cache, new_view_info_cache, CacheRegistry,
+    CacheRegistryBuilder, LayeredCacheRegistryBuilder,
 };
 use common_meta::kv_backend::KvBackendRef;
 use moka::future::CacheBuilder;

@@ -37,9 +37,47 @@ pub const TABLE_INFO_CACHE_NAME: &str = "table_info_cache";
 pub const VIEW_INFO_CACHE_NAME: &str = "view_info_cache";
 pub const TABLE_NAME_CACHE_NAME: &str = "table_name_cache";
 pub const TABLE_CACHE_NAME: &str = "table_cache";
+pub const SCHEMA_CACHE_NAME: &str = "schema_cache";
+pub const TABLE_SCHEMA_NAME_CACHE_NAME: &str = "table_schema_name_cache";
 pub const TABLE_FLOWNODE_SET_CACHE_NAME: &str = "table_flownode_set_cache";
 pub const TABLE_ROUTE_CACHE_NAME: &str = "table_route_cache";

+/// Builds cache registry for datanode, including:
+/// - Schema cache.
+/// - Table id to schema name cache.
+pub fn build_datanode_cache_registry(kv_backend: KvBackendRef) -> CacheRegistry {
+    // Builds table id schema name cache that never expires.
+    let cache = CacheBuilder::new(DEFAULT_CACHE_MAX_CAPACITY).build();
+    let table_id_schema_cache = Arc::new(new_table_schema_cache(
+        TABLE_SCHEMA_NAME_CACHE_NAME.to_string(),
+        cache,
+        kv_backend.clone(),
+    ));
+
+    // Builds schema cache
+    let cache = CacheBuilder::new(DEFAULT_CACHE_MAX_CAPACITY)
+        .time_to_live(DEFAULT_CACHE_TTL)
+        .time_to_idle(DEFAULT_CACHE_TTI)
+        .build();
+    let schema_cache = Arc::new(new_schema_cache(
+        SCHEMA_CACHE_NAME.to_string(),
+        cache,
+        kv_backend.clone(),
+    ));
+
+    CacheRegistryBuilder::default()
+        .add_cache(table_id_schema_cache)
+        .add_cache(schema_cache)
+        .build()
+}
+
 /// Builds cache registry for frontend and datanode, including:
 /// - Table info cache
 /// - Table name cache
 /// - Table route cache
 /// - Table flow node cache
 /// - View cache
+/// - Schema cache
 pub fn build_fundamental_cache_registry(kv_backend: KvBackendRef) -> CacheRegistry {
     // Builds table info cache
     let cache = CacheBuilder::new(DEFAULT_CACHE_MAX_CAPACITY)

@@ -95,12 +133,30 @@ pub fn build_fundamental_cache_registry(kv_backend: KvBackendRef) -> CacheRegist
         kv_backend.clone(),
     ));

+    // Builds schema cache
+    let cache = CacheBuilder::new(DEFAULT_CACHE_MAX_CAPACITY)
+        .time_to_live(DEFAULT_CACHE_TTL)
+        .time_to_idle(DEFAULT_CACHE_TTI)
+        .build();
+    let schema_cache = Arc::new(new_schema_cache(
+        SCHEMA_CACHE_NAME.to_string(),
+        cache,
+        kv_backend.clone(),
+    ));
+
+    let table_id_schema_cache = Arc::new(new_table_schema_cache(
+        TABLE_SCHEMA_NAME_CACHE_NAME.to_string(),
+        CacheBuilder::new(DEFAULT_CACHE_MAX_CAPACITY).build(),
+        kv_backend,
+    ));
     CacheRegistryBuilder::default()
         .add_cache(table_info_cache)
         .add_cache(table_name_cache)
         .add_cache(table_route_cache)
         .add_cache(view_info_cache)
         .add_cache(table_flownode_set_cache)
+        .add_cache(schema_cache)
+        .add_cache(table_id_schema_cache)
         .build()
 }
src/catalog/src/information_extension.rs (new file, 92 lines)

@@ -0,0 +1,92 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use api::v1::meta::ProcedureStatus;
use common_error::ext::BoxedError;
use common_meta::cluster::{ClusterInfo, NodeInfo};
use common_meta::datanode::RegionStat;
use common_meta::ddl::{ExecutorContext, ProcedureExecutor};
use common_meta::rpc::procedure;
use common_procedure::{ProcedureInfo, ProcedureState};
use meta_client::MetaClientRef;
use snafu::ResultExt;

use crate::error;
use crate::information_schema::InformationExtension;

pub struct DistributedInformationExtension {
    meta_client: MetaClientRef,
}

impl DistributedInformationExtension {
    pub fn new(meta_client: MetaClientRef) -> Self {
        Self { meta_client }
    }
}

#[async_trait::async_trait]
impl InformationExtension for DistributedInformationExtension {
    type Error = crate::error::Error;

    async fn nodes(&self) -> std::result::Result<Vec<NodeInfo>, Self::Error> {
        self.meta_client
            .list_nodes(None)
            .await
            .map_err(BoxedError::new)
            .context(error::ListNodesSnafu)
    }

    async fn procedures(&self) -> std::result::Result<Vec<(String, ProcedureInfo)>, Self::Error> {
        let procedures = self
            .meta_client
            .list_procedures(&ExecutorContext::default())
            .await
            .map_err(BoxedError::new)
            .context(error::ListProceduresSnafu)?
            .procedures;
        let mut result = Vec::with_capacity(procedures.len());
        for procedure in procedures {
            let pid = match procedure.id {
                Some(pid) => pid,
                None => return error::ProcedureIdNotFoundSnafu {}.fail(),
            };
            let pid = procedure::pb_pid_to_pid(&pid)
                .map_err(BoxedError::new)
                .context(error::ConvertProtoDataSnafu)?;
            let status = ProcedureStatus::try_from(procedure.status)
                .map(|v| v.as_str_name())
                .unwrap_or("Unknown")
                .to_string();
            let procedure_info = ProcedureInfo {
                id: pid,
                type_name: procedure.type_name,
                start_time_ms: procedure.start_time_ms,
                end_time_ms: procedure.end_time_ms,
                state: ProcedureState::Running,
                lock_keys: procedure.lock_keys,
            };
            result.push((status, procedure_info));
        }

        Ok(result)
    }

    async fn region_stats(&self) -> std::result::Result<Vec<RegionStat>, Self::Error> {
        self.meta_client
            .list_region_stats()
            .await
            .map_err(BoxedError::new)
            .context(error::ListRegionStatsSnafu)
    }
}
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-pub use client::{CachedMetaKvBackend, CachedMetaKvBackendBuilder, MetaKvBackend};
+pub use client::{CachedKvBackend, CachedKvBackendBuilder, MetaKvBackend};

 mod client;
 mod manager;
@@ -22,6 +22,7 @@ use common_error::ext::BoxedError;
 use common_meta::cache_invalidator::KvCacheInvalidator;
 use common_meta::error::Error::CacheNotGet;
 use common_meta::error::{CacheNotGetSnafu, Error, ExternalSnafu, GetKvCacheSnafu, Result};
+use common_meta::kv_backend::txn::{Txn, TxnResponse};
 use common_meta::kv_backend::{KvBackend, KvBackendRef, TxnService};
 use common_meta::rpc::store::{
     BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse, BatchPutRequest,

@@ -42,20 +43,20 @@ const DEFAULT_CACHE_MAX_CAPACITY: u64 = 10000;
 const DEFAULT_CACHE_TTL: Duration = Duration::from_secs(10 * 60);
 const DEFAULT_CACHE_TTI: Duration = Duration::from_secs(5 * 60);

-pub struct CachedMetaKvBackendBuilder {
+pub struct CachedKvBackendBuilder {
     cache_max_capacity: Option<u64>,
     cache_ttl: Option<Duration>,
     cache_tti: Option<Duration>,
-    meta_client: Arc<MetaClient>,
+    inner: KvBackendRef,
 }

-impl CachedMetaKvBackendBuilder {
-    pub fn new(meta_client: Arc<MetaClient>) -> Self {
+impl CachedKvBackendBuilder {
+    pub fn new(inner: KvBackendRef) -> Self {
         Self {
             cache_max_capacity: None,
             cache_ttl: None,
             cache_tti: None,
-            meta_client,
+            inner,
         }
     }

@@ -74,7 +75,7 @@ impl CachedMetaKvBackendBuilder {
         self
     }

-    pub fn build(self) -> CachedMetaKvBackend {
+    pub fn build(self) -> CachedKvBackend {
         let cache_max_capacity = self
             .cache_max_capacity
             .unwrap_or(DEFAULT_CACHE_MAX_CAPACITY);

@@ -85,14 +86,11 @@ impl CachedMetaKvBackendBuilder {
             .time_to_live(cache_ttl)
             .time_to_idle(cache_tti)
             .build();

-        let kv_backend = Arc::new(MetaKvBackend {
-            client: self.meta_client,
-        });
+        let kv_backend = self.inner;
         let name = format!("CachedKvBackend({})", kv_backend.name());
         let version = AtomicUsize::new(0);

-        CachedMetaKvBackend {
+        CachedKvBackend {
             kv_backend,
             cache,
             name,

@@ -112,19 +110,29 @@ pub type CacheBackend = Cache<Vec<u8>, KeyValue>;
 /// Therefore, it is recommended to use CachedMetaKvBackend to only read metadata related
 /// information. Note: If you read other information, you may read expired data, which depends on
 /// TTL and TTI for cache.
-pub struct CachedMetaKvBackend {
+pub struct CachedKvBackend {
     kv_backend: KvBackendRef,
     cache: CacheBackend,
     name: String,
     version: AtomicUsize,
 }

-impl TxnService for CachedMetaKvBackend {
+#[async_trait::async_trait]
+impl TxnService for CachedKvBackend {
     type Error = Error;

+    async fn txn(&self, txn: Txn) -> std::result::Result<TxnResponse, Self::Error> {
+        // TODO(hl): txn of CachedKvBackend simply pass through to inner backend without invalidating caches.
+        self.kv_backend.txn(txn).await
+    }
+
     fn max_txn_ops(&self) -> usize {
         self.kv_backend.max_txn_ops()
     }
 }

 #[async_trait::async_trait]
-impl KvBackend for CachedMetaKvBackend {
+impl KvBackend for CachedKvBackend {
     fn name(&self) -> &str {
         &self.name
     }

@@ -305,7 +313,7 @@ impl KvBackend for CachedMetaKvBackend {
 }

 #[async_trait::async_trait]
-impl KvCacheInvalidator for CachedMetaKvBackend {
+impl KvCacheInvalidator for CachedKvBackend {
     async fn invalidate_key(&self, key: &[u8]) {
         self.create_new_version();
         self.cache.invalidate(key).await;

@@ -313,7 +321,7 @@ impl KvCacheInvalidator for CachedMetaKvBackend {
     }
 }

-impl CachedMetaKvBackend {
+impl CachedKvBackend {
     // only for test
     #[cfg(test)]
     fn wrap(kv_backend: KvBackendRef) -> Self {

@@ -466,7 +474,7 @@ mod tests {
     use common_meta::rpc::KeyValue;
     use dashmap::DashMap;

-    use super::CachedMetaKvBackend;
+    use super::CachedKvBackend;

     #[derive(Default)]
     pub struct SimpleKvBackend {

@@ -540,7 +548,7 @@ mod tests {
     async fn test_cached_kv_backend() {
         let simple_kv = Arc::new(SimpleKvBackend::default());
         let get_execute_times = simple_kv.get_execute_times.clone();
-        let cached_kv = CachedMetaKvBackend::wrap(simple_kv);
+        let cached_kv = CachedKvBackend::wrap(simple_kv);

         add_some_vals(&cached_kv).await;
@@ -30,6 +30,7 @@ use table::TableRef;
 use crate::error::Result;

 pub mod error;
+pub mod information_extension;
 pub mod kvbackend;
 pub mod memory;
 mod metrics;
@@ -180,7 +180,7 @@ impl InformationSchemaSchemataBuilder {
                 .context(TableMetadataManagerSnafu)?
                 // information_schema is not available from this
                 // table_metadata_manager and we return None
-                .map(|schema_opts| format!("{schema_opts}"))
+                .map(|schema_opts| format!("{}", schema_opts.into_inner()))
         } else {
             None
         };
src/cli/Cargo.toml (new file, 65 lines)

@@ -0,0 +1,65 @@
[package]
name = "cli"
version.workspace = true
edition.workspace = true
license.workspace = true

[lints]
workspace = true

[dependencies]
async-trait.workspace = true
auth.workspace = true
base64.workspace = true
cache.workspace = true
catalog.workspace = true
chrono.workspace = true
clap.workspace = true
client.workspace = true
common-base.workspace = true
common-catalog.workspace = true
common-config.workspace = true
common-error.workspace = true
common-grpc.workspace = true
common-macro.workspace = true
common-meta.workspace = true
common-options.workspace = true
common-procedure.workspace = true
common-query.workspace = true
common-recordbatch.workspace = true
common-runtime.workspace = true
common-telemetry = { workspace = true, features = [
    "deadlock_detection",
] }
common-time.workspace = true
common-version.workspace = true
common-wal.workspace = true
datatypes.workspace = true
either = "1.8"
etcd-client.workspace = true
futures.workspace = true
humantime.workspace = true
meta-client.workspace = true
nu-ansi-term = "0.46"
query.workspace = true
rand.workspace = true
reqwest.workspace = true
rustyline = "10.1"
serde.workspace = true
serde_json.workspace = true
servers.workspace = true
session.workspace = true
snafu.workspace = true
store-api.workspace = true
substrait.workspace = true
table.workspace = true
tokio.workspace = true
tracing-appender.workspace = true

[dev-dependencies]
client = { workspace = true, features = ["testing"] }
common-test-util.workspace = true
common-version.workspace = true
serde.workspace = true
temp-env = "0.3"
tempfile.workspace = true
@@ -19,6 +19,7 @@ use std::time::Duration;

 use async_trait::async_trait;
 use clap::Parser;
+use common_error::ext::BoxedError;
 use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
 use common_meta::kv_backend::etcd::EtcdStore;
 use common_meta::peer::Peer;

@@ -30,11 +31,9 @@ use rand::Rng;
 use store_api::storage::RegionNumber;
 use table::metadata::{RawTableInfo, RawTableMeta, TableId, TableIdent, TableType};
 use table::table_name::TableName;
-use tracing_appender::non_blocking::WorkerGuard;

 use self::metadata::TableMetadataBencher;
-use crate::cli::{Instance, Tool};
-use crate::error::Result;
+use crate::Tool;

 mod metadata;

@@ -62,7 +61,7 @@ pub struct BenchTableMetadataCommand {
 }

 impl BenchTableMetadataCommand {
-    pub async fn build(&self, guard: Vec<WorkerGuard>) -> Result<Instance> {
+    pub async fn build(&self) -> std::result::Result<Box<dyn Tool>, BoxedError> {
         let etcd_store = EtcdStore::with_endpoints([&self.etcd_addr], 128)
             .await
             .unwrap();

@@ -73,7 +72,7 @@ impl BenchTableMetadataCommand {
             table_metadata_manager,
             count: self.count,
         };
-        Ok(Instance::new(Box::new(tool), guard))
+        Ok(Box::new(tool))
     }
 }

@@ -84,7 +83,7 @@ struct BenchTableMetadata {

 #[async_trait]
 impl Tool for BenchTableMetadata {
-    async fn do_work(&self) -> Result<()> {
+    async fn do_work(&self) -> std::result::Result<(), BoxedError> {
         let bencher = TableMetadataBencher::new(self.table_metadata_manager.clone(), self.count);
         bencher.bench_create().await;
         bencher.bench_get().await;

@@ -18,7 +18,7 @@ use common_meta::key::table_route::TableRouteValue;
 use common_meta::key::TableMetadataManagerRef;
 use table::table_name::TableName;

-use crate::cli::bench::{
+use crate::bench::{
     bench_self_recorded, create_region_routes, create_region_wal_options, create_table_info,
 };
@@ -12,24 +12,35 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::time::Duration;
|
||||
|
||||
use base64::engine::general_purpose;
|
||||
use base64::Engine;
|
||||
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use humantime::format_duration;
|
||||
use serde_json::Value;
|
||||
use servers::http::greptime_result_v1::GreptimedbV1Response;
|
||||
use servers::http::header::constants::GREPTIME_DB_HEADER_TIMEOUT;
|
||||
use servers::http::result::greptime_result_v1::GreptimedbV1Response;
|
||||
use servers::http::GreptimeQueryOutput;
|
||||
use snafu::ResultExt;
|
||||
|
||||
use crate::error::{HttpQuerySqlSnafu, Result, SerdeJsonSnafu};
|
||||
|
||||
pub(crate) struct DatabaseClient {
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct DatabaseClient {
|
||||
addr: String,
|
||||
catalog: String,
|
||||
auth_header: Option<String>,
|
||||
timeout: Duration,
|
||||
}
|
||||
|
||||
impl DatabaseClient {
|
||||
pub fn new(addr: String, catalog: String, auth_basic: Option<String>) -> Self {
|
||||
pub fn new(
|
||||
addr: String,
|
||||
catalog: String,
|
||||
auth_basic: Option<String>,
|
||||
timeout: Duration,
|
||||
) -> Self {
|
||||
let auth_header = if let Some(basic) = auth_basic {
|
||||
let encoded = general_purpose::STANDARD.encode(basic);
|
||||
Some(format!("basic {}", encoded))
|
||||
@@ -41,6 +52,7 @@ impl DatabaseClient {
|
||||
addr,
|
||||
catalog,
|
||||
auth_header,
|
||||
timeout,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -63,6 +75,11 @@ impl DatabaseClient {
|
||||
request = request.header("Authorization", auth);
|
||||
}
|
||||
|
||||
request = request.header(
|
||||
GREPTIME_DB_HEADER_TIMEOUT,
|
||||
format_duration(self.timeout).to_string(),
|
||||
);
|
||||
|
||||
let response = request.send().await.with_context(|_| HttpQuerySqlSnafu {
|
||||
reason: format!("bad url: {}", url),
|
||||
})?;
|
||||
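For context on the constructor change above, here is a minimal sketch of how the updated `DatabaseClient` would be built with the new timeout argument; the address and credentials are placeholder values, not taken from the diff.

```rust
use std::time::Duration;

// Sketch only: the fourth argument is the new per-request timeout that the
// client forwards via the `GREPTIME_DB_HEADER_TIMEOUT` header; `0s` keeps the
// server-side default timeout disabled.
let client = DatabaseClient::new(
    "127.0.0.1:4000".to_string(),          // HTTP address of the server
    "greptime".to_string(),                // catalog name
    Some("username:password".to_string()), // optional basic auth, base64-encoded internally
    Duration::from_secs(30),
);
```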
src/cli/src/error.rs (new file, 316 lines)
@@ -0,0 +1,316 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::any::Any;
|
||||
|
||||
use common_error::ext::{BoxedError, ErrorExt};
|
||||
use common_error::status_code::StatusCode;
|
||||
use common_macro::stack_trace_debug;
|
||||
use rustyline::error::ReadlineError;
|
||||
use snafu::{Location, Snafu};
|
||||
|
||||
#[derive(Snafu)]
|
||||
#[snafu(visibility(pub))]
|
||||
#[stack_trace_debug]
|
||||
pub enum Error {
|
||||
#[snafu(display("Failed to install ring crypto provider: {}", msg))]
|
||||
InitTlsProvider {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
msg: String,
|
||||
},
|
||||
#[snafu(display("Failed to create default catalog and schema"))]
|
||||
InitMetadata {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: common_meta::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to init DDL manager"))]
|
||||
InitDdlManager {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: common_meta::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to init default timezone"))]
|
||||
InitTimezone {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: common_time::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to start procedure manager"))]
|
||||
StartProcedureManager {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: common_procedure::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to stop procedure manager"))]
|
||||
StopProcedureManager {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: common_procedure::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to start wal options allocator"))]
|
||||
StartWalOptionsAllocator {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: common_meta::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Missing config, msg: {}", msg))]
|
||||
MissingConfig {
|
||||
msg: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Illegal config: {}", msg))]
|
||||
IllegalConfig {
|
||||
msg: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Invalid REPL command: {reason}"))]
|
||||
InvalidReplCommand { reason: String },
|
||||
|
||||
#[snafu(display("Cannot create REPL"))]
|
||||
ReplCreation {
|
||||
#[snafu(source)]
|
||||
error: ReadlineError,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Error reading command"))]
|
||||
Readline {
|
||||
#[snafu(source)]
|
||||
error: ReadlineError,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to request database, sql: {sql}"))]
|
||||
RequestDatabase {
|
||||
sql: String,
|
||||
#[snafu(source)]
|
||||
source: client::Error,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to collect RecordBatches"))]
|
||||
CollectRecordBatches {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: common_recordbatch::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to pretty print Recordbatches"))]
|
||||
PrettyPrintRecordBatches {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: common_recordbatch::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to start Meta client"))]
|
||||
StartMetaClient {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: meta_client::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to parse SQL: {}", sql))]
|
||||
ParseSql {
|
||||
sql: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: query::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to plan statement"))]
|
||||
PlanStatement {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: query::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to encode logical plan in substrait"))]
|
||||
SubstraitEncodeLogicalPlan {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: substrait::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to load layered config"))]
|
||||
LoadLayeredConfig {
|
||||
#[snafu(source(from(common_config::error::Error, Box::new)))]
|
||||
source: Box<common_config::error::Error>,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to connect to Etcd at {etcd_addr}"))]
|
||||
ConnectEtcd {
|
||||
etcd_addr: String,
|
||||
#[snafu(source)]
|
||||
error: etcd_client::Error,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to serde json"))]
|
||||
SerdeJson {
|
||||
#[snafu(source)]
|
||||
error: serde_json::error::Error,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to run http request: {reason}"))]
|
||||
HttpQuerySql {
|
||||
reason: String,
|
||||
#[snafu(source)]
|
||||
error: reqwest::Error,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Empty result from output"))]
|
||||
EmptyResult {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to manipulate file"))]
|
||||
FileIo {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
#[snafu(source)]
|
||||
error: std::io::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to create directory {}", dir))]
|
||||
CreateDir {
|
||||
dir: String,
|
||||
#[snafu(source)]
|
||||
error: std::io::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to spawn thread"))]
|
||||
SpawnThread {
|
||||
#[snafu(source)]
|
||||
error: std::io::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Other error"))]
|
||||
Other {
|
||||
source: BoxedError,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to build runtime"))]
|
||||
BuildRuntime {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: common_runtime::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to get cache from cache registry: {}", name))]
|
||||
CacheRequired {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
name: String,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to build cache registry"))]
|
||||
BuildCacheRegistry {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: cache::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to initialize meta client"))]
|
||||
MetaClientInit {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: meta_client::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Cannot find schema {schema} in catalog {catalog}"))]
|
||||
SchemaNotFound {
|
||||
catalog: String,
|
||||
schema: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
}
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
|
||||
impl ErrorExt for Error {
|
||||
fn status_code(&self) -> StatusCode {
|
||||
match self {
|
||||
Error::InitMetadata { source, .. } | Error::InitDdlManager { source, .. } => {
|
||||
source.status_code()
|
||||
}
|
||||
|
||||
Error::MissingConfig { .. }
|
||||
| Error::LoadLayeredConfig { .. }
|
||||
| Error::IllegalConfig { .. }
|
||||
| Error::InvalidReplCommand { .. }
|
||||
| Error::InitTimezone { .. }
|
||||
| Error::ConnectEtcd { .. }
|
||||
| Error::CreateDir { .. }
|
||||
| Error::EmptyResult { .. } => StatusCode::InvalidArguments,
|
||||
|
||||
Error::StartProcedureManager { source, .. }
|
||||
| Error::StopProcedureManager { source, .. } => source.status_code(),
|
||||
Error::StartWalOptionsAllocator { source, .. } => source.status_code(),
|
||||
Error::ReplCreation { .. } | Error::Readline { .. } | Error::HttpQuerySql { .. } => {
|
||||
StatusCode::Internal
|
||||
}
|
||||
Error::RequestDatabase { source, .. } => source.status_code(),
|
||||
Error::CollectRecordBatches { source, .. }
|
||||
| Error::PrettyPrintRecordBatches { source, .. } => source.status_code(),
|
||||
Error::StartMetaClient { source, .. } => source.status_code(),
|
||||
Error::ParseSql { source, .. } | Error::PlanStatement { source, .. } => {
|
||||
source.status_code()
|
||||
}
|
||||
Error::SubstraitEncodeLogicalPlan { source, .. } => source.status_code(),
|
||||
|
||||
Error::SerdeJson { .. }
|
||||
| Error::FileIo { .. }
|
||||
| Error::SpawnThread { .. }
|
||||
| Error::InitTlsProvider { .. } => StatusCode::Unexpected,
|
||||
|
||||
Error::Other { source, .. } => source.status_code(),
|
||||
|
||||
Error::BuildRuntime { source, .. } => source.status_code(),
|
||||
|
||||
Error::CacheRequired { .. } | Error::BuildCacheRegistry { .. } => StatusCode::Internal,
|
||||
Error::MetaClientInit { source, .. } => source.status_code(),
|
||||
Error::SchemaNotFound { .. } => StatusCode::DatabaseNotFound,
|
||||
}
|
||||
}
|
||||
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
}
|
||||
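The new error enum above follows the snafu pattern used elsewhere in the codebase. As a rough illustration (not part of the diff), a fallible call is wrapped with one of the context selectors like this:

```rust
use snafu::ResultExt;

// Hypothetical helper: wraps a reqwest failure into `Error::HttpQuerySql`,
// mirroring the call site shown earlier in `database.rs`.
async fn send(request: reqwest::RequestBuilder, url: &str) -> Result<reqwest::Response> {
    request.send().await.with_context(|_| HttpQuerySqlSnafu {
        reason: format!("bad url: {}", url),
    })
}
```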
@@ -15,9 +15,11 @@
|
||||
use std::collections::HashSet;
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use clap::{Parser, ValueEnum};
|
||||
use common_error::ext::BoxedError;
|
||||
use common_telemetry::{debug, error, info};
|
||||
use serde_json::Value;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
@@ -25,11 +27,10 @@ use tokio::fs::File;
|
||||
use tokio::io::{AsyncWriteExt, BufWriter};
|
||||
use tokio::sync::Semaphore;
|
||||
use tokio::time::Instant;
|
||||
use tracing_appender::non_blocking::WorkerGuard;
|
||||
|
||||
use crate::cli::database::DatabaseClient;
|
||||
use crate::cli::{database, Instance, Tool};
|
||||
use crate::database::DatabaseClient;
|
||||
use crate::error::{EmptyResultSnafu, Error, FileIoSnafu, Result, SchemaNotFoundSnafu};
|
||||
use crate::{database, Tool};
|
||||
|
||||
type TableReference = (String, String, String);
|
||||
|
||||
@@ -83,28 +84,38 @@ pub struct ExportCommand {
|
||||
/// The basic authentication for connecting to the server
|
||||
#[clap(long)]
|
||||
auth_basic: Option<String>,
|
||||
|
||||
/// The timeout of invoking the database.
|
||||
///
|
||||
/// It is used to override the server-side timeout setting.
|
||||
/// The default behavior will disable the server-side default timeout (i.e. `0s`).
|
||||
#[clap(long, value_parser = humantime::parse_duration)]
|
||||
timeout: Option<Duration>,
|
||||
}
|
||||
|
||||
impl ExportCommand {
|
||||
pub async fn build(&self, guard: Vec<WorkerGuard>) -> Result<Instance> {
|
||||
let (catalog, schema) = database::split_database(&self.database)?;
|
||||
pub async fn build(&self) -> std::result::Result<Box<dyn Tool>, BoxedError> {
|
||||
let (catalog, schema) =
|
||||
database::split_database(&self.database).map_err(BoxedError::new)?;
|
||||
|
||||
let database_client =
|
||||
DatabaseClient::new(self.addr.clone(), catalog.clone(), self.auth_basic.clone());
|
||||
let database_client = DatabaseClient::new(
|
||||
self.addr.clone(),
|
||||
catalog.clone(),
|
||||
self.auth_basic.clone(),
|
||||
// Treats `None` as `0s` to disable server-side default timeout.
|
||||
self.timeout.unwrap_or_default(),
|
||||
);
|
||||
|
||||
Ok(Instance::new(
|
||||
Box::new(Export {
|
||||
catalog,
|
||||
schema,
|
||||
database_client,
|
||||
output_dir: self.output_dir.clone(),
|
||||
parallelism: self.export_jobs,
|
||||
target: self.target.clone(),
|
||||
start_time: self.start_time.clone(),
|
||||
end_time: self.end_time.clone(),
|
||||
}),
|
||||
guard,
|
||||
))
|
||||
Ok(Box::new(Export {
|
||||
catalog,
|
||||
schema,
|
||||
database_client,
|
||||
output_dir: self.output_dir.clone(),
|
||||
parallelism: self.export_jobs,
|
||||
target: self.target.clone(),
|
||||
start_time: self.start_time.clone(),
|
||||
end_time: self.end_time.clone(),
|
||||
}))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -452,97 +463,22 @@ impl Export {
|
||||
|
||||
#[async_trait]
|
||||
impl Tool for Export {
|
||||
async fn do_work(&self) -> Result<()> {
|
||||
async fn do_work(&self) -> std::result::Result<(), BoxedError> {
|
||||
match self.target {
|
||||
ExportTarget::Schema => {
|
||||
self.export_create_database().await?;
|
||||
self.export_create_table().await
|
||||
self.export_create_database()
|
||||
.await
|
||||
.map_err(BoxedError::new)?;
|
||||
self.export_create_table().await.map_err(BoxedError::new)
|
||||
}
|
||||
ExportTarget::Data => self.export_database_data().await,
|
||||
ExportTarget::Data => self.export_database_data().await.map_err(BoxedError::new),
|
||||
ExportTarget::All => {
|
||||
self.export_create_database().await?;
|
||||
self.export_create_table().await?;
|
||||
self.export_database_data().await
|
||||
self.export_create_database()
|
||||
.await
|
||||
.map_err(BoxedError::new)?;
|
||||
self.export_create_table().await.map_err(BoxedError::new)?;
|
||||
self.export_database_data().await.map_err(BoxedError::new)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use clap::Parser;
|
||||
use client::{Client, Database};
|
||||
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use common_telemetry::logging::LoggingOptions;
|
||||
|
||||
use crate::error::Result as CmdResult;
|
||||
use crate::options::GlobalOptions;
|
||||
use crate::{cli, standalone, App};
|
||||
|
||||
#[tokio::test(flavor = "multi_thread")]
|
||||
async fn test_export_create_table_with_quoted_names() -> CmdResult<()> {
|
||||
let output_dir = tempfile::tempdir().unwrap();
|
||||
|
||||
let standalone = standalone::Command::parse_from([
|
||||
"standalone",
|
||||
"start",
|
||||
"--data-home",
|
||||
&*output_dir.path().to_string_lossy(),
|
||||
]);
|
||||
|
||||
let standalone_opts = standalone.load_options(&GlobalOptions::default()).unwrap();
|
||||
let mut instance = standalone.build(standalone_opts).await?;
|
||||
instance.start().await?;
|
||||
|
||||
let client = Client::with_urls(["127.0.0.1:4001"]);
|
||||
let database = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client);
|
||||
database
|
||||
.sql(r#"CREATE DATABASE "cli.export.create_table";"#)
|
||||
.await
|
||||
.unwrap();
|
||||
database
|
||||
.sql(
|
||||
r#"CREATE TABLE "cli.export.create_table"."a.b.c"(
|
||||
ts TIMESTAMP,
|
||||
TIME INDEX (ts)
|
||||
) engine=mito;
|
||||
"#,
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let output_dir = tempfile::tempdir().unwrap();
|
||||
let cli = cli::Command::parse_from([
|
||||
"cli",
|
||||
"export",
|
||||
"--addr",
|
||||
"127.0.0.1:4000",
|
||||
"--output-dir",
|
||||
&*output_dir.path().to_string_lossy(),
|
||||
"--target",
|
||||
"schema",
|
||||
]);
|
||||
let mut cli_app = cli.build(LoggingOptions::default()).await?;
|
||||
cli_app.start().await?;
|
||||
|
||||
instance.stop().await?;
|
||||
|
||||
let output_file = output_dir
|
||||
.path()
|
||||
.join("greptime")
|
||||
.join("cli.export.create_table")
|
||||
.join("create_tables.sql");
|
||||
let res = std::fs::read_to_string(output_file).unwrap();
|
||||
let expect = r#"CREATE TABLE IF NOT EXISTS "a.b.c" (
|
||||
"ts" TIMESTAMP(3) NOT NULL,
|
||||
TIME INDEX ("ts")
|
||||
)
|
||||
|
||||
ENGINE=mito
|
||||
;
|
||||
"#;
|
||||
assert_eq!(res.trim(), expect.trim());
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
@@ -19,7 +19,7 @@ use rustyline::highlight::{Highlighter, MatchingBracketHighlighter};
use rustyline::hint::{Hinter, HistoryHinter};
use rustyline::validate::{ValidationContext, ValidationResult, Validator};

use crate::cli::cmd::ReplCommand;
use crate::cmd::ReplCommand;

pub(crate) struct RustylineHelper {
    hinter: HistoryHinter,
@@ -14,19 +14,20 @@
|
||||
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use clap::{Parser, ValueEnum};
|
||||
use common_catalog::consts::DEFAULT_SCHEMA_NAME;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_telemetry::{error, info, warn};
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use tokio::sync::Semaphore;
|
||||
use tokio::time::Instant;
|
||||
use tracing_appender::non_blocking::WorkerGuard;
|
||||
|
||||
use crate::cli::database::DatabaseClient;
|
||||
use crate::cli::{database, Instance, Tool};
|
||||
use crate::database::DatabaseClient;
|
||||
use crate::error::{Error, FileIoSnafu, Result, SchemaNotFoundSnafu};
|
||||
use crate::{database, Tool};
|
||||
|
||||
#[derive(Debug, Default, Clone, ValueEnum)]
|
||||
enum ImportTarget {
|
||||
@@ -68,25 +69,35 @@ pub struct ImportCommand {
|
||||
/// The basic authentication for connecting to the server
|
||||
#[clap(long)]
|
||||
auth_basic: Option<String>,
|
||||
|
||||
/// The timeout of invoking the database.
|
||||
///
|
||||
/// It is used to override the server-side timeout setting.
|
||||
/// The default behavior will disable the server-side default timeout (i.e. `0s`).
|
||||
#[clap(long, value_parser = humantime::parse_duration)]
|
||||
timeout: Option<Duration>,
|
||||
}
|
||||
|
||||
impl ImportCommand {
|
||||
pub async fn build(&self, guard: Vec<WorkerGuard>) -> Result<Instance> {
|
||||
let (catalog, schema) = database::split_database(&self.database)?;
|
||||
let database_client =
|
||||
DatabaseClient::new(self.addr.clone(), catalog.clone(), self.auth_basic.clone());
|
||||
pub async fn build(&self) -> std::result::Result<Box<dyn Tool>, BoxedError> {
|
||||
let (catalog, schema) =
|
||||
database::split_database(&self.database).map_err(BoxedError::new)?;
|
||||
let database_client = DatabaseClient::new(
|
||||
self.addr.clone(),
|
||||
catalog.clone(),
|
||||
self.auth_basic.clone(),
|
||||
// Treats `None` as `0s` to disable server-side default timeout.
|
||||
self.timeout.unwrap_or_default(),
|
||||
);
|
||||
|
||||
Ok(Instance::new(
|
||||
Box::new(Import {
|
||||
catalog,
|
||||
schema,
|
||||
database_client,
|
||||
input_dir: self.input_dir.clone(),
|
||||
parallelism: self.import_jobs,
|
||||
target: self.target.clone(),
|
||||
}),
|
||||
guard,
|
||||
))
|
||||
Ok(Box::new(Import {
|
||||
catalog,
|
||||
schema,
|
||||
database_client,
|
||||
input_dir: self.input_dir.clone(),
|
||||
parallelism: self.import_jobs,
|
||||
target: self.target.clone(),
|
||||
}))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -205,13 +216,13 @@ impl Import {
|
||||
|
||||
#[async_trait]
|
||||
impl Tool for Import {
|
||||
async fn do_work(&self) -> Result<()> {
|
||||
async fn do_work(&self) -> std::result::Result<(), BoxedError> {
|
||||
match self.target {
|
||||
ImportTarget::Schema => self.import_create_table().await,
|
||||
ImportTarget::Data => self.import_database_data().await,
|
||||
ImportTarget::Schema => self.import_create_table().await.map_err(BoxedError::new),
|
||||
ImportTarget::Data => self.import_database_data().await.map_err(BoxedError::new),
|
||||
ImportTarget::All => {
|
||||
self.import_create_table().await?;
|
||||
self.import_database_data().await
|
||||
self.import_create_table().await.map_err(BoxedError::new)?;
|
||||
self.import_database_data().await.map_err(BoxedError::new)
|
||||
}
|
||||
}
|
||||
}
|
||||
src/cli/src/lib.rs (new file, 60 lines)
@@ -0,0 +1,60 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
mod bench;
|
||||
pub mod error;
|
||||
// Wait for https://github.com/GreptimeTeam/greptimedb/issues/2373
|
||||
#[allow(unused)]
|
||||
mod cmd;
|
||||
mod export;
|
||||
mod helper;
|
||||
|
||||
// Wait for https://github.com/GreptimeTeam/greptimedb/issues/2373
|
||||
mod database;
|
||||
mod import;
|
||||
#[allow(unused)]
|
||||
mod repl;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use clap::Parser;
|
||||
use common_error::ext::BoxedError;
|
||||
pub use database::DatabaseClient;
|
||||
use error::Result;
|
||||
pub use repl::Repl;
|
||||
|
||||
pub use crate::bench::BenchTableMetadataCommand;
|
||||
pub use crate::export::ExportCommand;
|
||||
pub use crate::import::ImportCommand;
|
||||
|
||||
#[async_trait]
|
||||
pub trait Tool: Send + Sync {
|
||||
async fn do_work(&self) -> std::result::Result<(), BoxedError>;
|
||||
}
|
||||
|
||||
#[derive(Debug, Parser)]
|
||||
pub(crate) struct AttachCommand {
|
||||
#[clap(long)]
|
||||
pub(crate) grpc_addr: String,
|
||||
#[clap(long)]
|
||||
pub(crate) meta_addr: Option<String>,
|
||||
#[clap(long, action)]
|
||||
pub(crate) disable_helper: bool,
|
||||
}
|
||||
|
||||
impl AttachCommand {
|
||||
#[allow(dead_code)]
|
||||
async fn build(self) -> Result<Box<dyn Tool>> {
|
||||
unimplemented!("Wait for https://github.com/GreptimeTeam/greptimedb/issues/2373")
|
||||
}
|
||||
}
|
||||
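Since `Tool` is now a public trait of the new `cli` crate, additional subcommands implement it directly. The following is a minimal sketch; the `MyTool` type is hypothetical and not part of the diff.

```rust
use async_trait::async_trait;
use common_error::ext::BoxedError;

struct MyTool;

#[async_trait]
impl Tool for MyTool {
    // `do_work` now returns `BoxedError` instead of the crate-local `Result`,
    // so concrete tools convert their own errors with `map_err(BoxedError::new)`,
    // as the export/import commands above do.
    async fn do_work(&self) -> std::result::Result<(), BoxedError> {
        Ok(())
    }
}
```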
@@ -20,14 +20,16 @@ use cache::{
|
||||
build_fundamental_cache_registry, with_default_composite_cache_registry, TABLE_CACHE_NAME,
|
||||
TABLE_ROUTE_CACHE_NAME,
|
||||
};
|
||||
use catalog::information_extension::DistributedInformationExtension;
|
||||
use catalog::kvbackend::{
|
||||
CachedMetaKvBackend, CachedMetaKvBackendBuilder, KvBackendCatalogManager, MetaKvBackend,
|
||||
CachedKvBackend, CachedKvBackendBuilder, KvBackendCatalogManager, MetaKvBackend,
|
||||
};
|
||||
use client::{Client, Database, OutputData, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use common_base::Plugins;
|
||||
use common_config::Mode;
|
||||
use common_error::ext::ErrorExt;
|
||||
use common_meta::cache::{CacheRegistryBuilder, LayeredCacheRegistryBuilder};
|
||||
use common_meta::kv_backend::KvBackendRef;
|
||||
use common_query::Output;
|
||||
use common_recordbatch::RecordBatches;
|
||||
use common_telemetry::debug;
|
||||
@@ -43,15 +45,14 @@ use session::context::QueryContext;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use substrait::{DFLogicalSubstraitConvertor, SubstraitPlan};
|
||||
|
||||
use crate::cli::cmd::ReplCommand;
|
||||
use crate::cli::helper::RustylineHelper;
|
||||
use crate::cli::AttachCommand;
|
||||
use crate::cmd::ReplCommand;
|
||||
use crate::error::{
|
||||
CollectRecordBatchesSnafu, ParseSqlSnafu, PlanStatementSnafu, PrettyPrintRecordBatchesSnafu,
|
||||
ReadlineSnafu, ReplCreationSnafu, RequestDatabaseSnafu, Result, StartMetaClientSnafu,
|
||||
SubstraitEncodeLogicalPlanSnafu,
|
||||
};
|
||||
use crate::{error, DistributedInformationExtension};
|
||||
use crate::helper::RustylineHelper;
|
||||
use crate::{error, AttachCommand};
|
||||
|
||||
/// Captures the state of the repl, gathers commands and executes them one by one
|
||||
pub struct Repl {
|
||||
@@ -258,8 +259,9 @@ async fn create_query_engine(meta_addr: &str) -> Result<DatafusionQueryEngine> {
|
||||
.context(StartMetaClientSnafu)?;
|
||||
let meta_client = Arc::new(meta_client);
|
||||
|
||||
let cached_meta_backend =
|
||||
Arc::new(CachedMetaKvBackendBuilder::new(meta_client.clone()).build());
|
||||
let cached_meta_backend = Arc::new(
|
||||
CachedKvBackendBuilder::new(Arc::new(MetaKvBackend::new(meta_client.clone()))).build(),
|
||||
);
|
||||
let layered_cache_builder = LayeredCacheRegistryBuilder::default().add_cache_registry(
|
||||
CacheRegistryBuilder::default()
|
||||
.add_cache(cached_meta_backend.clone())
|
||||
@@ -18,7 +18,7 @@ use api::v1::greptime_database_client::GreptimeDatabaseClient;
|
||||
use api::v1::greptime_request::Request;
|
||||
use api::v1::query_request::Query;
|
||||
use api::v1::{
|
||||
AlterExpr, AuthHeader, CreateTableExpr, DdlRequest, GreptimeRequest, InsertRequests,
|
||||
AlterTableExpr, AuthHeader, CreateTableExpr, DdlRequest, GreptimeRequest, InsertRequests,
|
||||
QueryRequest, RequestHeader,
|
||||
};
|
||||
use arrow_flight::Ticket;
|
||||
@@ -211,9 +211,9 @@ impl Database {
|
||||
.await
|
||||
}
|
||||
|
||||
pub async fn alter(&self, expr: AlterExpr) -> Result<Output> {
|
||||
pub async fn alter(&self, expr: AlterTableExpr) -> Result<Output> {
|
||||
self.do_get(Request::Ddl(DdlRequest {
|
||||
expr: Some(DdlExpr::Alter(expr)),
|
||||
expr: Some(DdlExpr::AlterTable(expr)),
|
||||
}))
|
||||
.await
|
||||
}
|
||||
|
||||
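The rename from `AlterExpr` to `AlterTableExpr` only touches the request type; a call site would look roughly like the sketch below. Field names are assumed from the proto definition and are not shown in this diff.

```rust
use api::v1::AlterTableExpr;

// Hypothetical call site; `database` is a connected `Database` handle and the
// prost-generated message defaults the remaining fields, including the actual
// `kind` of the alteration.
let expr = AlterTableExpr {
    catalog_name: "greptime".to_string(),
    schema_name: "public".to_string(),
    table_name: "monitor".to_string(),
    ..Default::default()
};
let _output = database.alter(expr).await?;
```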
@@ -25,6 +25,7 @@ cache.workspace = true
|
||||
catalog.workspace = true
|
||||
chrono.workspace = true
|
||||
clap.workspace = true
|
||||
cli.workspace = true
|
||||
client.workspace = true
|
||||
common-base.workspace = true
|
||||
common-catalog.workspace = true
|
||||
@@ -53,6 +54,7 @@ flow.workspace = true
|
||||
frontend = { workspace = true, default-features = false }
|
||||
futures.workspace = true
|
||||
human-panic = "2.0"
|
||||
humantime.workspace = true
|
||||
lazy_static.workspace = true
|
||||
meta-client.workspace = true
|
||||
meta-srv.workspace = true
|
||||
|
||||
@@ -12,39 +12,17 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
mod bench;
|
||||
|
||||
// Wait for https://github.com/GreptimeTeam/greptimedb/issues/2373
|
||||
#[allow(unused)]
|
||||
mod cmd;
|
||||
mod export;
|
||||
mod helper;
|
||||
|
||||
// Wait for https://github.com/GreptimeTeam/greptimedb/issues/2373
|
||||
mod database;
|
||||
mod import;
|
||||
#[allow(unused)]
|
||||
mod repl;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use bench::BenchTableMetadataCommand;
|
||||
use clap::Parser;
|
||||
use cli::Tool;
|
||||
use common_telemetry::logging::{LoggingOptions, TracingOptions};
|
||||
pub use repl::Repl;
|
||||
use plugins::SubCommand;
|
||||
use snafu::ResultExt;
|
||||
use tracing_appender::non_blocking::WorkerGuard;
|
||||
|
||||
use self::export::ExportCommand;
|
||||
use crate::cli::import::ImportCommand;
|
||||
use crate::error::Result;
|
||||
use crate::options::GlobalOptions;
|
||||
use crate::App;
|
||||
|
||||
use crate::{error, App, Result};
|
||||
pub const APP_NAME: &str = "greptime-cli";
|
||||
|
||||
#[async_trait]
|
||||
pub trait Tool: Send + Sync {
|
||||
async fn do_work(&self) -> Result<()>;
|
||||
}
|
||||
use async_trait::async_trait;
|
||||
|
||||
pub struct Instance {
|
||||
tool: Box<dyn Tool>,
|
||||
@@ -54,12 +32,16 @@ pub struct Instance {
|
||||
}
|
||||
|
||||
impl Instance {
|
||||
fn new(tool: Box<dyn Tool>, guard: Vec<WorkerGuard>) -> Self {
|
||||
pub fn new(tool: Box<dyn Tool>, guard: Vec<WorkerGuard>) -> Self {
|
||||
Self {
|
||||
tool,
|
||||
_guard: guard,
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn start(&mut self) -> Result<()> {
|
||||
self.tool.do_work().await.context(error::StartCliSnafu)
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
@@ -69,7 +51,8 @@ impl App for Instance {
|
||||
}
|
||||
|
||||
async fn start(&mut self) -> Result<()> {
|
||||
self.tool.do_work().await
|
||||
self.start().await.unwrap();
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn wait_signal(&self) -> bool {
|
||||
@@ -96,7 +79,12 @@ impl Command {
|
||||
None,
|
||||
);
|
||||
|
||||
self.cmd.build(guard).await
|
||||
let tool = self.cmd.build().await.context(error::BuildCliSnafu)?;
|
||||
let instance = Instance {
|
||||
tool,
|
||||
_guard: guard,
|
||||
};
|
||||
Ok(instance)
|
||||
}
|
||||
|
||||
pub fn load_options(&self, global_options: &GlobalOptions) -> Result<LoggingOptions> {
|
||||
@@ -112,38 +100,81 @@ impl Command {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Parser)]
|
||||
enum SubCommand {
|
||||
// Attach(AttachCommand),
|
||||
Bench(BenchTableMetadataCommand),
|
||||
Export(ExportCommand),
|
||||
Import(ImportCommand),
|
||||
}
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use clap::Parser;
|
||||
use client::{Client, Database};
|
||||
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use common_telemetry::logging::LoggingOptions;
|
||||
|
||||
impl SubCommand {
|
||||
async fn build(&self, guard: Vec<WorkerGuard>) -> Result<Instance> {
|
||||
match self {
|
||||
// SubCommand::Attach(cmd) => cmd.build().await,
|
||||
SubCommand::Bench(cmd) => cmd.build(guard).await,
|
||||
SubCommand::Export(cmd) => cmd.build(guard).await,
|
||||
SubCommand::Import(cmd) => cmd.build(guard).await,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Parser)]
|
||||
pub(crate) struct AttachCommand {
|
||||
#[clap(long)]
|
||||
pub(crate) grpc_addr: String,
|
||||
#[clap(long)]
|
||||
pub(crate) meta_addr: Option<String>,
|
||||
#[clap(long, action)]
|
||||
pub(crate) disable_helper: bool,
|
||||
}
|
||||
|
||||
impl AttachCommand {
|
||||
#[allow(dead_code)]
|
||||
async fn build(self) -> Result<Instance> {
|
||||
unimplemented!("Wait for https://github.com/GreptimeTeam/greptimedb/issues/2373")
|
||||
use crate::error::Result as CmdResult;
|
||||
use crate::options::GlobalOptions;
|
||||
use crate::{cli, standalone, App};
|
||||
|
||||
#[tokio::test(flavor = "multi_thread")]
|
||||
async fn test_export_create_table_with_quoted_names() -> CmdResult<()> {
|
||||
let output_dir = tempfile::tempdir().unwrap();
|
||||
|
||||
let standalone = standalone::Command::parse_from([
|
||||
"standalone",
|
||||
"start",
|
||||
"--data-home",
|
||||
&*output_dir.path().to_string_lossy(),
|
||||
]);
|
||||
|
||||
let standalone_opts = standalone.load_options(&GlobalOptions::default()).unwrap();
|
||||
let mut instance = standalone.build(standalone_opts).await?;
|
||||
instance.start().await?;
|
||||
|
||||
let client = Client::with_urls(["127.0.0.1:4001"]);
|
||||
let database = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client);
|
||||
database
|
||||
.sql(r#"CREATE DATABASE "cli.export.create_table";"#)
|
||||
.await
|
||||
.unwrap();
|
||||
database
|
||||
.sql(
|
||||
r#"CREATE TABLE "cli.export.create_table"."a.b.c"(
|
||||
ts TIMESTAMP,
|
||||
TIME INDEX (ts)
|
||||
) engine=mito;
|
||||
"#,
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let output_dir = tempfile::tempdir().unwrap();
|
||||
let cli = cli::Command::parse_from([
|
||||
"cli",
|
||||
"export",
|
||||
"--addr",
|
||||
"127.0.0.1:4000",
|
||||
"--output-dir",
|
||||
&*output_dir.path().to_string_lossy(),
|
||||
"--target",
|
||||
"schema",
|
||||
]);
|
||||
let mut cli_app = cli.build(LoggingOptions::default()).await?;
|
||||
cli_app.start().await?;
|
||||
|
||||
instance.stop().await?;
|
||||
|
||||
let output_file = output_dir
|
||||
.path()
|
||||
.join("greptime")
|
||||
.join("cli.export.create_table")
|
||||
.join("create_tables.sql");
|
||||
let res = std::fs::read_to_string(output_file).unwrap();
|
||||
let expect = r#"CREATE TABLE IF NOT EXISTS "a.b.c" (
|
||||
"ts" TIMESTAMP(3) NOT NULL,
|
||||
TIME INDEX ("ts")
|
||||
)
|
||||
|
||||
ENGINE=mito
|
||||
;
|
||||
"#;
|
||||
assert_eq!(res.trim(), expect.trim());
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -16,10 +16,12 @@ use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use cache::build_datanode_cache_registry;
|
||||
use catalog::kvbackend::MetaKvBackend;
|
||||
use clap::Parser;
|
||||
use common_base::Plugins;
|
||||
use common_config::Configurable;
|
||||
use common_meta::cache::LayeredCacheRegistryBuilder;
|
||||
use common_telemetry::logging::TracingOptions;
|
||||
use common_telemetry::{info, warn};
|
||||
use common_version::{short_version, version};
|
||||
@@ -300,9 +302,17 @@ impl StartCommand {
|
||||
client: meta_client.clone(),
|
||||
});
|
||||
|
||||
// Builds cache registry for datanode.
|
||||
let layered_cache_registry = Arc::new(
|
||||
LayeredCacheRegistryBuilder::default()
|
||||
.add_cache_registry(build_datanode_cache_registry(meta_backend.clone()))
|
||||
.build(),
|
||||
);
|
||||
|
||||
let mut datanode = DatanodeBuilder::new(opts.clone(), plugins)
|
||||
.with_meta_client(meta_client)
|
||||
.with_kv_backend(meta_backend)
|
||||
.with_cache_registry(layered_cache_registry)
|
||||
.build()
|
||||
.await
|
||||
.context(StartDatanodeSnafu)?;
|
||||
|
||||
@@ -114,6 +114,20 @@ pub enum Error {
|
||||
source: frontend::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to build cli"))]
|
||||
BuildCli {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: BoxedError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to start cli"))]
|
||||
StartCli {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: BoxedError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to build meta server"))]
|
||||
BuildMetaServer {
|
||||
#[snafu(implicit)]
|
||||
@@ -346,6 +360,8 @@ impl ErrorExt for Error {
|
||||
Error::ShutdownMetaServer { source, .. } => source.status_code(),
|
||||
Error::BuildMetaServer { source, .. } => source.status_code(),
|
||||
Error::UnsupportedSelectorType { source, .. } => source.status_code(),
|
||||
Error::BuildCli { source, .. } => source.status_code(),
|
||||
Error::StartCli { source, .. } => source.status_code(),
|
||||
|
||||
Error::InitMetadata { source, .. } | Error::InitDdlManager { source, .. } => {
|
||||
source.status_code()
|
||||
|
||||
@@ -15,13 +15,15 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use cache::{build_fundamental_cache_registry, with_default_composite_cache_registry};
|
||||
use catalog::kvbackend::{CachedMetaKvBackendBuilder, KvBackendCatalogManager, MetaKvBackend};
|
||||
use catalog::information_extension::DistributedInformationExtension;
|
||||
use catalog::kvbackend::{CachedKvBackendBuilder, KvBackendCatalogManager, MetaKvBackend};
|
||||
use clap::Parser;
|
||||
use client::client_manager::NodeClients;
|
||||
use common_base::Plugins;
|
||||
use common_config::Configurable;
|
||||
use common_grpc::channel_manager::ChannelConfig;
|
||||
use common_meta::cache::{CacheRegistryBuilder, LayeredCacheRegistryBuilder};
|
||||
use common_meta::heartbeat::handler::invalidate_table_cache::InvalidateCacheHandler;
|
||||
use common_meta::heartbeat::handler::parse_mailbox_message::ParseMailboxMessageHandler;
|
||||
use common_meta::heartbeat::handler::HandlerGroupExecutor;
|
||||
use common_meta::key::flow::FlowMetadataManager;
|
||||
@@ -30,7 +32,6 @@ use common_telemetry::info;
|
||||
use common_telemetry::logging::TracingOptions;
|
||||
use common_version::{short_version, version};
|
||||
use flow::{FlownodeBuilder, FlownodeInstance, FrontendInvoker};
|
||||
use frontend::heartbeat::handler::invalidate_table_cache::InvalidateTableCacheHandler;
|
||||
use meta_client::{MetaClientOptions, MetaClientType};
|
||||
use servers::Mode;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
@@ -41,7 +42,7 @@ use crate::error::{
|
||||
MissingConfigSnafu, Result, ShutdownFlownodeSnafu, StartFlownodeSnafu,
|
||||
};
|
||||
use crate::options::{GlobalOptions, GreptimeOptions};
|
||||
use crate::{log_versions, App, DistributedInformationExtension};
|
||||
use crate::{log_versions, App};
|
||||
|
||||
pub const APP_NAME: &str = "greptime-flownode";
|
||||
|
||||
@@ -246,11 +247,12 @@ impl StartCommand {
|
||||
let cache_tti = meta_config.metadata_cache_tti;
|
||||
|
||||
// TODO(discord9): add helper function to ease the creation of cache registry&such
|
||||
let cached_meta_backend = CachedMetaKvBackendBuilder::new(meta_client.clone())
|
||||
.cache_max_capacity(cache_max_capacity)
|
||||
.cache_ttl(cache_ttl)
|
||||
.cache_tti(cache_tti)
|
||||
.build();
|
||||
let cached_meta_backend =
|
||||
CachedKvBackendBuilder::new(Arc::new(MetaKvBackend::new(meta_client.clone())))
|
||||
.cache_max_capacity(cache_max_capacity)
|
||||
.cache_ttl(cache_ttl)
|
||||
.cache_tti(cache_tti)
|
||||
.build();
|
||||
let cached_meta_backend = Arc::new(cached_meta_backend);
|
||||
|
||||
// Builds cache registry
|
||||
@@ -287,9 +289,7 @@ impl StartCommand {
|
||||
|
||||
let executor = HandlerGroupExecutor::new(vec![
|
||||
Arc::new(ParseMailboxMessageHandler),
|
||||
Arc::new(InvalidateTableCacheHandler::new(
|
||||
layered_cache_registry.clone(),
|
||||
)),
|
||||
Arc::new(InvalidateCacheHandler::new(layered_cache_registry.clone())),
|
||||
]);
|
||||
|
||||
let heartbeat_task = flow::heartbeat::HeartbeatTask::new(
|
||||
|
||||
@@ -17,20 +17,21 @@ use std::time::Duration;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use cache::{build_fundamental_cache_registry, with_default_composite_cache_registry};
|
||||
use catalog::kvbackend::{CachedMetaKvBackendBuilder, KvBackendCatalogManager, MetaKvBackend};
|
||||
use catalog::information_extension::DistributedInformationExtension;
|
||||
use catalog::kvbackend::{CachedKvBackendBuilder, KvBackendCatalogManager, MetaKvBackend};
|
||||
use clap::Parser;
|
||||
use client::client_manager::NodeClients;
|
||||
use common_base::Plugins;
|
||||
use common_config::Configurable;
|
||||
use common_grpc::channel_manager::ChannelConfig;
|
||||
use common_meta::cache::{CacheRegistryBuilder, LayeredCacheRegistryBuilder};
|
||||
use common_meta::heartbeat::handler::invalidate_table_cache::InvalidateCacheHandler;
|
||||
use common_meta::heartbeat::handler::parse_mailbox_message::ParseMailboxMessageHandler;
|
||||
use common_meta::heartbeat::handler::HandlerGroupExecutor;
|
||||
use common_telemetry::info;
|
||||
use common_telemetry::logging::TracingOptions;
|
||||
use common_time::timezone::set_default_timezone;
|
||||
use common_version::{short_version, version};
|
||||
use frontend::heartbeat::handler::invalidate_table_cache::InvalidateTableCacheHandler;
|
||||
use frontend::heartbeat::HeartbeatTask;
|
||||
use frontend::instance::builder::FrontendBuilder;
|
||||
use frontend::instance::{FrontendInstance, Instance as FeInstance};
|
||||
@@ -46,7 +47,7 @@ use crate::error::{
|
||||
Result, StartFrontendSnafu,
|
||||
};
|
||||
use crate::options::{GlobalOptions, GreptimeOptions};
|
||||
use crate::{log_versions, App, DistributedInformationExtension};
|
||||
use crate::{log_versions, App};
|
||||
|
||||
type FrontendOptions = GreptimeOptions<frontend::frontend::FrontendOptions>;
|
||||
|
||||
@@ -293,11 +294,12 @@ impl StartCommand {
|
||||
.context(MetaClientInitSnafu)?;
|
||||
|
||||
// TODO(discord9): add helper function to ease the creation of cache registry&such
|
||||
let cached_meta_backend = CachedMetaKvBackendBuilder::new(meta_client.clone())
|
||||
.cache_max_capacity(cache_max_capacity)
|
||||
.cache_ttl(cache_ttl)
|
||||
.cache_tti(cache_tti)
|
||||
.build();
|
||||
let cached_meta_backend =
|
||||
CachedKvBackendBuilder::new(Arc::new(MetaKvBackend::new(meta_client.clone())))
|
||||
.cache_max_capacity(cache_max_capacity)
|
||||
.cache_ttl(cache_ttl)
|
||||
.cache_tti(cache_tti)
|
||||
.build();
|
||||
let cached_meta_backend = Arc::new(cached_meta_backend);
|
||||
|
||||
// Builds cache registry
|
||||
@@ -327,9 +329,7 @@ impl StartCommand {
|
||||
|
||||
let executor = HandlerGroupExecutor::new(vec![
|
||||
Arc::new(ParseMailboxMessageHandler),
|
||||
Arc::new(InvalidateTableCacheHandler::new(
|
||||
layered_cache_registry.clone(),
|
||||
)),
|
||||
Arc::new(InvalidateCacheHandler::new(layered_cache_registry.clone())),
|
||||
]);
|
||||
|
||||
let heartbeat_task = HeartbeatTask::new(
|
||||
|
||||
@@ -15,17 +15,7 @@
|
||||
#![feature(assert_matches, let_chains)]
|
||||
|
||||
use async_trait::async_trait;
|
||||
use catalog::information_schema::InformationExtension;
|
||||
use client::api::v1::meta::ProcedureStatus;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_meta::cluster::{ClusterInfo, NodeInfo};
|
||||
use common_meta::datanode::RegionStat;
|
||||
use common_meta::ddl::{ExecutorContext, ProcedureExecutor};
|
||||
use common_meta::rpc::procedure;
|
||||
use common_procedure::{ProcedureInfo, ProcedureState};
|
||||
use common_telemetry::{error, info};
|
||||
use meta_client::MetaClientRef;
|
||||
use snafu::ResultExt;
|
||||
|
||||
use crate::error::Result;
|
||||
|
||||
@@ -43,6 +33,31 @@ lazy_static::lazy_static! {
|
||||
prometheus::register_int_gauge_vec!("greptime_app_version", "app version", &["version", "short_version", "app"]).unwrap();
|
||||
}
|
||||
|
||||
/// wait for the close signal, for unix platform it's SIGINT or SIGTERM
|
||||
#[cfg(unix)]
|
||||
async fn start_wait_for_close_signal() -> std::io::Result<()> {
|
||||
use tokio::signal::unix::{signal, SignalKind};
|
||||
let mut sigint = signal(SignalKind::interrupt())?;
|
||||
let mut sigterm = signal(SignalKind::terminate())?;
|
||||
|
||||
tokio::select! {
|
||||
_ = sigint.recv() => {
|
||||
info!("Received SIGINT, shutting down");
|
||||
}
|
||||
_ = sigterm.recv() => {
|
||||
info!("Received SIGTERM, shutting down");
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// wait for the close signal, for non-unix platform it's ctrl-c
|
||||
#[cfg(not(unix))]
|
||||
async fn start_wait_for_close_signal() -> std::io::Result<()> {
|
||||
tokio::signal::ctrl_c().await
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
pub trait App: Send {
|
||||
fn name(&self) -> &str;
|
||||
@@ -69,9 +84,9 @@ pub trait App: Send {
|
||||
self.start().await?;
|
||||
|
||||
if self.wait_signal() {
|
||||
if let Err(e) = tokio::signal::ctrl_c().await {
|
||||
error!(e; "Failed to listen for ctrl-c signal");
|
||||
// It's unusual to fail to listen for ctrl-c signal, maybe there's something unexpected in
|
||||
if let Err(e) = start_wait_for_close_signal().await {
|
||||
error!(e; "Failed to listen for close signal");
|
||||
// It's unusual to fail to listen for close signal, maybe there's something unexpected in
|
||||
// the underlying system. So we stop the app instead of running nonetheless to let people
|
||||
// investigate the issue.
|
||||
}
|
||||
@@ -105,69 +120,3 @@ fn log_env_flags() {
|
||||
info!("argument: {}", argument);
|
||||
}
|
||||
}
|
||||
|
||||
pub struct DistributedInformationExtension {
|
||||
meta_client: MetaClientRef,
|
||||
}
|
||||
|
||||
impl DistributedInformationExtension {
|
||||
pub fn new(meta_client: MetaClientRef) -> Self {
|
||||
Self { meta_client }
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl InformationExtension for DistributedInformationExtension {
|
||||
type Error = catalog::error::Error;
|
||||
|
||||
async fn nodes(&self) -> std::result::Result<Vec<NodeInfo>, Self::Error> {
|
||||
self.meta_client
|
||||
.list_nodes(None)
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(catalog::error::ListNodesSnafu)
|
||||
}
|
||||
|
||||
async fn procedures(&self) -> std::result::Result<Vec<(String, ProcedureInfo)>, Self::Error> {
|
||||
let procedures = self
|
||||
.meta_client
|
||||
.list_procedures(&ExecutorContext::default())
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(catalog::error::ListProceduresSnafu)?
|
||||
.procedures;
|
||||
let mut result = Vec::with_capacity(procedures.len());
|
||||
for procedure in procedures {
|
||||
let pid = match procedure.id {
|
||||
Some(pid) => pid,
|
||||
None => return catalog::error::ProcedureIdNotFoundSnafu {}.fail(),
|
||||
};
|
||||
let pid = procedure::pb_pid_to_pid(&pid)
|
||||
.map_err(BoxedError::new)
|
||||
.context(catalog::error::ConvertProtoDataSnafu)?;
|
||||
let status = ProcedureStatus::try_from(procedure.status)
|
||||
.map(|v| v.as_str_name())
|
||||
.unwrap_or("Unknown")
|
||||
.to_string();
|
||||
let procedure_info = ProcedureInfo {
|
||||
id: pid,
|
||||
type_name: procedure.type_name,
|
||||
start_time_ms: procedure.start_time_ms,
|
||||
end_time_ms: procedure.end_time_ms,
|
||||
state: ProcedureState::Running,
|
||||
lock_keys: procedure.lock_keys,
|
||||
};
|
||||
result.push((status, procedure_info));
|
||||
}
|
||||
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
async fn region_stats(&self) -> std::result::Result<Vec<RegionStat>, Self::Error> {
|
||||
self.meta_client
|
||||
.list_region_stats()
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(catalog::error::ListRegionStatsSnafu)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -497,6 +497,7 @@ impl StartCommand {
|
||||
|
||||
let datanode = DatanodeBuilder::new(dn_opts, plugins.clone())
|
||||
.with_kv_backend(kv_backend.clone())
|
||||
.with_cache_registry(layered_cache_registry.clone())
|
||||
.build()
|
||||
.await
|
||||
.context(StartDatanodeSnafu)?;
|
||||
|
||||
@@ -69,7 +69,6 @@ fn test_load_datanode_example_config() {
|
||||
region_engine: vec![
|
||||
RegionEngineConfig::Mito(MitoConfig {
|
||||
auto_flush_interval: Duration::from_secs(3600),
|
||||
scan_parallelism: 0,
|
||||
experimental_write_cache_ttl: Some(Duration::from_secs(60 * 60 * 8)),
|
||||
..Default::default()
|
||||
}),
|
||||
@@ -205,7 +204,6 @@ fn test_load_standalone_example_config() {
|
||||
RegionEngineConfig::Mito(MitoConfig {
|
||||
auto_flush_interval: Duration::from_secs(3600),
|
||||
experimental_write_cache_ttl: Some(Duration::from_secs(60 * 60 * 8)),
|
||||
scan_parallelism: 0,
|
||||
..Default::default()
|
||||
}),
|
||||
RegionEngineConfig::File(EngineConfig {}),
|
||||
|
||||
@@ -33,6 +33,7 @@ geo-types = { version = "0.7", optional = true }
|
||||
geohash = { version = "0.13", optional = true }
|
||||
h3o = { version = "0.6", optional = true }
|
||||
jsonb.workspace = true
|
||||
nalgebra = "0.33"
|
||||
num = "0.4"
|
||||
num-traits = "0.2"
|
||||
once_cell.workspace = true
|
||||
@@ -49,6 +50,7 @@ table.workspace = true
|
||||
wkt = { version = "0.11", optional = true }
|
||||
|
||||
[dev-dependencies]
|
||||
approx = "0.5"
|
||||
ron = "0.7"
|
||||
serde = { version = "1.0", features = ["derive"] }
|
||||
tokio.workspace = true
|
||||
|
||||
@@ -27,6 +27,7 @@ use crate::scalars::matches::MatchesFunction;
|
||||
use crate::scalars::math::MathFunction;
|
||||
use crate::scalars::numpy::NumpyFunction;
|
||||
use crate::scalars::timestamp::TimestampFunction;
|
||||
use crate::scalars::vector::VectorFunction;
|
||||
use crate::system::SystemFunction;
|
||||
use crate::table::TableFunction;
|
||||
|
||||
@@ -120,6 +121,9 @@ pub static FUNCTION_REGISTRY: Lazy<Arc<FunctionRegistry>> = Lazy::new(|| {
|
||||
// Json related functions
|
||||
JsonFunction::register(&function_registry);
|
||||
|
||||
// Vector related functions
|
||||
VectorFunction::register(&function_registry);
|
||||
|
||||
// Geo functions
|
||||
#[cfg(feature = "geo")]
|
||||
crate::scalars::geo::GeoFunctions::register(&function_registry);
|
||||
|
||||
@@ -21,6 +21,7 @@ pub mod json;
|
||||
pub mod matches;
|
||||
pub mod math;
|
||||
pub mod numpy;
|
||||
pub mod vector;
|
||||
|
||||
#[cfg(test)]
|
||||
pub(crate) mod test;
|
||||
|
||||
@@ -22,8 +22,12 @@ use datafusion::arrow::compute::kernels::cmp::gt;
|
||||
use datatypes::arrow::array::AsArray;
|
||||
use datatypes::arrow::compute::cast;
|
||||
use datatypes::arrow::compute::kernels::zip;
|
||||
use datatypes::arrow::datatypes::{DataType as ArrowDataType, Date32Type};
|
||||
use datatypes::arrow::datatypes::{
|
||||
DataType as ArrowDataType, Date32Type, Date64Type, TimestampMicrosecondType,
|
||||
TimestampMillisecondType, TimestampNanosecondType, TimestampSecondType,
|
||||
};
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::types::TimestampType;
|
||||
use datatypes::vectors::{Helper, VectorRef};
|
||||
use snafu::{ensure, ResultExt};
|
||||
|
||||
@@ -34,13 +38,47 @@ pub struct GreatestFunction;
|
||||
|
||||
const NAME: &str = "greatest";
|
||||
|
||||
macro_rules! gt_time_types {
|
||||
($ty: ident, $columns:expr) => {{
|
||||
let column1 = $columns[0].to_arrow_array();
|
||||
let column2 = $columns[1].to_arrow_array();
|
||||
|
||||
let column1 = column1.as_primitive::<$ty>();
|
||||
let column2 = column2.as_primitive::<$ty>();
|
||||
let boolean_array = gt(&column1, &column2).context(ArrowComputeSnafu)?;
|
||||
|
||||
let result = zip::zip(&boolean_array, &column1, &column2).context(ArrowComputeSnafu)?;
|
||||
Helper::try_into_vector(&result).context(error::FromArrowArraySnafu)
|
||||
}};
|
||||
}
|
||||
|
||||
impl Function for GreatestFunction {
|
||||
fn name(&self) -> &str {
|
||||
NAME
|
||||
}
|
||||
|
||||
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
|
||||
Ok(ConcreteDataType::date_datatype())
|
||||
fn return_type(&self, input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
|
||||
ensure!(
|
||||
input_types.len() == 2,
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"The length of the args is not correct, expect exactly two, have: {}",
|
||||
input_types.len()
|
||||
)
|
||||
}
|
||||
);
|
||||
|
||||
match &input_types[0] {
|
||||
ConcreteDataType::String(_) => Ok(ConcreteDataType::datetime_datatype()),
|
||||
ConcreteDataType::Date(_) => Ok(ConcreteDataType::date_datatype()),
|
||||
ConcreteDataType::DateTime(_) => Ok(ConcreteDataType::datetime_datatype()),
|
||||
ConcreteDataType::Timestamp(ts_type) => Ok(ConcreteDataType::Timestamp(*ts_type)),
|
||||
_ => UnsupportedInputDataTypeSnafu {
|
||||
function: NAME,
|
||||
datatypes: input_types,
|
||||
}
|
||||
.fail(),
|
||||
}
|
||||
}
|
||||
|
||||
fn signature(&self) -> Signature {
|
||||
@@ -49,6 +87,11 @@ impl Function for GreatestFunction {
|
||||
vec![
|
||||
ConcreteDataType::string_datatype(),
|
||||
ConcreteDataType::date_datatype(),
|
||||
ConcreteDataType::datetime_datatype(),
|
||||
ConcreteDataType::timestamp_nanosecond_datatype(),
|
||||
ConcreteDataType::timestamp_microsecond_datatype(),
|
||||
ConcreteDataType::timestamp_millisecond_datatype(),
|
||||
ConcreteDataType::timestamp_second_datatype(),
|
||||
],
|
||||
Volatility::Immutable,
|
||||
)
|
||||
@@ -66,27 +109,32 @@ impl Function for GreatestFunction {
|
||||
);
|
||||
match columns[0].data_type() {
|
||||
ConcreteDataType::String(_) => {
|
||||
let column1 = cast(&columns[0].to_arrow_array(), &ArrowDataType::Date32)
|
||||
// Treats string as `DateTime` type.
|
||||
let column1 = cast(&columns[0].to_arrow_array(), &ArrowDataType::Date64)
|
||||
.context(ArrowComputeSnafu)?;
|
||||
let column1 = column1.as_primitive::<Date32Type>();
|
||||
let column2 = cast(&columns[1].to_arrow_array(), &ArrowDataType::Date32)
|
||||
let column1 = column1.as_primitive::<Date64Type>();
|
||||
let column2 = cast(&columns[1].to_arrow_array(), &ArrowDataType::Date64)
|
||||
.context(ArrowComputeSnafu)?;
|
||||
let column2 = column2.as_primitive::<Date32Type>();
|
||||
let boolean_array = gt(&column1, &column2).context(ArrowComputeSnafu)?;
|
||||
let result =
|
||||
zip::zip(&boolean_array, &column1, &column2).context(ArrowComputeSnafu)?;
|
||||
Ok(Helper::try_into_vector(&result).context(error::FromArrowArraySnafu)?)
|
||||
}
|
||||
ConcreteDataType::Date(_) => {
|
||||
let column1 = columns[0].to_arrow_array();
|
||||
let column1 = column1.as_primitive::<Date32Type>();
|
||||
let column2 = columns[1].to_arrow_array();
|
||||
let column2 = column2.as_primitive::<Date32Type>();
|
||||
let column2 = column2.as_primitive::<Date64Type>();
|
||||
let boolean_array = gt(&column1, &column2).context(ArrowComputeSnafu)?;
|
||||
let result =
|
||||
zip::zip(&boolean_array, &column1, &column2).context(ArrowComputeSnafu)?;
|
||||
Ok(Helper::try_into_vector(&result).context(error::FromArrowArraySnafu)?)
|
||||
}
|
||||
ConcreteDataType::Date(_) => gt_time_types!(Date32Type, columns),
|
||||
ConcreteDataType::DateTime(_) => gt_time_types!(Date64Type, columns),
|
||||
ConcreteDataType::Timestamp(ts_type) => match ts_type {
|
||||
TimestampType::Second(_) => gt_time_types!(TimestampSecondType, columns),
|
||||
TimestampType::Millisecond(_) => {
|
||||
gt_time_types!(TimestampMillisecondType, columns)
|
||||
}
|
||||
TimestampType::Microsecond(_) => {
|
||||
gt_time_types!(TimestampMicrosecondType, columns)
|
||||
}
|
||||
TimestampType::Nanosecond(_) => {
|
||||
gt_time_types!(TimestampNanosecondType, columns)
|
||||
}
|
||||
},
|
||||
_ => UnsupportedInputDataTypeSnafu {
|
||||
function: NAME,
|
||||
datatypes: columns.iter().map(|c| c.data_type()).collect::<Vec<_>>(),
|
||||
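The widened signature means `greatest` now keeps the input's temporal type instead of always returning `Date`. A small sketch of the timestamp case, mirroring the test macro further below:

```rust
use std::sync::Arc;
use datatypes::prelude::ConcreteDataType;
use datatypes::vectors::TimestampSecondVector;

// Sketch of the new behaviour: two second-precision timestamp columns produce
// a second-precision timestamp result, picking the larger value element-wise.
let function = GreatestFunction;
let columns = vec![
    Arc::new(TimestampSecondVector::from_slice(vec![-1, 2])) as _,
    Arc::new(TimestampSecondVector::from_slice(vec![0, 1])) as _,
];
let result = function.eval(FunctionContext::default(), &columns).unwrap();
assert_eq!(
    result.data_type(),
    ConcreteDataType::timestamp_second_datatype()
);
```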
@@ -106,19 +154,31 @@ impl fmt::Display for GreatestFunction {
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_time::Date;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::types::DateType;
|
||||
use common_time::timestamp::TimeUnit;
|
||||
use common_time::{Date, DateTime, Timestamp};
|
||||
use datatypes::types::{
|
||||
DateTimeType, DateType, TimestampMicrosecondType, TimestampMillisecondType,
|
||||
TimestampNanosecondType, TimestampSecondType,
|
||||
};
|
||||
use datatypes::value::Value;
|
||||
use datatypes::vectors::{DateVector, StringVector, Vector};
|
||||
use datatypes::vectors::{
|
||||
DateTimeVector, DateVector, StringVector, TimestampMicrosecondVector,
|
||||
TimestampMillisecondVector, TimestampNanosecondVector, TimestampSecondVector, Vector,
|
||||
};
|
||||
use paste::paste;
|
||||
|
||||
use super::*;
|
||||
#[test]
|
||||
fn test_greatest_takes_string_vector() {
|
||||
let function = GreatestFunction;
|
||||
assert_eq!(
|
||||
function.return_type(&[]).unwrap(),
|
||||
ConcreteDataType::Date(DateType)
|
||||
function
|
||||
.return_type(&[
|
||||
ConcreteDataType::string_datatype(),
|
||||
ConcreteDataType::string_datatype()
|
||||
])
|
||||
.unwrap(),
|
||||
ConcreteDataType::DateTime(DateTimeType)
|
||||
);
|
||||
let columns = vec![
|
||||
Arc::new(StringVector::from(vec![
|
||||
@@ -132,15 +192,15 @@ mod tests {
|
||||
];
|
||||
|
||||
let result = function.eval(FunctionContext::default(), &columns).unwrap();
|
||||
let result = result.as_any().downcast_ref::<DateVector>().unwrap();
|
||||
let result = result.as_any().downcast_ref::<DateTimeVector>().unwrap();
|
||||
assert_eq!(result.len(), 2);
|
||||
assert_eq!(
|
||||
result.get(0),
|
||||
Value::Date(Date::from_str_utc("2001-02-01").unwrap())
|
||||
Value::DateTime(DateTime::from_str("2001-02-01 00:00:00", None).unwrap())
|
||||
);
|
||||
assert_eq!(
|
||||
result.get(1),
|
||||
Value::Date(Date::from_str_utc("2012-12-23").unwrap())
|
||||
Value::DateTime(DateTime::from_str("2012-12-23 00:00:00", None).unwrap())
|
||||
);
|
||||
}
|
||||
|
||||
@@ -148,9 +208,15 @@ mod tests {
|
||||
fn test_greatest_takes_date_vector() {
|
||||
let function = GreatestFunction;
|
||||
assert_eq!(
|
||||
function.return_type(&[]).unwrap(),
|
||||
function
|
||||
.return_type(&[
|
||||
ConcreteDataType::date_datatype(),
|
||||
ConcreteDataType::date_datatype()
|
||||
])
|
||||
.unwrap(),
|
||||
ConcreteDataType::Date(DateType)
|
||||
);
|
||||
|
||||
let columns = vec![
|
||||
Arc::new(DateVector::from_slice(vec![-1, 2])) as _,
|
||||
Arc::new(DateVector::from_slice(vec![0, 1])) as _,
|
||||
@@ -168,4 +234,81 @@ mod tests {
|
||||
Value::Date(Date::from_str_utc("1970-01-03").unwrap())
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_greatest_takes_datetime_vector() {
|
||||
let function = GreatestFunction;
|
||||
assert_eq!(
|
||||
function
|
||||
.return_type(&[
|
||||
ConcreteDataType::datetime_datatype(),
|
||||
ConcreteDataType::datetime_datatype()
|
||||
])
|
||||
.unwrap(),
|
||||
ConcreteDataType::DateTime(DateTimeType)
|
||||
);
|
||||
|
||||
let columns = vec![
|
||||
Arc::new(DateTimeVector::from_slice(vec![-1, 2])) as _,
|
||||
Arc::new(DateTimeVector::from_slice(vec![0, 1])) as _,
|
||||
];
|
||||
|
||||
let result = function.eval(FunctionContext::default(), &columns).unwrap();
|
||||
let result = result.as_any().downcast_ref::<DateTimeVector>().unwrap();
|
||||
assert_eq!(result.len(), 2);
|
||||
assert_eq!(
|
||||
result.get(0),
|
||||
Value::DateTime(DateTime::from_str("1970-01-01 00:00:00", None).unwrap())
|
||||
);
|
||||
assert_eq!(
|
||||
result.get(1),
|
||||
Value::DateTime(DateTime::from_str("1970-01-01 00:00:00.002", None).unwrap())
|
||||
);
|
||||
}
|
||||
|
||||
macro_rules! test_timestamp {
|
||||
($type: expr,$unit: ident) => {
|
||||
paste! {
|
||||
#[test]
|
||||
fn [<test_greatest_takes_ $unit:lower _vector>]() {
|
||||
let function = GreatestFunction;
|
||||
assert_eq!(
|
||||
function.return_type(&[$type, $type]).unwrap(),
|
||||
ConcreteDataType::Timestamp(TimestampType::$unit([<Timestamp $unit Type>]))
|
||||
);
|
||||
|
||||
let columns = vec![
|
||||
Arc::new([<Timestamp $unit Vector>]::from_slice(vec![-1, 2])) as _,
|
||||
Arc::new([<Timestamp $unit Vector>]::from_slice(vec![0, 1])) as _,
|
||||
];
|
||||
|
||||
let result = function.eval(FunctionContext::default(), &columns).unwrap();
|
||||
let result = result.as_any().downcast_ref::<[<Timestamp $unit Vector>]>().unwrap();
|
||||
assert_eq!(result.len(), 2);
|
||||
assert_eq!(
|
||||
result.get(0),
|
||||
Value::Timestamp(Timestamp::new(0, TimeUnit::$unit))
|
||||
);
|
||||
assert_eq!(
|
||||
result.get(1),
|
||||
Value::Timestamp(Timestamp::new(2, TimeUnit::$unit))
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
test_timestamp!(
|
||||
ConcreteDataType::timestamp_nanosecond_datatype(),
|
||||
Nanosecond
|
||||
);
|
||||
test_timestamp!(
|
||||
ConcreteDataType::timestamp_microsecond_datatype(),
|
||||
Microsecond
|
||||
);
|
||||
test_timestamp!(
|
||||
ConcreteDataType::timestamp_millisecond_datatype(),
|
||||
Millisecond
|
||||
);
|
||||
test_timestamp!(ConcreteDataType::timestamp_second_datatype(), Second);
|
||||
}
|
||||
|
||||
src/common/function/src/scalars/vector.rs (new file, 36 lines)
@@ -0,0 +1,36 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

mod convert;
mod distance;
pub(crate) mod impl_conv;

use std::sync::Arc;

use crate::function_registry::FunctionRegistry;

pub(crate) struct VectorFunction;

impl VectorFunction {
    pub fn register(registry: &FunctionRegistry) {
        // conversion
        registry.register(Arc::new(convert::ParseVectorFunction));
        registry.register(Arc::new(convert::VectorToStringFunction));

        // distance
        registry.register(Arc::new(distance::CosDistanceFunction));
        registry.register(Arc::new(distance::DotProductFunction));
        registry.register(Arc::new(distance::L2SqDistanceFunction));
    }
}
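A minimal sketch of driving the two conversion functions directly through the `Function` trait, mirroring the unit tests that appear later in this diff; the module name, assertions, and imports here are illustrative assumptions, not part of the change itself.

// Illustrative only: round-trips a vector literal through parse_vec / vec_to_string.
#[cfg(test)]
mod usage_sketch {
    use std::sync::Arc;

    use datatypes::value::Value;
    use datatypes::vectors::StringVector;

    use super::convert;
    use crate::function::{Function, FunctionContext};

    #[test]
    fn parse_then_render_round_trip() {
        // "[1.0,2.0,3.0]" -> little-endian f32 binary payload.
        let input = Arc::new(StringVector::from(vec![Some("[1.0,2.0,3.0]".to_string())]));
        let binary = convert::ParseVectorFunction
            .eval(FunctionContext::default(), &[input])
            .unwrap();
        // binary payload -> canonical string rendering.
        let text = convert::VectorToStringFunction
            .eval(FunctionContext::default(), &[binary])
            .unwrap();
        assert_eq!(text.get(0), Value::String("[1,2,3]".to_string().into()));
    }
}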
src/common/function/src/scalars/vector/convert.rs (new file, 19 lines)
@@ -0,0 +1,19 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

mod parse_vector;
mod vector_to_string;

pub use parse_vector::ParseVectorFunction;
pub use vector_to_string::VectorToStringFunction;
src/common/function/src/scalars/vector/convert/parse_vector.rs (new file, 160 lines)
@@ -0,0 +1,160 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::fmt::Display;
|
||||
|
||||
use common_query::error::{InvalidFuncArgsSnafu, InvalidVectorStringSnafu, Result};
|
||||
use common_query::prelude::{Signature, Volatility};
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::scalars::ScalarVectorBuilder;
|
||||
use datatypes::types::parse_string_to_vector_type_value;
|
||||
use datatypes::vectors::{BinaryVectorBuilder, MutableVector, VectorRef};
|
||||
use snafu::{ensure, ResultExt};
|
||||
|
||||
use crate::function::{Function, FunctionContext};
|
||||
|
||||
const NAME: &str = "parse_vec";
|
||||
|
||||
#[derive(Debug, Clone, Default)]
|
||||
pub struct ParseVectorFunction;
|
||||
|
||||
impl Function for ParseVectorFunction {
|
||||
fn name(&self) -> &str {
|
||||
NAME
|
||||
}
|
||||
|
||||
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
|
||||
Ok(ConcreteDataType::binary_datatype())
|
||||
}
|
||||
|
||||
fn signature(&self) -> Signature {
|
||||
Signature::exact(
|
||||
vec![ConcreteDataType::string_datatype()],
|
||||
Volatility::Immutable,
|
||||
)
|
||||
}
|
||||
|
||||
fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
|
||||
ensure!(
|
||||
columns.len() == 1,
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"The length of the args is not correct, expect exactly one, have: {}",
|
||||
columns.len()
|
||||
),
|
||||
}
|
||||
);
|
||||
|
||||
let column = &columns[0];
|
||||
let size = column.len();
|
||||
|
||||
let mut result = BinaryVectorBuilder::with_capacity(size);
|
||||
for i in 0..size {
|
||||
let value = column.get(i).as_string();
|
||||
if let Some(value) = value {
|
||||
let res = parse_string_to_vector_type_value(&value, None)
|
||||
.context(InvalidVectorStringSnafu { vec_str: &value })?;
|
||||
result.push(Some(&res));
|
||||
} else {
|
||||
result.push_null();
|
||||
}
|
||||
}
|
||||
|
||||
Ok(result.to_vector())
|
||||
}
|
||||
}
|
||||
|
||||
impl Display for ParseVectorFunction {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "{}", NAME.to_ascii_uppercase())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_base::bytes::Bytes;
|
||||
use datatypes::value::Value;
|
||||
use datatypes::vectors::StringVector;
|
||||
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_parse_vector() {
|
||||
let func = ParseVectorFunction;
|
||||
|
||||
let input = Arc::new(StringVector::from(vec![
|
||||
Some("[1.0,2.0,3.0]".to_string()),
|
||||
Some("[4.0,5.0,6.0]".to_string()),
|
||||
None,
|
||||
]));
|
||||
|
||||
let result = func.eval(FunctionContext::default(), &[input]).unwrap();
|
||||
|
||||
let result = result.as_ref();
|
||||
assert_eq!(result.len(), 3);
|
||||
assert_eq!(
|
||||
result.get(0),
|
||||
Value::Binary(Bytes::from(
|
||||
[1.0f32, 2.0, 3.0]
|
||||
.iter()
|
||||
.flat_map(|e| e.to_le_bytes())
|
||||
.collect::<Vec<u8>>()
|
||||
))
|
||||
);
|
||||
assert_eq!(
|
||||
result.get(1),
|
||||
Value::Binary(Bytes::from(
|
||||
[4.0f32, 5.0, 6.0]
|
||||
.iter()
|
||||
.flat_map(|e| e.to_le_bytes())
|
||||
.collect::<Vec<u8>>()
|
||||
))
|
||||
);
|
||||
assert!(result.get(2).is_null());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_vector_error() {
|
||||
let func = ParseVectorFunction;
|
||||
|
||||
let input = Arc::new(StringVector::from(vec![
|
||||
Some("[1.0,2.0,3.0]".to_string()),
|
||||
Some("[4.0,5.0,6.0]".to_string()),
|
||||
Some("[7.0,8.0,9.0".to_string()),
|
||||
]));
|
||||
|
||||
let result = func.eval(FunctionContext::default(), &[input]);
|
||||
assert!(result.is_err());
|
||||
|
||||
let input = Arc::new(StringVector::from(vec![
|
||||
Some("[1.0,2.0,3.0]".to_string()),
|
||||
Some("[4.0,5.0,6.0]".to_string()),
|
||||
Some("7.0,8.0,9.0]".to_string()),
|
||||
]));
|
||||
|
||||
let result = func.eval(FunctionContext::default(), &[input]);
|
||||
assert!(result.is_err());
|
||||
|
||||
let input = Arc::new(StringVector::from(vec![
|
||||
Some("[1.0,2.0,3.0]".to_string()),
|
||||
Some("[4.0,5.0,6.0]".to_string()),
|
||||
Some("[7.0,hello,9.0]".to_string()),
|
||||
]));
|
||||
|
||||
let result = func.eval(FunctionContext::default(), &[input]);
|
||||
assert!(result.is_err());
|
||||
}
|
||||
}
|
||||
src/common/function/src/scalars/vector/convert/vector_to_string.rs (new file, 139 lines)
@@ -0,0 +1,139 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::fmt::Display;
|
||||
|
||||
use common_query::error::{InvalidFuncArgsSnafu, Result};
|
||||
use common_query::prelude::{Signature, Volatility};
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::scalars::ScalarVectorBuilder;
|
||||
use datatypes::types::vector_type_value_to_string;
|
||||
use datatypes::value::Value;
|
||||
use datatypes::vectors::{MutableVector, StringVectorBuilder, VectorRef};
|
||||
use snafu::ensure;
|
||||
|
||||
use crate::function::{Function, FunctionContext};
|
||||
|
||||
const NAME: &str = "vec_to_string";
|
||||
|
||||
#[derive(Debug, Clone, Default)]
|
||||
pub struct VectorToStringFunction;
|
||||
|
||||
impl Function for VectorToStringFunction {
|
||||
fn name(&self) -> &str {
|
||||
NAME
|
||||
}
|
||||
|
||||
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
|
||||
Ok(ConcreteDataType::string_datatype())
|
||||
}
|
||||
|
||||
fn signature(&self) -> Signature {
|
||||
Signature::exact(
|
||||
vec![ConcreteDataType::binary_datatype()],
|
||||
Volatility::Immutable,
|
||||
)
|
||||
}
|
||||
|
||||
fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
|
||||
ensure!(
|
||||
columns.len() == 1,
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"The length of the args is not correct, expect exactly one, have: {}",
|
||||
columns.len()
|
||||
),
|
||||
}
|
||||
);
|
||||
|
||||
let column = &columns[0];
|
||||
let size = column.len();
|
||||
|
||||
let mut result = StringVectorBuilder::with_capacity(size);
|
||||
for i in 0..size {
|
||||
let value = column.get(i);
|
||||
match value {
|
||||
Value::Binary(bytes) => {
|
||||
let len = bytes.len();
|
||||
if len % std::mem::size_of::<f32>() != 0 {
|
||||
return InvalidFuncArgsSnafu {
|
||||
err_msg: format!("Invalid binary length of vector: {}", len),
|
||||
}
|
||||
.fail();
|
||||
}
|
||||
|
||||
let dim = len / std::mem::size_of::<f32>();
|
||||
// Safety: `dim` is calculated from the length of `bytes` and is guaranteed to be valid
|
||||
let res = vector_type_value_to_string(&bytes, dim as _).unwrap();
|
||||
result.push(Some(&res));
|
||||
}
|
||||
Value::Null => {
|
||||
result.push_null();
|
||||
}
|
||||
_ => {
|
||||
return InvalidFuncArgsSnafu {
|
||||
err_msg: format!("Invalid value type: {:?}", value.data_type()),
|
||||
}
|
||||
.fail();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(result.to_vector())
|
||||
}
|
||||
}
|
||||
|
||||
impl Display for VectorToStringFunction {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "{}", NAME.to_ascii_uppercase())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use datatypes::value::Value;
|
||||
use datatypes::vectors::BinaryVectorBuilder;
|
||||
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_vector_to_string() {
|
||||
let func = VectorToStringFunction;
|
||||
|
||||
let mut builder = BinaryVectorBuilder::with_capacity(3);
|
||||
builder.push(Some(
|
||||
[1.0f32, 2.0, 3.0]
|
||||
.iter()
|
||||
.flat_map(|e| e.to_le_bytes())
|
||||
.collect::<Vec<_>>()
|
||||
.as_slice(),
|
||||
));
|
||||
builder.push(Some(
|
||||
[4.0f32, 5.0, 6.0]
|
||||
.iter()
|
||||
.flat_map(|e| e.to_le_bytes())
|
||||
.collect::<Vec<_>>()
|
||||
.as_slice(),
|
||||
));
|
||||
builder.push_null();
|
||||
let vector = builder.to_vector();
|
||||
|
||||
let result = func.eval(FunctionContext::default(), &[vector]).unwrap();
|
||||
|
||||
assert_eq!(result.len(), 3);
|
||||
assert_eq!(result.get(0), Value::String("[1,2,3]".to_string().into()));
|
||||
assert_eq!(result.get(1), Value::String("[4,5,6]".to_string().into()));
|
||||
assert_eq!(result.get(2), Value::Null);
|
||||
}
|
||||
}
|
||||
src/common/function/src/scalars/vector/distance.rs (new file, 366 lines)
@@ -0,0 +1,366 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
mod cos;
|
||||
mod dot;
|
||||
mod l2sq;
|
||||
|
||||
use std::borrow::Cow;
|
||||
use std::fmt::Display;
|
||||
|
||||
use common_query::error::{InvalidFuncArgsSnafu, Result};
|
||||
use common_query::prelude::Signature;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::scalars::ScalarVectorBuilder;
|
||||
use datatypes::vectors::{Float32VectorBuilder, MutableVector, VectorRef};
|
||||
use snafu::ensure;
|
||||
|
||||
use crate::function::{Function, FunctionContext};
|
||||
use crate::helper;
|
||||
use crate::scalars::vector::impl_conv::{as_veclit, as_veclit_if_const};
|
||||
|
||||
macro_rules! define_distance_function {
|
||||
($StructName:ident, $display_name:expr, $similarity_method:path) => {
|
||||
|
||||
/// A function calculates the distance between two vectors.
|
||||
|
||||
#[derive(Debug, Clone, Default)]
|
||||
pub struct $StructName;
|
||||
|
||||
impl Function for $StructName {
|
||||
fn name(&self) -> &str {
|
||||
$display_name
|
||||
}
|
||||
|
||||
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
|
||||
Ok(ConcreteDataType::float32_datatype())
|
||||
}
|
||||
|
||||
fn signature(&self) -> Signature {
|
||||
helper::one_of_sigs2(
|
||||
vec![
|
||||
ConcreteDataType::string_datatype(),
|
||||
ConcreteDataType::binary_datatype(),
|
||||
],
|
||||
vec![
|
||||
ConcreteDataType::string_datatype(),
|
||||
ConcreteDataType::binary_datatype(),
|
||||
],
|
||||
)
|
||||
}
|
||||
|
||||
fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
|
||||
ensure!(
|
||||
columns.len() == 2,
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"The length of the args is not correct, expect exactly two, have: {}",
|
||||
columns.len()
|
||||
),
|
||||
}
|
||||
);
|
||||
let arg0 = &columns[0];
|
||||
let arg1 = &columns[1];
|
||||
|
||||
let size = arg0.len();
|
||||
let mut result = Float32VectorBuilder::with_capacity(size);
|
||||
if size == 0 {
|
||||
return Ok(result.to_vector());
|
||||
}
|
||||
|
||||
let arg0_const = as_veclit_if_const(arg0)?;
|
||||
let arg1_const = as_veclit_if_const(arg1)?;
|
||||
|
||||
for i in 0..size {
|
||||
let vec0 = match arg0_const.as_ref() {
|
||||
Some(a) => Some(Cow::Borrowed(a.as_ref())),
|
||||
None => as_veclit(arg0.get_ref(i))?,
|
||||
};
|
||||
let vec1 = match arg1_const.as_ref() {
|
||||
Some(b) => Some(Cow::Borrowed(b.as_ref())),
|
||||
None => as_veclit(arg1.get_ref(i))?,
|
||||
};
|
||||
|
||||
if let (Some(vec0), Some(vec1)) = (vec0, vec1) {
|
||||
ensure!(
|
||||
vec0.len() == vec1.len(),
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"The length of the vectors must match to calculate distance, have: {} vs {}",
|
||||
vec0.len(),
|
||||
vec1.len()
|
||||
),
|
||||
}
|
||||
);
|
||||
|
||||
// Checked if the length of the vectors match
|
||||
let d = $similarity_method(vec0.as_ref(), vec1.as_ref());
|
||||
result.push(Some(d));
|
||||
} else {
|
||||
result.push_null();
|
||||
}
|
||||
}
|
||||
|
||||
return Ok(result.to_vector());
|
||||
}
|
||||
}
|
||||
|
||||
impl Display for $StructName {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "{}", $display_name.to_ascii_uppercase())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
define_distance_function!(CosDistanceFunction, "vec_cos_distance", cos::cos);
|
||||
define_distance_function!(L2SqDistanceFunction, "vec_l2sq_distance", l2sq::l2sq);
|
||||
define_distance_function!(DotProductFunction, "vec_dot_product", dot::dot);
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
use datatypes::vectors::{BinaryVector, ConstantVector, StringVector};
|
||||
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_distance_string_string() {
|
||||
let funcs = [
|
||||
Box::new(CosDistanceFunction {}) as Box<dyn Function>,
|
||||
Box::new(L2SqDistanceFunction {}) as Box<dyn Function>,
|
||||
Box::new(DotProductFunction {}) as Box<dyn Function>,
|
||||
];
|
||||
|
||||
for func in funcs {
|
||||
let vec1 = Arc::new(StringVector::from(vec![
|
||||
Some("[0.0, 1.0]"),
|
||||
Some("[1.0, 0.0]"),
|
||||
None,
|
||||
Some("[1.0, 0.0]"),
|
||||
])) as VectorRef;
|
||||
let vec2 = Arc::new(StringVector::from(vec![
|
||||
Some("[0.0, 1.0]"),
|
||||
Some("[0.0, 1.0]"),
|
||||
Some("[0.0, 1.0]"),
|
||||
None,
|
||||
])) as VectorRef;
|
||||
|
||||
let result = func
|
||||
.eval(FunctionContext::default(), &[vec1.clone(), vec2.clone()])
|
||||
.unwrap();
|
||||
|
||||
assert!(!result.get(0).is_null());
|
||||
assert!(!result.get(1).is_null());
|
||||
assert!(result.get(2).is_null());
|
||||
assert!(result.get(3).is_null());
|
||||
|
||||
let result = func
|
||||
.eval(FunctionContext::default(), &[vec2, vec1])
|
||||
.unwrap();
|
||||
|
||||
assert!(!result.get(0).is_null());
|
||||
assert!(!result.get(1).is_null());
|
||||
assert!(result.get(2).is_null());
|
||||
assert!(result.get(3).is_null());
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_distance_binary_binary() {
|
||||
let funcs = [
|
||||
Box::new(CosDistanceFunction {}) as Box<dyn Function>,
|
||||
Box::new(L2SqDistanceFunction {}) as Box<dyn Function>,
|
||||
Box::new(DotProductFunction {}) as Box<dyn Function>,
|
||||
];
|
||||
|
||||
for func in funcs {
|
||||
let vec1 = Arc::new(BinaryVector::from(vec![
|
||||
Some(vec![0, 0, 0, 0, 0, 0, 128, 63]),
|
||||
Some(vec![0, 0, 128, 63, 0, 0, 0, 0]),
|
||||
None,
|
||||
Some(vec![0, 0, 128, 63, 0, 0, 0, 0]),
|
||||
])) as VectorRef;
|
||||
let vec2 = Arc::new(BinaryVector::from(vec![
|
||||
// [0.0, 1.0]
|
||||
Some(vec![0, 0, 0, 0, 0, 0, 128, 63]),
|
||||
Some(vec![0, 0, 0, 0, 0, 0, 128, 63]),
|
||||
Some(vec![0, 0, 0, 0, 0, 0, 128, 63]),
|
||||
None,
|
||||
])) as VectorRef;
|
||||
|
||||
let result = func
|
||||
.eval(FunctionContext::default(), &[vec1.clone(), vec2.clone()])
|
||||
.unwrap();
|
||||
|
||||
assert!(!result.get(0).is_null());
|
||||
assert!(!result.get(1).is_null());
|
||||
assert!(result.get(2).is_null());
|
||||
assert!(result.get(3).is_null());
|
||||
|
||||
let result = func
|
||||
.eval(FunctionContext::default(), &[vec2, vec1])
|
||||
.unwrap();
|
||||
|
||||
assert!(!result.get(0).is_null());
|
||||
assert!(!result.get(1).is_null());
|
||||
assert!(result.get(2).is_null());
|
||||
assert!(result.get(3).is_null());
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_distance_string_binary() {
|
||||
let funcs = [
|
||||
Box::new(CosDistanceFunction {}) as Box<dyn Function>,
|
||||
Box::new(L2SqDistanceFunction {}) as Box<dyn Function>,
|
||||
Box::new(DotProductFunction {}) as Box<dyn Function>,
|
||||
];
|
||||
|
||||
for func in funcs {
|
||||
let vec1 = Arc::new(StringVector::from(vec![
|
||||
Some("[0.0, 1.0]"),
|
||||
Some("[1.0, 0.0]"),
|
||||
None,
|
||||
Some("[1.0, 0.0]"),
|
||||
])) as VectorRef;
|
||||
let vec2 = Arc::new(BinaryVector::from(vec![
|
||||
// [0.0, 1.0]
|
||||
Some(vec![0, 0, 0, 0, 0, 0, 128, 63]),
|
||||
Some(vec![0, 0, 0, 0, 0, 0, 128, 63]),
|
||||
Some(vec![0, 0, 0, 0, 0, 0, 128, 63]),
|
||||
None,
|
||||
])) as VectorRef;
|
||||
|
||||
let result = func
|
||||
.eval(FunctionContext::default(), &[vec1.clone(), vec2.clone()])
|
||||
.unwrap();
|
||||
|
||||
assert!(!result.get(0).is_null());
|
||||
assert!(!result.get(1).is_null());
|
||||
assert!(result.get(2).is_null());
|
||||
assert!(result.get(3).is_null());
|
||||
|
||||
let result = func
|
||||
.eval(FunctionContext::default(), &[vec2, vec1])
|
||||
.unwrap();
|
||||
|
||||
assert!(!result.get(0).is_null());
|
||||
assert!(!result.get(1).is_null());
|
||||
assert!(result.get(2).is_null());
|
||||
assert!(result.get(3).is_null());
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_distance_const_string() {
|
||||
let funcs = [
|
||||
Box::new(CosDistanceFunction {}) as Box<dyn Function>,
|
||||
Box::new(L2SqDistanceFunction {}) as Box<dyn Function>,
|
||||
Box::new(DotProductFunction {}) as Box<dyn Function>,
|
||||
];
|
||||
|
||||
for func in funcs {
|
||||
let const_str = Arc::new(ConstantVector::new(
|
||||
Arc::new(StringVector::from(vec!["[0.0, 1.0]"])),
|
||||
4,
|
||||
));
|
||||
|
||||
let vec1 = Arc::new(StringVector::from(vec![
|
||||
Some("[0.0, 1.0]"),
|
||||
Some("[1.0, 0.0]"),
|
||||
None,
|
||||
Some("[1.0, 0.0]"),
|
||||
])) as VectorRef;
|
||||
let vec2 = Arc::new(BinaryVector::from(vec![
|
||||
// [0.0, 1.0]
|
||||
Some(vec![0, 0, 0, 0, 0, 0, 128, 63]),
|
||||
Some(vec![0, 0, 0, 0, 0, 0, 128, 63]),
|
||||
Some(vec![0, 0, 0, 0, 0, 0, 128, 63]),
|
||||
None,
|
||||
])) as VectorRef;
|
||||
|
||||
let result = func
|
||||
.eval(
|
||||
FunctionContext::default(),
|
||||
&[const_str.clone(), vec1.clone()],
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
assert!(!result.get(0).is_null());
|
||||
assert!(!result.get(1).is_null());
|
||||
assert!(result.get(2).is_null());
|
||||
assert!(!result.get(3).is_null());
|
||||
|
||||
let result = func
|
||||
.eval(
|
||||
FunctionContext::default(),
|
||||
&[vec1.clone(), const_str.clone()],
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
assert!(!result.get(0).is_null());
|
||||
assert!(!result.get(1).is_null());
|
||||
assert!(result.get(2).is_null());
|
||||
assert!(!result.get(3).is_null());
|
||||
|
||||
let result = func
|
||||
.eval(
|
||||
FunctionContext::default(),
|
||||
&[const_str.clone(), vec2.clone()],
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
assert!(!result.get(0).is_null());
|
||||
assert!(!result.get(1).is_null());
|
||||
assert!(!result.get(2).is_null());
|
||||
assert!(result.get(3).is_null());
|
||||
|
||||
let result = func
|
||||
.eval(
|
||||
FunctionContext::default(),
|
||||
&[vec2.clone(), const_str.clone()],
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
assert!(!result.get(0).is_null());
|
||||
assert!(!result.get(1).is_null());
|
||||
assert!(!result.get(2).is_null());
|
||||
assert!(result.get(3).is_null());
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_invalid_vector_length() {
|
||||
let funcs = [
|
||||
Box::new(CosDistanceFunction {}) as Box<dyn Function>,
|
||||
Box::new(L2SqDistanceFunction {}) as Box<dyn Function>,
|
||||
Box::new(DotProductFunction {}) as Box<dyn Function>,
|
||||
];
|
||||
|
||||
for func in funcs {
|
||||
let vec1 = Arc::new(StringVector::from(vec!["[1.0]"])) as VectorRef;
|
||||
let vec2 = Arc::new(StringVector::from(vec!["[1.0, 1.0]"])) as VectorRef;
|
||||
let result = func.eval(FunctionContext::default(), &[vec1, vec2]);
|
||||
assert!(result.is_err());
|
||||
|
||||
let vec1 = Arc::new(BinaryVector::from(vec![vec![0, 0, 128, 63]])) as VectorRef;
|
||||
let vec2 =
|
||||
Arc::new(BinaryVector::from(vec![vec![0, 0, 128, 63, 0, 0, 0, 64]])) as VectorRef;
|
||||
let result = func.eval(FunctionContext::default(), &[vec1, vec2]);
|
||||
assert!(result.is_err());
|
||||
}
|
||||
}
|
||||
}
|
||||
src/common/function/src/scalars/vector/distance/cos.rs (new file, 87 lines)
@@ -0,0 +1,87 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use nalgebra::DVectorView;

/// Calculates the cos distance between two vectors.
///
/// **Note:** Must ensure that the length of the two vectors are the same.
pub fn cos(lhs: &[f32], rhs: &[f32]) -> f32 {
    let lhs_vec = DVectorView::from_slice(lhs, lhs.len());
    let rhs_vec = DVectorView::from_slice(rhs, rhs.len());

    let dot_product = lhs_vec.dot(&rhs_vec);
    let lhs_norm = lhs_vec.norm();
    let rhs_norm = rhs_vec.norm();
    if dot_product.abs() < f32::EPSILON
        || lhs_norm.abs() < f32::EPSILON
        || rhs_norm.abs() < f32::EPSILON
    {
        return 1.0;
    }

    let cos_similar = dot_product / (lhs_norm * rhs_norm);
    let res = 1.0 - cos_similar;
    if res.abs() < f32::EPSILON {
        0.0
    } else {
        res
    }
}

#[cfg(test)]
mod tests {
    use approx::assert_relative_eq;

    use super::*;

    #[test]
    fn test_cos_scalar() {
        let lhs = vec![1.0, 2.0, 3.0];
        let rhs = vec![1.0, 2.0, 3.0];
        assert_relative_eq!(cos(&lhs, &rhs), 0.0, epsilon = 1e-2);

        let lhs = vec![1.0, 2.0, 3.0];
        let rhs = vec![4.0, 5.0, 6.0];
        assert_relative_eq!(cos(&lhs, &rhs), 0.025, epsilon = 1e-2);

        let lhs = vec![1.0, 2.0, 3.0];
        let rhs = vec![7.0, 8.0, 9.0];
        assert_relative_eq!(cos(&lhs, &rhs), 0.04, epsilon = 1e-2);

        let lhs = vec![0.0, 0.0, 0.0];
        let rhs = vec![1.0, 2.0, 3.0];
        assert_relative_eq!(cos(&lhs, &rhs), 1.0, epsilon = 1e-2);

        let lhs = vec![0.0, 0.0, 0.0];
        let rhs = vec![4.0, 5.0, 6.0];
        assert_relative_eq!(cos(&lhs, &rhs), 1.0, epsilon = 1e-2);

        let lhs = vec![0.0, 0.0, 0.0];
        let rhs = vec![7.0, 8.0, 9.0];
        assert_relative_eq!(cos(&lhs, &rhs), 1.0, epsilon = 1e-2);

        let lhs = vec![7.0, 8.0, 9.0];
        let rhs = vec![1.0, 2.0, 3.0];
        assert_relative_eq!(cos(&lhs, &rhs), 0.04, epsilon = 1e-2);

        let lhs = vec![7.0, 8.0, 9.0];
        let rhs = vec![4.0, 5.0, 6.0];
        assert_relative_eq!(cos(&lhs, &rhs), 0.0, epsilon = 1e-2);

        let lhs = vec![7.0, 8.0, 9.0];
        let rhs = vec![7.0, 8.0, 9.0];
        assert_relative_eq!(cos(&lhs, &rhs), 0.0, epsilon = 1e-2);
    }
}
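As a reading aid (not spelled out in the code comments): the value returned above is one minus the cosine similarity, and it falls back to 1.0 when either operand is (near) the zero vector. For the test pair (1, 2, 3) and (4, 5, 6):

\[ d_{\cos}(a,b) = 1 - \frac{a \cdot b}{\lVert a \rVert \, \lVert b \rVert}, \qquad 1 - \frac{32}{\sqrt{14}\,\sqrt{77}} \approx 1 - 0.9746 \approx 0.025, \]

which matches the 0.025 asserted in `test_cos_scalar`.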
src/common/function/src/scalars/vector/distance/dot.rs (new file, 71 lines)
@@ -0,0 +1,71 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use nalgebra::DVectorView;

/// Calculates the dot product between two vectors.
///
/// **Note:** Must ensure that the length of the two vectors are the same.
pub fn dot(lhs: &[f32], rhs: &[f32]) -> f32 {
    let lhs = DVectorView::from_slice(lhs, lhs.len());
    let rhs = DVectorView::from_slice(rhs, rhs.len());

    lhs.dot(&rhs)
}

#[cfg(test)]
mod tests {
    use approx::assert_relative_eq;

    use super::*;

    #[test]
    fn test_dot_scalar() {
        let lhs = vec![1.0, 2.0, 3.0];
        let rhs = vec![1.0, 2.0, 3.0];
        assert_relative_eq!(dot(&lhs, &rhs), 14.0, epsilon = 1e-2);

        let lhs = vec![1.0, 2.0, 3.0];
        let rhs = vec![4.0, 5.0, 6.0];
        assert_relative_eq!(dot(&lhs, &rhs), 32.0, epsilon = 1e-2);

        let lhs = vec![1.0, 2.0, 3.0];
        let rhs = vec![7.0, 8.0, 9.0];
        assert_relative_eq!(dot(&lhs, &rhs), 50.0, epsilon = 1e-2);

        let lhs = vec![0.0, 0.0, 0.0];
        let rhs = vec![1.0, 2.0, 3.0];
        assert_relative_eq!(dot(&lhs, &rhs), 0.0, epsilon = 1e-2);

        let lhs = vec![0.0, 0.0, 0.0];
        let rhs = vec![4.0, 5.0, 6.0];
        assert_relative_eq!(dot(&lhs, &rhs), 0.0, epsilon = 1e-2);

        let lhs = vec![0.0, 0.0, 0.0];
        let rhs = vec![7.0, 8.0, 9.0];
        assert_relative_eq!(dot(&lhs, &rhs), 0.0, epsilon = 1e-2);

        let lhs = vec![7.0, 8.0, 9.0];
        let rhs = vec![1.0, 2.0, 3.0];
        assert_relative_eq!(dot(&lhs, &rhs), 50.0, epsilon = 1e-2);

        let lhs = vec![7.0, 8.0, 9.0];
        let rhs = vec![4.0, 5.0, 6.0];
        assert_relative_eq!(dot(&lhs, &rhs), 122.0, epsilon = 1e-2);

        let lhs = vec![7.0, 8.0, 9.0];
        let rhs = vec![7.0, 8.0, 9.0];
        assert_relative_eq!(dot(&lhs, &rhs), 194.0, epsilon = 1e-2);
    }
}
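For reference, the plain (unnormalized) dot product that backs `vec_dot_product`:

\[ a \cdot b = \sum_i a_i b_i, \qquad (1,2,3) \cdot (4,5,6) = 4 + 10 + 18 = 32, \]

the value asserted in `test_dot_scalar`.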
src/common/function/src/scalars/vector/distance/l2sq.rs (new file, 71 lines)
@@ -0,0 +1,71 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use nalgebra::DVectorView;

/// Calculates the squared L2 distance between two vectors.
///
/// **Note:** Must ensure that the length of the two vectors are the same.
pub fn l2sq(lhs: &[f32], rhs: &[f32]) -> f32 {
    let lhs = DVectorView::from_slice(lhs, lhs.len());
    let rhs = DVectorView::from_slice(rhs, rhs.len());

    (lhs - rhs).norm_squared()
}

#[cfg(test)]
mod tests {
    use approx::assert_relative_eq;

    use super::*;

    #[test]
    fn test_l2sq_scalar() {
        let lhs = vec![1.0, 2.0, 3.0];
        let rhs = vec![1.0, 2.0, 3.0];
        assert_relative_eq!(l2sq(&lhs, &rhs), 0.0, epsilon = 1e-2);

        let lhs = vec![1.0, 2.0, 3.0];
        let rhs = vec![4.0, 5.0, 6.0];
        assert_relative_eq!(l2sq(&lhs, &rhs), 27.0, epsilon = 1e-2);

        let lhs = vec![1.0, 2.0, 3.0];
        let rhs = vec![7.0, 8.0, 9.0];
        assert_relative_eq!(l2sq(&lhs, &rhs), 108.0, epsilon = 1e-2);

        let lhs = vec![0.0, 0.0, 0.0];
        let rhs = vec![1.0, 2.0, 3.0];
        assert_relative_eq!(l2sq(&lhs, &rhs), 14.0, epsilon = 1e-2);

        let lhs = vec![0.0, 0.0, 0.0];
        let rhs = vec![4.0, 5.0, 6.0];
        assert_relative_eq!(l2sq(&lhs, &rhs), 77.0, epsilon = 1e-2);

        let lhs = vec![0.0, 0.0, 0.0];
        let rhs = vec![7.0, 8.0, 9.0];
        assert_relative_eq!(l2sq(&lhs, &rhs), 194.0, epsilon = 1e-2);

        let lhs = vec![7.0, 8.0, 9.0];
        let rhs = vec![1.0, 2.0, 3.0];
        assert_relative_eq!(l2sq(&lhs, &rhs), 108.0, epsilon = 1e-2);

        let lhs = vec![7.0, 8.0, 9.0];
        let rhs = vec![4.0, 5.0, 6.0];
        assert_relative_eq!(l2sq(&lhs, &rhs), 27.0, epsilon = 1e-2);

        let lhs = vec![7.0, 8.0, 9.0];
        let rhs = vec![7.0, 8.0, 9.0];
        assert_relative_eq!(l2sq(&lhs, &rhs), 0.0, epsilon = 1e-2);
    }
}
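Note that `l2sq` returns the squared Euclidean distance with no square root applied, which is why the test expects 27 rather than sqrt(27) ≈ 5.2:

\[ \ell_2^2(a,b) = \lVert a - b \rVert^2 = \sum_i (a_i - b_i)^2, \qquad (1,2,3)\ \text{vs}\ (4,5,6): \ 3^2 + 3^2 + 3^2 = 27. \]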
src/common/function/src/scalars/vector/impl_conv.rs (new file, 156 lines)
@@ -0,0 +1,156 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::borrow::Cow;
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_query::error::{InvalidFuncArgsSnafu, Result};
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::value::ValueRef;
|
||||
use datatypes::vectors::Vector;
|
||||
|
||||
/// Convert a constant string or binary literal to a vector literal.
|
||||
pub fn as_veclit_if_const(arg: &Arc<dyn Vector>) -> Result<Option<Cow<'_, [f32]>>> {
|
||||
if !arg.is_const() {
|
||||
return Ok(None);
|
||||
}
|
||||
if arg.data_type() != ConcreteDataType::string_datatype()
|
||||
&& arg.data_type() != ConcreteDataType::binary_datatype()
|
||||
{
|
||||
return Ok(None);
|
||||
}
|
||||
as_veclit(arg.get_ref(0))
|
||||
}
|
||||
|
||||
/// Convert a string or binary literal to a vector literal.
|
||||
pub fn as_veclit(arg: ValueRef<'_>) -> Result<Option<Cow<'_, [f32]>>> {
|
||||
match arg.data_type() {
|
||||
ConcreteDataType::Binary(_) => arg
|
||||
.as_binary()
|
||||
.unwrap() // Safe: checked if it is a binary
|
||||
.map(binlit_as_veclit)
|
||||
.transpose(),
|
||||
ConcreteDataType::String(_) => arg
|
||||
.as_string()
|
||||
.unwrap() // Safe: checked if it is a string
|
||||
.map(|s| Ok(Cow::Owned(parse_veclit_from_strlit(s)?)))
|
||||
.transpose(),
|
||||
ConcreteDataType::Null(_) => Ok(None),
|
||||
_ => InvalidFuncArgsSnafu {
|
||||
err_msg: format!("Unsupported data type: {:?}", arg.data_type()),
|
||||
}
|
||||
.fail(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Convert a u8 slice to a vector literal.
|
||||
pub fn binlit_as_veclit(bytes: &[u8]) -> Result<Cow<'_, [f32]>> {
|
||||
if bytes.len() % std::mem::size_of::<f32>() != 0 {
|
||||
return InvalidFuncArgsSnafu {
|
||||
err_msg: format!("Invalid binary length of vector: {}", bytes.len()),
|
||||
}
|
||||
.fail();
|
||||
}
|
||||
|
||||
if cfg!(target_endian = "little") {
|
||||
Ok(unsafe {
|
||||
let vec = std::slice::from_raw_parts(
|
||||
bytes.as_ptr() as *const f32,
|
||||
bytes.len() / std::mem::size_of::<f32>(),
|
||||
);
|
||||
Cow::Borrowed(vec)
|
||||
})
|
||||
} else {
|
||||
let v = bytes
|
||||
.chunks_exact(std::mem::size_of::<f32>())
|
||||
.map(|chunk| f32::from_le_bytes(chunk.try_into().unwrap()))
|
||||
.collect::<Vec<f32>>();
|
||||
Ok(Cow::Owned(v))
|
||||
}
|
||||
}
|
||||
|
||||
/// Parse a string literal to a vector literal.
|
||||
/// Valid inputs are strings like "[1.0, 2.0, 3.0]".
|
||||
pub fn parse_veclit_from_strlit(s: &str) -> Result<Vec<f32>> {
|
||||
let trimmed = s.trim();
|
||||
if !trimmed.starts_with('[') || !trimmed.ends_with(']') {
|
||||
return InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"Failed to parse {s} to Vector value: not properly enclosed in brackets"
|
||||
),
|
||||
}
|
||||
.fail();
|
||||
}
|
||||
let content = trimmed[1..trimmed.len() - 1].trim();
|
||||
if content.is_empty() {
|
||||
return Ok(Vec::new());
|
||||
}
|
||||
|
||||
content
|
||||
.split(',')
|
||||
.map(|s| s.trim().parse::<f32>())
|
||||
.collect::<std::result::Result<_, _>>()
|
||||
.map_err(|e| {
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!("Failed to parse {s} to Vector value: {e}"),
|
||||
}
|
||||
.build()
|
||||
})
|
||||
}
|
||||
|
||||
#[allow(unused)]
|
||||
/// Convert a vector literal to a binary literal.
|
||||
pub fn veclit_to_binlit(vec: &[f32]) -> Vec<u8> {
|
||||
if cfg!(target_endian = "little") {
|
||||
unsafe {
|
||||
std::slice::from_raw_parts(vec.as_ptr() as *const u8, std::mem::size_of_val(vec))
|
||||
.to_vec()
|
||||
}
|
||||
} else {
|
||||
let mut bytes = Vec::with_capacity(std::mem::size_of_val(vec));
|
||||
for e in vec {
|
||||
bytes.extend_from_slice(&e.to_le_bytes());
|
||||
}
|
||||
bytes
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_parse_veclit_from_strlit() {
|
||||
let result = parse_veclit_from_strlit("[1.0, 2.0, 3.0]").unwrap();
|
||||
assert_eq!(result, vec![1.0, 2.0, 3.0]);
|
||||
|
||||
let result = parse_veclit_from_strlit("[]").unwrap();
|
||||
assert_eq!(result, Vec::<f32>::new());
|
||||
|
||||
let result = parse_veclit_from_strlit("[1.0, a, 3.0]");
|
||||
assert!(result.is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_binlit_as_veclit() {
|
||||
let vec = &[1.0, 2.0, 3.0];
|
||||
let bytes = veclit_to_binlit(vec);
|
||||
let result = binlit_as_veclit(&bytes).unwrap();
|
||||
assert_eq!(result.as_ref(), vec);
|
||||
|
||||
let invalid_bytes = [0, 0, 128];
|
||||
let result = binlit_as_veclit(&invalid_bytes);
|
||||
assert!(result.is_err());
|
||||
}
|
||||
}
|
||||
@@ -14,30 +14,30 @@
|
||||
|
||||
use api::helper::ColumnDataTypeWrapper;
|
||||
use api::v1::add_column_location::LocationType;
|
||||
use api::v1::alter_expr::Kind;
|
||||
use api::v1::alter_table_expr::Kind;
|
||||
use api::v1::column_def::as_fulltext_option;
|
||||
use api::v1::{
|
||||
column_def, AddColumnLocation as Location, AlterExpr, Analyzer, ChangeColumnTypes,
|
||||
CreateTableExpr, DropColumns, RenameTable, SemanticType,
|
||||
column_def, AddColumnLocation as Location, AlterTableExpr, Analyzer, CreateTableExpr,
|
||||
DropColumns, ModifyColumnTypes, RenameTable, SemanticType,
|
||||
};
|
||||
use common_query::AddColumnLocation;
|
||||
use datatypes::schema::{ColumnSchema, FulltextOptions, RawSchema};
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
use store_api::region_request::ChangeOption;
|
||||
use store_api::region_request::{SetRegionOption, UnsetRegionOption};
|
||||
use table::metadata::TableId;
|
||||
use table::requests::{AddColumnRequest, AlterKind, AlterTableRequest, ChangeColumnTypeRequest};
|
||||
use table::requests::{AddColumnRequest, AlterKind, AlterTableRequest, ModifyColumnTypeRequest};
|
||||
|
||||
use crate::error::{
|
||||
InvalidChangeFulltextOptionRequestSnafu, InvalidChangeTableOptionRequestSnafu,
|
||||
InvalidColumnDefSnafu, MissingFieldSnafu, MissingTimestampColumnSnafu, Result,
|
||||
InvalidColumnDefSnafu, InvalidSetFulltextOptionRequestSnafu, InvalidSetTableOptionRequestSnafu,
|
||||
InvalidUnsetTableOptionRequestSnafu, MissingFieldSnafu, MissingTimestampColumnSnafu, Result,
|
||||
UnknownLocationTypeSnafu,
|
||||
};
|
||||
|
||||
const LOCATION_TYPE_FIRST: i32 = LocationType::First as i32;
|
||||
const LOCATION_TYPE_AFTER: i32 = LocationType::After as i32;
|
||||
|
||||
/// Convert an [`AlterExpr`] to an [`AlterTableRequest`]
|
||||
pub fn alter_expr_to_request(table_id: TableId, expr: AlterExpr) -> Result<AlterTableRequest> {
|
||||
/// Convert an [`AlterTableExpr`] to an [`AlterTableRequest`]
|
||||
pub fn alter_expr_to_request(table_id: TableId, expr: AlterTableExpr) -> Result<AlterTableRequest> {
|
||||
let catalog_name = expr.catalog_name;
|
||||
let schema_name = expr.schema_name;
|
||||
let kind = expr.kind.context(MissingFieldSnafu { field: "kind" })?;
|
||||
@@ -68,25 +68,25 @@ pub fn alter_expr_to_request(table_id: TableId, expr: AlterExpr) -> Result<Alter
|
||||
columns: add_column_requests,
|
||||
}
|
||||
}
|
||||
Kind::ChangeColumnTypes(ChangeColumnTypes {
|
||||
change_column_types,
|
||||
Kind::ModifyColumnTypes(ModifyColumnTypes {
|
||||
modify_column_types,
|
||||
}) => {
|
||||
let change_column_type_requests = change_column_types
|
||||
let modify_column_type_requests = modify_column_types
|
||||
.into_iter()
|
||||
.map(|cct| {
|
||||
let target_type =
|
||||
ColumnDataTypeWrapper::new(cct.target_type(), cct.target_type_extension)
|
||||
.into();
|
||||
|
||||
Ok(ChangeColumnTypeRequest {
|
||||
Ok(ModifyColumnTypeRequest {
|
||||
column_name: cct.column_name,
|
||||
target_type,
|
||||
})
|
||||
})
|
||||
.collect::<Result<Vec<_>>>()?;
|
||||
|
||||
AlterKind::ChangeColumnTypes {
|
||||
columns: change_column_type_requests,
|
||||
AlterKind::ModifyColumnTypes {
|
||||
columns: modify_column_type_requests,
|
||||
}
|
||||
}
|
||||
Kind::DropColumns(DropColumns { drop_columns }) => AlterKind::DropColumns {
|
||||
@@ -95,26 +95,37 @@ pub fn alter_expr_to_request(table_id: TableId, expr: AlterExpr) -> Result<Alter
|
||||
Kind::RenameTable(RenameTable { new_table_name }) => {
|
||||
AlterKind::RenameTable { new_table_name }
|
||||
}
|
||||
Kind::ChangeTableOptions(api::v1::ChangeTableOptions {
|
||||
change_table_options,
|
||||
}) => AlterKind::ChangeTableOptions {
|
||||
options: change_table_options
|
||||
.iter()
|
||||
.map(ChangeOption::try_from)
|
||||
.collect::<std::result::Result<Vec<_>, _>>()
|
||||
.context(InvalidChangeTableOptionRequestSnafu)?,
|
||||
},
|
||||
Kind::ChangeColumnFulltext(c) => AlterKind::ChangeColumnFulltext {
|
||||
Kind::SetTableOptions(api::v1::SetTableOptions { table_options }) => {
|
||||
AlterKind::SetTableOptions {
|
||||
options: table_options
|
||||
.iter()
|
||||
.map(SetRegionOption::try_from)
|
||||
.collect::<std::result::Result<Vec<_>, _>>()
|
||||
.context(InvalidSetTableOptionRequestSnafu)?,
|
||||
}
|
||||
}
|
||||
Kind::UnsetTableOptions(api::v1::UnsetTableOptions { keys }) => {
|
||||
AlterKind::UnsetTableOptions {
|
||||
keys: keys
|
||||
.iter()
|
||||
.map(|key| UnsetRegionOption::try_from(key.as_str()))
|
||||
.collect::<std::result::Result<Vec<_>, _>>()
|
||||
.context(InvalidUnsetTableOptionRequestSnafu)?,
|
||||
}
|
||||
}
|
||||
Kind::SetColumnFulltext(c) => AlterKind::SetColumnFulltext {
|
||||
column_name: c.column_name,
|
||||
options: FulltextOptions {
|
||||
enable: c.enable,
|
||||
analyzer: as_fulltext_option(
|
||||
Analyzer::try_from(c.analyzer)
|
||||
.context(InvalidChangeFulltextOptionRequestSnafu)?,
|
||||
Analyzer::try_from(c.analyzer).context(InvalidSetFulltextOptionRequestSnafu)?,
|
||||
),
|
||||
case_sensitive: c.case_sensitive,
|
||||
},
|
||||
},
|
||||
Kind::UnsetColumnFulltext(c) => AlterKind::UnsetColumnFulltext {
|
||||
column_name: c.column_name,
|
||||
},
|
||||
};
|
||||
|
||||
let request = AlterTableRequest {
|
||||
@@ -183,7 +194,7 @@ fn parse_location(location: Option<Location>) -> Result<Option<AddColumnLocation
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use api::v1::{
|
||||
AddColumn, AddColumns, ChangeColumnType, ColumnDataType, ColumnDef, DropColumn,
|
||||
AddColumn, AddColumns, ColumnDataType, ColumnDef, DropColumn, ModifyColumnType,
|
||||
SemanticType,
|
||||
};
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
@@ -192,7 +203,7 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_alter_expr_to_request() {
|
||||
let expr = AlterExpr {
|
||||
let expr = AlterTableExpr {
|
||||
catalog_name: String::default(),
|
||||
schema_name: String::default(),
|
||||
table_name: "monitor".to_string(),
|
||||
@@ -233,7 +244,7 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_alter_expr_with_location_to_request() {
|
||||
let expr = AlterExpr {
|
||||
let expr = AlterTableExpr {
|
||||
catalog_name: String::default(),
|
||||
schema_name: String::default(),
|
||||
table_name: "monitor".to_string(),
|
||||
@@ -309,14 +320,14 @@ mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_change_column_type_expr() {
|
||||
let expr = AlterExpr {
|
||||
fn test_modify_column_type_expr() {
|
||||
let expr = AlterTableExpr {
|
||||
catalog_name: "test_catalog".to_string(),
|
||||
schema_name: "test_schema".to_string(),
|
||||
table_name: "monitor".to_string(),
|
||||
|
||||
kind: Some(Kind::ChangeColumnTypes(ChangeColumnTypes {
|
||||
change_column_types: vec![ChangeColumnType {
|
||||
kind: Some(Kind::ModifyColumnTypes(ModifyColumnTypes {
|
||||
modify_column_types: vec![ModifyColumnType {
|
||||
column_name: "mem_usage".to_string(),
|
||||
target_type: ColumnDataType::String as i32,
|
||||
target_type_extension: None,
|
||||
@@ -329,22 +340,22 @@ mod tests {
|
||||
assert_eq!(alter_request.schema_name, "test_schema");
|
||||
assert_eq!("monitor".to_string(), alter_request.table_name);
|
||||
|
||||
let mut change_column_types = match alter_request.alter_kind {
|
||||
AlterKind::ChangeColumnTypes { columns } => columns,
|
||||
let mut modify_column_types = match alter_request.alter_kind {
|
||||
AlterKind::ModifyColumnTypes { columns } => columns,
|
||||
_ => unreachable!(),
|
||||
};
|
||||
|
||||
let change_column_type = change_column_types.pop().unwrap();
|
||||
assert_eq!("mem_usage", change_column_type.column_name);
|
||||
let modify_column_type = modify_column_types.pop().unwrap();
|
||||
assert_eq!("mem_usage", modify_column_type.column_name);
|
||||
assert_eq!(
|
||||
ConcreteDataType::string_datatype(),
|
||||
change_column_type.target_type
|
||||
modify_column_type.target_type
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_drop_column_expr() {
|
||||
let expr = AlterExpr {
|
||||
let expr = AlterTableExpr {
|
||||
catalog_name: "test_catalog".to_string(),
|
||||
schema_name: "test_schema".to_string(),
|
||||
table_name: "monitor".to_string(),
|
||||
|
||||
@@ -120,14 +120,20 @@ pub enum Error {
        location: Location,
    },

    #[snafu(display("Invalid change table option request"))]
    InvalidChangeTableOptionRequest {
    #[snafu(display("Invalid set table option request"))]
    InvalidSetTableOptionRequest {
        #[snafu(source)]
        error: MetadataError,
    },

    #[snafu(display("Invalid change fulltext option request"))]
    InvalidChangeFulltextOptionRequest {
    #[snafu(display("Invalid unset table option request"))]
    InvalidUnsetTableOptionRequest {
        #[snafu(source)]
        error: MetadataError,
    },

    #[snafu(display("Invalid set fulltext option request"))]
    InvalidSetFulltextOptionRequest {
        #[snafu(implicit)]
        location: Location,
        #[snafu(source)]
@@ -156,8 +162,9 @@ impl ErrorExt for Error {
            Error::UnknownColumnDataType { .. } | Error::InvalidFulltextColumnType { .. } => {
                StatusCode::InvalidArguments
            }
            Error::InvalidChangeTableOptionRequest { .. }
            | Error::InvalidChangeFulltextOptionRequest { .. } => StatusCode::InvalidArguments,
            Error::InvalidSetTableOptionRequest { .. }
            | Error::InvalidUnsetTableOptionRequest { .. }
            | Error::InvalidSetFulltextOptionRequest { .. } => StatusCode::InvalidArguments,
        }
    }

@@ -24,7 +24,8 @@ pub use registry::{
    LayeredCacheRegistryBuilder, LayeredCacheRegistryRef,
};
pub use table::{
    new_table_info_cache, new_table_name_cache, new_table_route_cache, new_view_info_cache,
    TableInfoCache, TableInfoCacheRef, TableNameCache, TableNameCacheRef, TableRoute,
    TableRouteCache, TableRouteCacheRef, ViewInfoCache, ViewInfoCacheRef,
    new_schema_cache, new_table_info_cache, new_table_name_cache, new_table_route_cache,
    new_table_schema_cache, new_view_info_cache, SchemaCache, SchemaCacheRef, TableInfoCache,
    TableInfoCacheRef, TableNameCache, TableNameCacheRef, TableRoute, TableRouteCache,
    TableRouteCacheRef, TableSchemaCache, TableSchemaCacheRef, ViewInfoCache, ViewInfoCacheRef,
};

src/common/meta/src/cache/table.rs (4 changed lines)
@@ -12,12 +12,16 @@
// See the License for the specific language governing permissions and
// limitations under the License.

mod schema;
mod table_info;
mod table_name;
mod table_route;
mod table_schema;
mod view_info;

pub use schema::{new_schema_cache, SchemaCache, SchemaCacheRef};
pub use table_info::{new_table_info_cache, TableInfoCache, TableInfoCacheRef};
pub use table_name::{new_table_name_cache, TableNameCache, TableNameCacheRef};
pub use table_route::{new_table_route_cache, TableRoute, TableRouteCache, TableRouteCacheRef};
pub use table_schema::{new_table_schema_cache, TableSchemaCache, TableSchemaCacheRef};
pub use view_info::{new_view_info_cache, ViewInfoCache, ViewInfoCacheRef};

src/common/meta/src/cache/table/schema.rs (new file, 73 lines)
@@ -0,0 +1,73 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Arc;

use futures_util::future::BoxFuture;
use moka::future::Cache;
use snafu::OptionExt;

use crate::cache::{CacheContainer, Initializer};
use crate::error::ValueNotExistSnafu;
use crate::instruction::CacheIdent;
use crate::key::schema_name::{SchemaManager, SchemaName, SchemaNameKey, SchemaNameValue};
use crate::kv_backend::KvBackendRef;

pub type SchemaCache = CacheContainer<SchemaName, Arc<SchemaNameValue>, CacheIdent>;
pub type SchemaCacheRef = Arc<SchemaCache>;

/// Constructs a [SchemaCache].
pub fn new_schema_cache(
    name: String,
    cache: Cache<SchemaName, Arc<SchemaNameValue>>,
    kv_backend: KvBackendRef,
) -> SchemaCache {
    let schema_manager = SchemaManager::new(kv_backend.clone());
    let init = init_factory(schema_manager);

    CacheContainer::new(name, cache, Box::new(invalidator), init, Box::new(filter))
}

fn init_factory(schema_manager: SchemaManager) -> Initializer<SchemaName, Arc<SchemaNameValue>> {
    Arc::new(move |schema_name| {
        let manager = schema_manager.clone();
        Box::pin(async move {
            let schema_value = manager
                .get(SchemaNameKey {
                    catalog: &schema_name.catalog_name,
                    schema: &schema_name.schema_name,
                })
                .await?
                .context(ValueNotExistSnafu)?
                .into_inner();
            Ok(Some(Arc::new(schema_value)))
        })
    })
}

fn invalidator<'a>(
    cache: &'a Cache<SchemaName, Arc<SchemaNameValue>>,
    ident: &'a CacheIdent,
) -> BoxFuture<'a, crate::error::Result<()>> {
    Box::pin(async move {
        if let CacheIdent::SchemaName(schema_name) = ident {
            cache.invalidate(schema_name).await
        }
        Ok(())
    })
}

fn filter(ident: &CacheIdent) -> bool {
    matches!(ident, CacheIdent::SchemaName(_))
}
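A rough sketch of constructing this cache by hand; the moka builder call is part of the moka API, but the in-memory `MemoryKvBackend`, the capacity, and the cache name are illustrative assumptions, not taken from this diff (the real wiring goes through the layered cache registry).

// Illustrative only: hand-wiring a SchemaCache for a test-like setup.
use std::sync::Arc;

use moka::future::Cache;

use crate::kv_backend::memory::MemoryKvBackend; // assumed in-memory backend, for illustration

fn build_schema_cache_sketch() -> SchemaCacheRef {
    // Capacity and name are arbitrary here.
    let kv_backend = Arc::new(MemoryKvBackend::new());
    let cache = Cache::builder().max_capacity(1024).build();
    Arc::new(new_schema_cache("schema_cache".to_string(), cache, kv_backend))
}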
76
src/common/meta/src/cache/table/table_schema.rs
vendored
Normal file
76
src/common/meta/src/cache/table/table_schema.rs
vendored
Normal file
@@ -0,0 +1,76 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//! Cache for table id to schema name mapping.
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use futures_util::future::BoxFuture;
|
||||
use moka::future::Cache;
|
||||
use snafu::OptionExt;
|
||||
use store_api::storage::TableId;
|
||||
|
||||
use crate::cache::{CacheContainer, Initializer};
|
||||
use crate::error;
|
||||
use crate::instruction::CacheIdent;
|
||||
use crate::key::schema_name::SchemaName;
|
||||
use crate::key::table_info::TableInfoManager;
|
||||
use crate::kv_backend::KvBackendRef;
|
||||
|
||||
pub type TableSchemaCache = CacheContainer<TableId, Arc<SchemaName>, CacheIdent>;
|
||||
pub type TableSchemaCacheRef = Arc<TableSchemaCache>;
|
||||
|
||||
/// Constructs a [TableSchemaCache].
|
||||
pub fn new_table_schema_cache(
|
||||
name: String,
|
||||
cache: Cache<TableId, Arc<SchemaName>>,
|
||||
kv_backend: KvBackendRef,
|
||||
) -> TableSchemaCache {
|
||||
let table_info_manager = TableInfoManager::new(kv_backend);
|
||||
let init = init_factory(table_info_manager);
|
||||
|
||||
CacheContainer::new(name, cache, Box::new(invalidator), init, Box::new(filter))
|
||||
}
|
||||
|
||||
fn init_factory(table_info_manager: TableInfoManager) -> Initializer<TableId, Arc<SchemaName>> {
|
||||
Arc::new(move |table_id| {
|
||||
let table_info_manager = table_info_manager.clone();
|
||||
Box::pin(async move {
|
||||
let raw_table_info = table_info_manager
|
||||
.get(*table_id)
|
||||
.await?
|
||||
.context(error::ValueNotExistSnafu)?
|
||||
.into_inner()
|
||||
.table_info;
|
||||
|
||||
Ok(Some(Arc::new(SchemaName {
|
||||
catalog_name: raw_table_info.catalog_name,
|
||||
schema_name: raw_table_info.schema_name,
|
||||
})))
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
/// Never invalidates the table id to schema name cache.
|
||||
fn invalidator<'a>(
|
||||
_cache: &'a Cache<TableId, Arc<SchemaName>>,
|
||||
_ident: &'a CacheIdent,
|
||||
) -> BoxFuture<'a, error::Result<()>> {
|
||||
Box::pin(std::future::ready(Ok(())))
|
||||
}
|
||||
|
||||
/// Never matches any cache ident, since the table id to schema name cache is never invalidated.
|
||||
fn filter(_ident: &CacheIdent) -> bool {
|
||||
false
|
||||
}
|
||||
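The table id cache is deliberately never invalidated: a table's catalog and schema do not change for as long as the table id is alive, so the invalidator and filter above are no-ops. Combined with the schema cache from the previous file, it allows resolving a table's database options in two lookups. A rough sketch of that chain, with plain maps standing in for the two caches (all names hypothetical):

use std::collections::HashMap;

// Stand-ins for the two caches: table id -> (catalog, schema) and
// (catalog, schema) -> schema options (e.g. a TTL string).
fn resolve_schema_options(
    table_id: u32,
    table_schema: &HashMap<u32, (String, String)>,
    schema_options: &HashMap<(String, String), String>,
) -> Option<String> {
    let schema_name = table_schema.get(&table_id)?;
    schema_options.get(schema_name).cloned()
}

fn main() {
    let mut table_schema = HashMap::new();
    table_schema.insert(1024, ("greptime".to_string(), "public".to_string()));
    let mut schema_options = HashMap::new();
    schema_options.insert(
        ("greptime".to_string(), "public".to_string()),
        "ttl=1d".to_string(),
    );
    assert_eq!(
        resolve_schema_options(1024, &table_schema, &schema_options).as_deref(),
        Some("ttl=1d")
    );
}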
@@ -32,6 +32,7 @@ use crate::rpc::ddl::{SubmitDdlTaskRequest, SubmitDdlTaskResponse};
|
||||
use crate::rpc::procedure::{MigrateRegionRequest, MigrateRegionResponse, ProcedureStateResponse};
|
||||
use crate::{ClusterId, DatanodeId};
|
||||
|
||||
pub mod alter_database;
|
||||
pub mod alter_logical_tables;
|
||||
pub mod alter_table;
|
||||
pub mod create_database;
|
||||
|
||||
src/common/meta/src/ddl/alter_database.rs (new file, 244 lines)
@@ -0,0 +1,244 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use async_trait::async_trait;
|
||||
use common_procedure::error::{FromJsonSnafu, Result as ProcedureResult, ToJsonSnafu};
|
||||
use common_procedure::{Context as ProcedureContext, LockKey, Procedure, Status};
|
||||
use common_telemetry::tracing::info;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use snafu::{ensure, ResultExt};
|
||||
use strum::AsRefStr;
|
||||
|
||||
use super::utils::handle_retry_error;
|
||||
use crate::cache_invalidator::Context;
|
||||
use crate::ddl::DdlContext;
|
||||
use crate::error::{Result, SchemaNotFoundSnafu};
|
||||
use crate::instruction::CacheIdent;
|
||||
use crate::key::schema_name::{SchemaName, SchemaNameKey, SchemaNameValue};
|
||||
use crate::key::DeserializedValueWithBytes;
|
||||
use crate::lock_key::{CatalogLock, SchemaLock};
|
||||
use crate::rpc::ddl::UnsetDatabaseOption::{self};
|
||||
use crate::rpc::ddl::{AlterDatabaseKind, AlterDatabaseTask, SetDatabaseOption};
|
||||
use crate::ClusterId;
|
||||
|
||||
pub struct AlterDatabaseProcedure {
|
||||
pub context: DdlContext,
|
||||
pub data: AlterDatabaseData,
|
||||
}
|
||||
|
||||
fn build_new_schema_value(
|
||||
mut value: SchemaNameValue,
|
||||
alter_kind: &AlterDatabaseKind,
|
||||
) -> Result<SchemaNameValue> {
|
||||
match alter_kind {
|
||||
AlterDatabaseKind::SetDatabaseOptions(options) => {
|
||||
for option in options.0.iter() {
|
||||
match option {
|
||||
SetDatabaseOption::Ttl(ttl) => {
|
||||
value.ttl = Some(*ttl);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
AlterDatabaseKind::UnsetDatabaseOptions(keys) => {
|
||||
for key in keys.0.iter() {
|
||||
match key {
|
||||
UnsetDatabaseOption::Ttl => value.ttl = None,
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(value)
|
||||
}
|
||||
|
||||
impl AlterDatabaseProcedure {
|
||||
pub const TYPE_NAME: &'static str = "metasrv-procedure::AlterDatabase";
|
||||
|
||||
pub fn new(
|
||||
cluster_id: ClusterId,
|
||||
task: AlterDatabaseTask,
|
||||
context: DdlContext,
|
||||
) -> Result<Self> {
|
||||
Ok(Self {
|
||||
context,
|
||||
data: AlterDatabaseData::new(task, cluster_id)?,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn from_json(json: &str, context: DdlContext) -> ProcedureResult<Self> {
|
||||
let data = serde_json::from_str(json).context(FromJsonSnafu)?;
|
||||
|
||||
Ok(Self { context, data })
|
||||
}
|
||||
|
||||
pub async fn on_prepare(&mut self) -> Result<Status> {
|
||||
let value = self
|
||||
.context
|
||||
.table_metadata_manager
|
||||
.schema_manager()
|
||||
.get(SchemaNameKey::new(self.data.catalog(), self.data.schema()))
|
||||
.await?;
|
||||
|
||||
ensure!(
|
||||
value.is_some(),
|
||||
SchemaNotFoundSnafu {
|
||||
table_schema: self.data.schema(),
|
||||
}
|
||||
);
|
||||
|
||||
self.data.schema_value = value;
|
||||
self.data.state = AlterDatabaseState::UpdateMetadata;
|
||||
|
||||
Ok(Status::executing(true))
|
||||
}
|
||||
|
||||
pub async fn on_update_metadata(&mut self) -> Result<Status> {
|
||||
let schema_name = SchemaNameKey::new(self.data.catalog(), self.data.schema());
|
||||
|
||||
// Safety: schema_value is not None.
|
||||
let current_schema_value = self.data.schema_value.as_ref().unwrap();
|
||||
|
||||
let new_schema_value = build_new_schema_value(
|
||||
current_schema_value.get_inner_ref().clone(),
|
||||
&self.data.kind,
|
||||
)?;
|
||||
|
||||
self.context
|
||||
.table_metadata_manager
|
||||
.schema_manager()
|
||||
.update(schema_name, current_schema_value, &new_schema_value)
|
||||
.await?;
|
||||
|
||||
info!("Updated database metadata for schema {schema_name}");
|
||||
self.data.state = AlterDatabaseState::InvalidateSchemaCache;
|
||||
Ok(Status::executing(true))
|
||||
}
|
||||
|
||||
pub async fn on_invalidate_schema_cache(&mut self) -> Result<Status> {
|
||||
let cache_invalidator = &self.context.cache_invalidator;
|
||||
cache_invalidator
|
||||
.invalidate(
|
||||
&Context::default(),
|
||||
&[CacheIdent::SchemaName(SchemaName {
|
||||
catalog_name: self.data.catalog().to_string(),
|
||||
schema_name: self.data.schema().to_string(),
|
||||
})],
|
||||
)
|
||||
.await?;
|
||||
|
||||
Ok(Status::done())
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Procedure for AlterDatabaseProcedure {
|
||||
fn type_name(&self) -> &str {
|
||||
Self::TYPE_NAME
|
||||
}
|
||||
|
||||
async fn execute(&mut self, _ctx: &ProcedureContext) -> ProcedureResult<Status> {
|
||||
match self.data.state {
|
||||
AlterDatabaseState::Prepare => self.on_prepare().await,
|
||||
AlterDatabaseState::UpdateMetadata => self.on_update_metadata().await,
|
||||
AlterDatabaseState::InvalidateSchemaCache => self.on_invalidate_schema_cache().await,
|
||||
}
|
||||
.map_err(handle_retry_error)
|
||||
}
|
||||
|
||||
fn dump(&self) -> ProcedureResult<String> {
|
||||
serde_json::to_string(&self.data).context(ToJsonSnafu)
|
||||
}
|
||||
|
||||
fn lock_key(&self) -> LockKey {
|
||||
let catalog = self.data.catalog();
|
||||
let schema = self.data.schema();
|
||||
|
||||
let lock_key = vec![
|
||||
CatalogLock::Read(catalog).into(),
|
||||
SchemaLock::write(catalog, schema).into(),
|
||||
];
|
||||
|
||||
LockKey::new(lock_key)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, AsRefStr)]
|
||||
enum AlterDatabaseState {
|
||||
Prepare,
|
||||
UpdateMetadata,
|
||||
InvalidateSchemaCache,
|
||||
}
|
||||
|
||||
/// The data of alter database procedure.
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct AlterDatabaseData {
|
||||
cluster_id: ClusterId,
|
||||
state: AlterDatabaseState,
|
||||
kind: AlterDatabaseKind,
|
||||
catalog_name: String,
|
||||
schema_name: String,
|
||||
schema_value: Option<DeserializedValueWithBytes<SchemaNameValue>>,
|
||||
}
|
||||
|
||||
impl AlterDatabaseData {
|
||||
pub fn new(task: AlterDatabaseTask, cluster_id: ClusterId) -> Result<Self> {
|
||||
Ok(Self {
|
||||
cluster_id,
|
||||
state: AlterDatabaseState::Prepare,
|
||||
kind: AlterDatabaseKind::try_from(task.alter_expr.kind.unwrap())?,
|
||||
catalog_name: task.alter_expr.catalog_name,
|
||||
schema_name: task.alter_expr.schema_name,
|
||||
schema_value: None,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn catalog(&self) -> &str {
|
||||
&self.catalog_name
|
||||
}
|
||||
|
||||
pub fn schema(&self) -> &str {
|
||||
&self.schema_name
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::time::Duration;
|
||||
|
||||
use crate::ddl::alter_database::build_new_schema_value;
|
||||
use crate::key::schema_name::SchemaNameValue;
|
||||
use crate::rpc::ddl::{
|
||||
AlterDatabaseKind, SetDatabaseOption, SetDatabaseOptions, UnsetDatabaseOption,
|
||||
UnsetDatabaseOptions,
|
||||
};
|
||||
|
||||
#[test]
|
||||
fn test_build_new_schema_value() {
|
||||
let set_ttl = AlterDatabaseKind::SetDatabaseOptions(SetDatabaseOptions(vec![
|
||||
SetDatabaseOption::Ttl(Duration::from_secs(10).into()),
|
||||
]));
|
||||
let current_schema_value = SchemaNameValue::default();
|
||||
let new_schema_value =
|
||||
build_new_schema_value(current_schema_value.clone(), &set_ttl).unwrap();
|
||||
assert_eq!(new_schema_value.ttl, Some(Duration::from_secs(10).into()));
|
||||
|
||||
let unset_ttl_alter_kind =
|
||||
AlterDatabaseKind::UnsetDatabaseOptions(UnsetDatabaseOptions(vec![
|
||||
UnsetDatabaseOption::Ttl,
|
||||
]));
|
||||
let new_schema_value =
|
||||
build_new_schema_value(current_schema_value, &unset_ttl_alter_kind).unwrap();
|
||||
assert_eq!(new_schema_value.ttl, None);
|
||||
}
|
||||
}
|
||||
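The procedure is a three-step state machine driven by repeated execute() calls; each step returns an executing status so the framework can persist progress and retry transient failures. A stripped-down sketch of that control flow, with placeholder types and no persistence or error handling:

#[derive(Debug, PartialEq)]
enum State {
    Prepare,
    UpdateMetadata,
    InvalidateSchemaCache,
    Done,
}

struct AlterDatabaseSketch {
    state: State,
}

impl AlterDatabaseSketch {
    // Each call performs one step and moves to the next state, mirroring
    // on_prepare / on_update_metadata / on_invalidate_schema_cache above.
    fn execute_once(&mut self) {
        self.state = match self.state {
            State::Prepare => State::UpdateMetadata,
            State::UpdateMetadata => State::InvalidateSchemaCache,
            State::InvalidateSchemaCache => State::Done,
            State::Done => State::Done,
        };
    }
}

fn main() {
    let mut p = AlterDatabaseSketch { state: State::Prepare };
    while p.state != State::Done {
        p.execute_once();
    }
    assert_eq!(p.state, State::Done);
}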
@@ -14,7 +14,7 @@
|
||||
|
||||
use std::collections::HashSet;
|
||||
|
||||
use api::v1::alter_expr::Kind;
|
||||
use api::v1::alter_table_expr::Kind;
|
||||
use snafu::{ensure, OptionExt};
|
||||
|
||||
use crate::ddl::alter_logical_tables::AlterLogicalTablesProcedure;
|
||||
|
||||
@@ -13,7 +13,7 @@
|
||||
// limitations under the License.
|
||||
|
||||
use api::v1;
|
||||
use api::v1::alter_expr::Kind;
|
||||
use api::v1::alter_table_expr::Kind;
|
||||
use api::v1::region::{
|
||||
alter_request, region_request, AddColumn, AddColumns, AlterRequest, AlterRequests,
|
||||
RegionColumnDef, RegionRequest, RegionRequestHeader,
|
||||
|
||||
@@ -19,7 +19,7 @@ mod update_metadata;
|
||||
|
||||
use std::vec;
|
||||
|
||||
use api::v1::alter_expr::Kind;
|
||||
use api::v1::alter_table_expr::Kind;
|
||||
use api::v1::RenameTable;
|
||||
use async_trait::async_trait;
|
||||
use common_error::ext::ErrorExt;
|
||||
|
||||
@@ -12,7 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use api::v1::alter_expr::Kind;
|
||||
use api::v1::alter_table_expr::Kind;
|
||||
use api::v1::RenameTable;
|
||||
use common_catalog::format_full_table_name;
|
||||
use snafu::ensure;
|
||||
|
||||
@@ -12,7 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use api::v1::alter_expr::Kind;
|
||||
use api::v1::alter_table_expr::Kind;
|
||||
use api::v1::region::region_request::Body;
|
||||
use api::v1::region::{
|
||||
alter_request, AddColumn, AddColumns, AlterRequest, DropColumn, DropColumns, RegionColumnDef,
|
||||
@@ -91,7 +91,7 @@ fn create_proto_alter_kind(
|
||||
add_columns,
|
||||
})))
|
||||
}
|
||||
Kind::ChangeColumnTypes(x) => Ok(Some(alter_request::Kind::ChangeColumnTypes(x.clone()))),
|
||||
Kind::ModifyColumnTypes(x) => Ok(Some(alter_request::Kind::ModifyColumnTypes(x.clone()))),
|
||||
Kind::DropColumns(x) => {
|
||||
let drop_columns = x
|
||||
.drop_columns
|
||||
@@ -106,9 +106,11 @@ fn create_proto_alter_kind(
|
||||
})))
|
||||
}
|
||||
Kind::RenameTable(_) => Ok(None),
|
||||
Kind::ChangeTableOptions(v) => Ok(Some(alter_request::Kind::ChangeTableOptions(v.clone()))),
|
||||
Kind::ChangeColumnFulltext(v) => {
|
||||
Ok(Some(alter_request::Kind::ChangeColumnFulltext(v.clone())))
|
||||
Kind::SetTableOptions(v) => Ok(Some(alter_request::Kind::SetTableOptions(v.clone()))),
|
||||
Kind::UnsetTableOptions(v) => Ok(Some(alter_request::Kind::UnsetTableOptions(v.clone()))),
|
||||
Kind::SetColumnFulltext(v) => Ok(Some(alter_request::Kind::SetColumnFulltext(v.clone()))),
|
||||
Kind::UnsetColumnFulltext(v) => {
|
||||
Ok(Some(alter_request::Kind::UnsetColumnFulltext(v.clone())))
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -119,12 +121,12 @@ mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
use api::v1::add_column_location::LocationType;
|
||||
use api::v1::alter_expr::Kind;
|
||||
use api::v1::alter_table_expr::Kind;
|
||||
use api::v1::region::region_request::Body;
|
||||
use api::v1::region::RegionColumnDef;
|
||||
use api::v1::{
|
||||
region, AddColumn, AddColumnLocation, AddColumns, AlterExpr, ChangeColumnType,
|
||||
ChangeColumnTypes, ColumnDataType, ColumnDef as PbColumnDef, SemanticType,
|
||||
region, AddColumn, AddColumnLocation, AddColumns, AlterTableExpr, ColumnDataType,
|
||||
ColumnDef as PbColumnDef, ModifyColumnType, ModifyColumnTypes, SemanticType,
|
||||
};
|
||||
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use store_api::storage::{RegionId, TableId};
|
||||
@@ -213,7 +215,7 @@ mod tests {
|
||||
prepare_ddl_context().await;
|
||||
|
||||
let task = AlterTableTask {
|
||||
alter_table: AlterExpr {
|
||||
alter_table: AlterTableExpr {
|
||||
catalog_name: DEFAULT_CATALOG_NAME.to_string(),
|
||||
schema_name: DEFAULT_SCHEMA_NAME.to_string(),
|
||||
table_name,
|
||||
@@ -280,12 +282,12 @@ mod tests {
|
||||
prepare_ddl_context().await;
|
||||
|
||||
let task = AlterTableTask {
|
||||
alter_table: AlterExpr {
|
||||
alter_table: AlterTableExpr {
|
||||
catalog_name: DEFAULT_CATALOG_NAME.to_string(),
|
||||
schema_name: DEFAULT_SCHEMA_NAME.to_string(),
|
||||
table_name,
|
||||
kind: Some(Kind::ChangeColumnTypes(ChangeColumnTypes {
|
||||
change_column_types: vec![ChangeColumnType {
|
||||
kind: Some(Kind::ModifyColumnTypes(ModifyColumnTypes {
|
||||
modify_column_types: vec![ModifyColumnType {
|
||||
column_name: "cpu".to_string(),
|
||||
target_type: ColumnDataType::String as i32,
|
||||
target_type_extension: None,
|
||||
@@ -306,9 +308,9 @@ mod tests {
|
||||
assert_eq!(alter_region_request.schema_version, 1);
|
||||
assert_eq!(
|
||||
alter_region_request.kind,
|
||||
Some(region::alter_request::Kind::ChangeColumnTypes(
|
||||
ChangeColumnTypes {
|
||||
change_column_types: vec![ChangeColumnType {
|
||||
Some(region::alter_request::Kind::ModifyColumnTypes(
|
||||
ModifyColumnTypes {
|
||||
modify_column_types: vec![ModifyColumnType {
|
||||
column_name: "cpu".to_string(),
|
||||
target_type: ColumnDataType::String as i32,
|
||||
target_type_extension: None,
|
||||
|
||||
@@ -52,9 +52,11 @@ impl AlterTableProcedure {
|
||||
new_info.name = new_table_name.to_string();
|
||||
}
|
||||
AlterKind::DropColumns { .. }
|
||||
| AlterKind::ChangeColumnTypes { .. }
|
||||
| AlterKind::ChangeTableOptions { .. }
|
||||
| AlterKind::ChangeColumnFulltext { .. } => {}
|
||||
| AlterKind::ModifyColumnTypes { .. }
|
||||
| AlterKind::SetTableOptions { .. }
|
||||
| AlterKind::UnsetTableOptions { .. }
|
||||
| AlterKind::SetColumnFulltext { .. }
|
||||
| AlterKind::UnsetColumnFulltext { .. } => {}
|
||||
}
|
||||
|
||||
Ok(new_info)
|
||||
|
||||
@@ -28,6 +28,7 @@ use common_procedure::{
|
||||
use common_telemetry::info;
|
||||
use common_telemetry::tracing_context::TracingContext;
|
||||
use futures::future::join_all;
|
||||
use futures::TryStreamExt;
|
||||
use itertools::Itertools;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use snafu::{ensure, ResultExt};
|
||||
@@ -43,7 +44,7 @@ use crate::instruction::{CacheIdent, CreateFlow};
|
||||
use crate::key::flow::flow_info::FlowInfoValue;
|
||||
use crate::key::flow::flow_route::FlowRouteValue;
|
||||
use crate::key::table_name::TableNameKey;
|
||||
use crate::key::{FlowId, FlowPartitionId};
|
||||
use crate::key::{DeserializedValueWithBytes, FlowId, FlowPartitionId};
|
||||
use crate::lock_key::{CatalogLock, FlowNameLock, TableNameLock};
|
||||
use crate::peer::Peer;
|
||||
use crate::rpc::ddl::{CreateFlowTask, QueryContext};
|
||||
@@ -75,6 +76,7 @@ impl CreateFlowProcedure {
|
||||
source_table_ids: vec![],
|
||||
query_context,
|
||||
state: CreateFlowState::Prepare,
|
||||
prev_flow_info_value: None,
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -90,6 +92,7 @@ impl CreateFlowProcedure {
|
||||
let flow_name = &self.data.task.flow_name;
|
||||
let sink_table_name = &self.data.task.sink_table_name;
|
||||
let create_if_not_exists = self.data.task.create_if_not_exists;
|
||||
let or_replace = self.data.task.or_replace;
|
||||
|
||||
let flow_name_value = self
|
||||
.context
|
||||
@@ -98,16 +101,56 @@ impl CreateFlowProcedure {
|
||||
.get(catalog_name, flow_name)
|
||||
.await?;
|
||||
|
||||
if create_if_not_exists && or_replace {
|
||||
// this is forbidden because not clear what does that mean exactly
|
||||
return error::UnsupportedSnafu {
|
||||
operation: "Create flow with both `IF NOT EXISTS` and `OR REPLACE`".to_string(),
|
||||
}
|
||||
.fail();
|
||||
}
|
||||
|
||||
if let Some(value) = flow_name_value {
|
||||
ensure!(
|
||||
create_if_not_exists,
|
||||
create_if_not_exists || or_replace,
|
||||
error::FlowAlreadyExistsSnafu {
|
||||
flow_name: format_full_flow_name(catalog_name, flow_name),
|
||||
}
|
||||
);
|
||||
|
||||
let flow_id = value.flow_id();
|
||||
return Ok(Status::done_with_output(flow_id));
|
||||
if create_if_not_exists {
|
||||
info!("Flow already exists, flow_id: {}", flow_id);
|
||||
return Ok(Status::done_with_output(flow_id));
|
||||
}
|
||||
|
||||
let flow_id = value.flow_id();
|
||||
let peers = self
|
||||
.context
|
||||
.flow_metadata_manager
|
||||
.flow_route_manager()
|
||||
.routes(flow_id)
|
||||
.map_ok(|(_, value)| value.peer)
|
||||
.try_collect::<Vec<_>>()
|
||||
.await?;
|
||||
self.data.flow_id = Some(flow_id);
|
||||
self.data.peers = peers;
|
||||
info!("Replacing flow, flow_id: {}", flow_id);
|
||||
|
||||
let flow_info_value = self
|
||||
.context
|
||||
.flow_metadata_manager
|
||||
.flow_info_manager()
|
||||
.get_raw(flow_id)
|
||||
.await?;
|
||||
|
||||
ensure!(
|
||||
flow_info_value.is_some(),
|
||||
error::FlowNotFoundSnafu {
|
||||
flow_name: format_full_flow_name(catalog_name, flow_name),
|
||||
}
|
||||
);
|
||||
|
||||
self.data.prev_flow_info_value = flow_info_value;
|
||||
}
|
||||
|
||||
// Ensures sink table doesn't exist.
|
||||
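The prepare step above reduces to a small decision table over `create_if_not_exists`, `or_replace`, and whether the flow already exists. A hedged sketch of that logic as a pure function; the outcome names are invented here and are not the real Status values:

#[derive(Debug, PartialEq)]
enum Outcome {
    Reject,           // both flags set: ambiguous, refused up front
    ReturnExisting,   // IF NOT EXISTS and the flow is already there
    Replace,          // OR REPLACE and the flow is already there
    AlreadyExistsErr, // neither flag set and the flow is already there
    CreateNew,        // flow does not exist yet
}

fn prepare_outcome(create_if_not_exists: bool, or_replace: bool, exists: bool) -> Outcome {
    if create_if_not_exists && or_replace {
        return Outcome::Reject;
    }
    if !exists {
        return Outcome::CreateNew;
    }
    if create_if_not_exists {
        Outcome::ReturnExisting
    } else if or_replace {
        Outcome::Replace
    } else {
        Outcome::AlreadyExistsErr
    }
}

fn main() {
    assert_eq!(prepare_outcome(true, true, false), Outcome::Reject);
    assert_eq!(prepare_outcome(true, false, true), Outcome::ReturnExisting);
    assert_eq!(prepare_outcome(false, true, true), Outcome::Replace);
    assert_eq!(prepare_outcome(false, false, true), Outcome::AlreadyExistsErr);
    assert_eq!(prepare_outcome(false, false, false), Outcome::CreateNew);
}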
@@ -128,7 +171,9 @@ impl CreateFlowProcedure {
|
||||
}
|
||||
|
||||
self.collect_source_tables().await?;
|
||||
self.allocate_flow_id().await?;
|
||||
if self.data.flow_id.is_none() {
|
||||
self.allocate_flow_id().await?;
|
||||
}
|
||||
self.data.state = CreateFlowState::CreateFlows;
|
||||
|
||||
Ok(Status::executing(true))
|
||||
@@ -153,7 +198,10 @@ impl CreateFlowProcedure {
|
||||
.map_err(add_peer_context_if_needed(peer.clone()))
|
||||
});
|
||||
}
|
||||
|
||||
info!(
|
||||
"Creating flow({:?}) on flownodes with peers={:?}",
|
||||
self.data.flow_id, self.data.peers
|
||||
);
|
||||
join_all(create_flow)
|
||||
.await
|
||||
.into_iter()
|
||||
@@ -170,18 +218,29 @@ impl CreateFlowProcedure {
|
||||
async fn on_create_metadata(&mut self) -> Result<Status> {
|
||||
// Safety: The flow id must be allocated.
|
||||
let flow_id = self.data.flow_id.unwrap();
|
||||
// TODO(weny): Support `or_replace`.
|
||||
let (flow_info, flow_routes) = (&self.data).into();
|
||||
self.context
|
||||
.flow_metadata_manager
|
||||
.create_flow_metadata(flow_id, flow_info, flow_routes)
|
||||
.await?;
|
||||
info!("Created flow metadata for flow {flow_id}");
|
||||
if let Some(prev_flow_value) = self.data.prev_flow_info_value.as_ref()
|
||||
&& self.data.task.or_replace
|
||||
{
|
||||
self.context
|
||||
.flow_metadata_manager
|
||||
.update_flow_metadata(flow_id, prev_flow_value, &flow_info, flow_routes)
|
||||
.await?;
|
||||
info!("Replaced flow metadata for flow {flow_id}");
|
||||
} else {
|
||||
self.context
|
||||
.flow_metadata_manager
|
||||
.create_flow_metadata(flow_id, flow_info, flow_routes)
|
||||
.await?;
|
||||
info!("Created flow metadata for flow {flow_id}");
|
||||
}
|
||||
|
||||
self.data.state = CreateFlowState::InvalidateFlowCache;
|
||||
Ok(Status::executing(true))
|
||||
}
|
||||
|
||||
async fn on_broadcast(&mut self) -> Result<Status> {
|
||||
debug_assert!(self.data.state == CreateFlowState::InvalidateFlowCache);
|
||||
// Safety: The flow id must be allocated.
|
||||
let flow_id = self.data.flow_id.unwrap();
|
||||
let ctx = Context {
|
||||
@@ -192,10 +251,13 @@ impl CreateFlowProcedure {
|
||||
.cache_invalidator
|
||||
.invalidate(
|
||||
&ctx,
|
||||
&[CacheIdent::CreateFlow(CreateFlow {
|
||||
source_table_ids: self.data.source_table_ids.clone(),
|
||||
flownodes: self.data.peers.clone(),
|
||||
})],
|
||||
&[
|
||||
CacheIdent::CreateFlow(CreateFlow {
|
||||
source_table_ids: self.data.source_table_ids.clone(),
|
||||
flownodes: self.data.peers.clone(),
|
||||
}),
|
||||
CacheIdent::FlowId(flow_id),
|
||||
],
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -270,6 +332,9 @@ pub struct CreateFlowData {
|
||||
pub(crate) peers: Vec<Peer>,
|
||||
pub(crate) source_table_ids: Vec<TableId>,
|
||||
pub(crate) query_context: QueryContext,
|
||||
/// Used to verify that the previous value is consistent when the flow metadata needs to be updated.
/// Only set when `or_replace` is true.
|
||||
pub(crate) prev_flow_info_value: Option<DeserializedValueWithBytes<FlowInfoValue>>,
|
||||
}
|
||||
|
||||
impl From<&CreateFlowData> for CreateRequest {
|
||||
@@ -284,8 +349,9 @@ impl From<&CreateFlowData> for CreateRequest {
|
||||
.map(|table_id| api::v1::TableId { id: *table_id })
|
||||
.collect_vec(),
|
||||
sink_table_name: Some(value.task.sink_table_name.clone().into()),
|
||||
// Always be true
|
||||
// Always set to true to keep the request idempotent in case of a retry
|
||||
create_if_not_exists: true,
|
||||
or_replace: value.task.or_replace,
|
||||
expire_after: value.task.expire_after.map(|value| ExpireAfter { value }),
|
||||
comment: value.task.comment.clone(),
|
||||
sql: value.task.sql.clone(),
|
||||
|
||||
@@ -12,8 +12,8 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use api::v1::alter_expr::Kind;
|
||||
use api::v1::{AddColumn, AddColumns, AlterExpr, ColumnDef, RenameTable};
|
||||
use api::v1::alter_table_expr::Kind;
|
||||
use api::v1::{AddColumn, AddColumns, AlterTableExpr, ColumnDef, RenameTable};
|
||||
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use derive_builder::Builder;
|
||||
|
||||
@@ -32,7 +32,7 @@ pub struct TestAlterTableExpr {
|
||||
new_table_name: Option<String>,
|
||||
}
|
||||
|
||||
impl From<TestAlterTableExpr> for AlterExpr {
|
||||
impl From<TestAlterTableExpr> for AlterTableExpr {
|
||||
fn from(value: TestAlterTableExpr) -> Self {
|
||||
if let Some(new_table_name) = value.new_table_name {
|
||||
Self {
|
||||
|
||||
@@ -16,11 +16,11 @@ use std::assert_matches::assert_matches;
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
|
||||
use api::v1::alter_expr::Kind;
|
||||
use api::v1::alter_table_expr::Kind;
|
||||
use api::v1::region::{region_request, RegionRequest};
|
||||
use api::v1::{
|
||||
AddColumn, AddColumns, AlterExpr, ChangeTableOption, ChangeTableOptions, ColumnDataType,
|
||||
ColumnDef as PbColumnDef, DropColumn, DropColumns, SemanticType,
|
||||
AddColumn, AddColumns, AlterTableExpr, ColumnDataType, ColumnDef as PbColumnDef, DropColumn,
|
||||
DropColumns, SemanticType, SetTableOptions,
|
||||
};
|
||||
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use common_error::ext::ErrorExt;
|
||||
@@ -133,7 +133,7 @@ async fn test_on_submit_alter_request() {
|
||||
.unwrap();
|
||||
|
||||
let alter_table_task = AlterTableTask {
|
||||
alter_table: AlterExpr {
|
||||
alter_table: AlterTableExpr {
|
||||
catalog_name: DEFAULT_CATALOG_NAME.to_string(),
|
||||
schema_name: DEFAULT_SCHEMA_NAME.to_string(),
|
||||
table_name: table_name.to_string(),
|
||||
@@ -219,7 +219,7 @@ async fn test_on_submit_alter_request_with_outdated_request() {
|
||||
.unwrap();
|
||||
|
||||
let alter_table_task = AlterTableTask {
|
||||
alter_table: AlterExpr {
|
||||
alter_table: AlterTableExpr {
|
||||
catalog_name: DEFAULT_CATALOG_NAME.to_string(),
|
||||
schema_name: DEFAULT_SCHEMA_NAME.to_string(),
|
||||
table_name: table_name.to_string(),
|
||||
@@ -316,7 +316,7 @@ async fn test_on_update_metadata_add_columns() {
|
||||
.unwrap();
|
||||
|
||||
let task = AlterTableTask {
|
||||
alter_table: AlterExpr {
|
||||
alter_table: AlterTableExpr {
|
||||
catalog_name: DEFAULT_CATALOG_NAME.to_string(),
|
||||
schema_name: DEFAULT_SCHEMA_NAME.to_string(),
|
||||
table_name: table_name.to_string(),
|
||||
@@ -385,12 +385,12 @@ async fn test_on_update_table_options() {
|
||||
.unwrap();
|
||||
|
||||
let task = AlterTableTask {
|
||||
alter_table: AlterExpr {
|
||||
alter_table: AlterTableExpr {
|
||||
catalog_name: DEFAULT_CATALOG_NAME.to_string(),
|
||||
schema_name: DEFAULT_SCHEMA_NAME.to_string(),
|
||||
table_name: table_name.to_string(),
|
||||
kind: Some(Kind::ChangeTableOptions(ChangeTableOptions {
|
||||
change_table_options: vec![ChangeTableOption {
|
||||
kind: Some(Kind::SetTableOptions(SetTableOptions {
|
||||
table_options: vec![api::v1::Option {
|
||||
key: TTL_KEY.to_string(),
|
||||
value: "1d".to_string(),
|
||||
}],
|
||||
|
||||
@@ -24,6 +24,7 @@ use derive_builder::Builder;
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
use store_api::storage::TableId;
|
||||
|
||||
use crate::ddl::alter_database::AlterDatabaseProcedure;
|
||||
use crate::ddl::alter_logical_tables::AlterLogicalTablesProcedure;
|
||||
use crate::ddl::alter_table::AlterTableProcedure;
|
||||
use crate::ddl::create_database::CreateDatabaseProcedure;
|
||||
@@ -47,12 +48,13 @@ use crate::key::table_info::TableInfoValue;
|
||||
use crate::key::table_name::TableNameKey;
|
||||
use crate::key::{DeserializedValueWithBytes, TableMetadataManagerRef};
|
||||
use crate::rpc::ddl::DdlTask::{
|
||||
AlterLogicalTables, AlterTable, CreateDatabase, CreateFlow, CreateLogicalTables, CreateTable,
|
||||
CreateView, DropDatabase, DropFlow, DropLogicalTables, DropTable, DropView, TruncateTable,
|
||||
AlterDatabase, AlterLogicalTables, AlterTable, CreateDatabase, CreateFlow, CreateLogicalTables,
|
||||
CreateTable, CreateView, DropDatabase, DropFlow, DropLogicalTables, DropTable, DropView,
|
||||
TruncateTable,
|
||||
};
|
||||
use crate::rpc::ddl::{
|
||||
AlterTableTask, CreateDatabaseTask, CreateFlowTask, CreateTableTask, CreateViewTask,
|
||||
DropDatabaseTask, DropFlowTask, DropTableTask, DropViewTask, QueryContext,
|
||||
AlterDatabaseTask, AlterTableTask, CreateDatabaseTask, CreateFlowTask, CreateTableTask,
|
||||
CreateViewTask, DropDatabaseTask, DropFlowTask, DropTableTask, DropViewTask, QueryContext,
|
||||
SubmitDdlTaskRequest, SubmitDdlTaskResponse, TruncateTableTask,
|
||||
};
|
||||
use crate::rpc::procedure;
|
||||
@@ -129,6 +131,7 @@ impl DdlManager {
|
||||
CreateFlowProcedure,
|
||||
AlterTableProcedure,
|
||||
AlterLogicalTablesProcedure,
|
||||
AlterDatabaseProcedure,
|
||||
DropTableProcedure,
|
||||
DropFlowProcedure,
|
||||
TruncateTableProcedure,
|
||||
@@ -294,6 +297,18 @@ impl DdlManager {
|
||||
self.submit_procedure(procedure_with_id).await
|
||||
}
|
||||
|
||||
pub async fn submit_alter_database(
|
||||
&self,
|
||||
cluster_id: ClusterId,
|
||||
alter_database_task: AlterDatabaseTask,
|
||||
) -> Result<(ProcedureId, Option<Output>)> {
|
||||
let context = self.create_context();
|
||||
let procedure = AlterDatabaseProcedure::new(cluster_id, alter_database_task, context)?;
|
||||
let procedure_with_id = ProcedureWithId::with_random_id(Box::new(procedure));
|
||||
|
||||
self.submit_procedure(procedure_with_id).await
|
||||
}
|
||||
|
||||
/// Submits and executes a create flow task.
|
||||
#[tracing::instrument(skip_all)]
|
||||
pub async fn submit_create_flow_task(
|
||||
@@ -593,6 +608,28 @@ async fn handle_drop_database_task(
|
||||
})
|
||||
}
|
||||
|
||||
async fn handle_alter_database_task(
|
||||
ddl_manager: &DdlManager,
|
||||
cluster_id: ClusterId,
|
||||
alter_database_task: AlterDatabaseTask,
|
||||
) -> Result<SubmitDdlTaskResponse> {
|
||||
let (id, _) = ddl_manager
|
||||
.submit_alter_database(cluster_id, alter_database_task.clone())
|
||||
.await?;
|
||||
|
||||
let procedure_id = id.to_string();
|
||||
info!(
|
||||
"Database {}.{} is altered via procedure_id {id:?}",
|
||||
alter_database_task.catalog(),
|
||||
alter_database_task.schema()
|
||||
);
|
||||
|
||||
Ok(SubmitDdlTaskResponse {
|
||||
key: procedure_id.into(),
|
||||
..Default::default()
|
||||
})
|
||||
}
|
||||
|
||||
async fn handle_drop_flow_task(
|
||||
ddl_manager: &DdlManager,
|
||||
cluster_id: ClusterId,
|
||||
@@ -655,10 +692,17 @@ async fn handle_create_flow_task(
|
||||
procedure_id: &procedure_id,
|
||||
err_msg: "downcast to `u32`",
|
||||
})?);
|
||||
info!(
|
||||
"Flow {}.{}({flow_id}) is created via procedure_id {id:?}",
|
||||
create_flow_task.catalog_name, create_flow_task.flow_name,
|
||||
);
|
||||
if !create_flow_task.or_replace {
|
||||
info!(
|
||||
"Flow {}.{}({flow_id}) is created via procedure_id {id:?}",
|
||||
create_flow_task.catalog_name, create_flow_task.flow_name,
|
||||
);
|
||||
} else {
|
||||
info!(
|
||||
"Flow {}.{}({flow_id}) is replaced via procedure_id {id:?}",
|
||||
create_flow_task.catalog_name, create_flow_task.flow_name,
|
||||
);
|
||||
}
|
||||
|
||||
Ok(SubmitDdlTaskResponse {
|
||||
key: procedure_id.into(),
|
||||
@@ -772,6 +816,9 @@ impl ProcedureExecutor for DdlManager {
|
||||
DropDatabase(drop_database_task) => {
|
||||
handle_drop_database_task(self, cluster_id, drop_database_task).await
|
||||
}
|
||||
AlterDatabase(alter_database_task) => {
|
||||
handle_alter_database_task(self, cluster_id, alter_database_task).await
|
||||
}
|
||||
CreateFlow(create_flow_task) => {
|
||||
handle_create_flow_task(
|
||||
self,
|
||||
|
||||
@@ -425,6 +425,13 @@ pub enum Error {
|
||||
source: BoxedError,
|
||||
},
|
||||
|
||||
#[snafu(display("The response exceeded size limit"))]
|
||||
ResponseExceededSizeLimit {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: BoxedError,
|
||||
},
|
||||
|
||||
#[snafu(display("Invalid heartbeat response"))]
|
||||
InvalidHeartbeatResponse {
|
||||
#[snafu(implicit)]
|
||||
@@ -593,6 +600,21 @@ pub enum Error {
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Invalid set database option, key: {}, value: {}", key, value))]
|
||||
InvalidSetDatabaseOption {
|
||||
key: String,
|
||||
value: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Invalid unset database option, key: {}", key))]
|
||||
InvalidUnsetDatabaseOption {
|
||||
key: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Invalid prefix: {}, key: {}", prefix, key))]
|
||||
MismatchPrefix {
|
||||
prefix: String,
|
||||
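The two new option-validation variants would presumably be raised from the places that parse SET/UNSET database options, which are not part of this hunk. A minimal sketch of how such a variant is raised with snafu's ensure!, assuming the snafu crate and an illustrative error enum:

use snafu::{ensure, Snafu};

#[derive(Debug, Snafu)]
enum Error {
    #[snafu(display("Invalid set database option, key: {}, value: {}", key, value))]
    InvalidSetDatabaseOption { key: String, value: String },
}

// Hypothetical validator: only a `ttl` option is accepted in this sketch.
fn parse_set_option(key: &str, value: &str) -> Result<(), Error> {
    ensure!(key == "ttl", InvalidSetDatabaseOptionSnafu { key, value });
    Ok(())
}

fn main() {
    let err = parse_set_option("compaction", "whatever").unwrap_err();
    assert!(err.to_string().contains("Invalid set database option"));
}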
@@ -730,7 +752,9 @@ impl ErrorExt for Error {
|
||||
| AlterLogicalTablesInvalidArguments { .. }
|
||||
| CreateLogicalTablesInvalidArguments { .. }
|
||||
| MismatchPrefix { .. }
|
||||
| TlsConfig { .. } => StatusCode::InvalidArguments,
|
||||
| TlsConfig { .. }
|
||||
| InvalidSetDatabaseOption { .. }
|
||||
| InvalidUnsetDatabaseOption { .. } => StatusCode::InvalidArguments,
|
||||
|
||||
FlowNotFound { .. } => StatusCode::FlowNotFound,
|
||||
FlowRouteNotFound { .. } => StatusCode::Unexpected,
|
||||
@@ -746,6 +770,7 @@ impl ErrorExt for Error {
|
||||
| StopProcedureManager { source, .. } => source.status_code(),
|
||||
RegisterProcedureLoader { source, .. } => source.status_code(),
|
||||
External { source, .. } => source.status_code(),
|
||||
ResponseExceededSizeLimit { source, .. } => source.status_code(),
|
||||
OperateDatanode { source, .. } => source.status_code(),
|
||||
Table { source, .. } => source.status_code(),
|
||||
RetryLater { source, .. } => source.status_code(),
|
||||
@@ -788,13 +813,13 @@ impl Error {
|
||||
|
||||
/// Returns true if the response exceeds the size limit.
|
||||
pub fn is_exceeded_size_limit(&self) -> bool {
|
||||
if let Error::EtcdFailed {
|
||||
error: etcd_client::Error::GRpcStatus(status),
|
||||
..
|
||||
} = self
|
||||
{
|
||||
return status.code() == tonic::Code::OutOfRange;
|
||||
match self {
|
||||
Error::EtcdFailed {
|
||||
error: etcd_client::Error::GRpcStatus(status),
|
||||
..
|
||||
} => status.code() == tonic::Code::OutOfRange,
|
||||
Error::ResponseExceededSizeLimit { .. } => true,
|
||||
_ => false,
|
||||
}
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
@@ -21,6 +21,7 @@ use common_telemetry::error;
|
||||
use crate::error::Result;
|
||||
use crate::heartbeat::mailbox::{IncomingMessage, MailboxRef};
|
||||
|
||||
pub mod invalidate_table_cache;
|
||||
pub mod parse_mailbox_message;
|
||||
#[cfg(test)]
|
||||
mod tests;
|
||||
|
||||
@@ -13,21 +13,22 @@
|
||||
// limitations under the License.
|
||||
|
||||
use async_trait::async_trait;
|
||||
use common_meta::cache_invalidator::{CacheInvalidatorRef, Context};
|
||||
use common_meta::error::Result as MetaResult;
|
||||
use common_meta::heartbeat::handler::{
|
||||
HandleControl, HeartbeatResponseHandler, HeartbeatResponseHandlerContext,
|
||||
};
|
||||
use common_meta::instruction::Instruction;
|
||||
use common_telemetry::debug;
|
||||
|
||||
use crate::cache_invalidator::{CacheInvalidatorRef, Context};
|
||||
use crate::error::Result as MetaResult;
|
||||
use crate::heartbeat::handler::{
|
||||
HandleControl, HeartbeatResponseHandler, HeartbeatResponseHandlerContext,
|
||||
};
|
||||
use crate::instruction::Instruction;
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct InvalidateTableCacheHandler {
|
||||
pub struct InvalidateCacheHandler {
|
||||
cache_invalidator: CacheInvalidatorRef,
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl HeartbeatResponseHandler for InvalidateTableCacheHandler {
|
||||
impl HeartbeatResponseHandler for InvalidateCacheHandler {
|
||||
fn is_acceptable(&self, ctx: &HeartbeatResponseHandlerContext) -> bool {
|
||||
matches!(
|
||||
ctx.incoming_message.as_ref(),
|
||||
@@ -37,13 +38,10 @@ impl HeartbeatResponseHandler for InvalidateTableCacheHandler {
|
||||
|
||||
async fn handle(&self, ctx: &mut HeartbeatResponseHandlerContext) -> MetaResult<HandleControl> {
|
||||
let Some((_, Instruction::InvalidateCaches(caches))) = ctx.incoming_message.take() else {
|
||||
unreachable!("InvalidateTableCacheHandler: should be guarded by 'is_acceptable'")
|
||||
unreachable!("InvalidateCacheHandler: should be guarded by 'is_acceptable'")
|
||||
};
|
||||
|
||||
debug!(
|
||||
"InvalidateTableCacheHandler: invalidating caches: {:?}",
|
||||
caches
|
||||
);
|
||||
debug!("InvalidateCacheHandler: invalidating caches: {:?}", caches);
|
||||
|
||||
// Invalidating the local cache always succeeds
|
||||
let _ = self
|
||||
@@ -55,7 +53,7 @@ impl HeartbeatResponseHandler for InvalidateTableCacheHandler {
|
||||
}
|
||||
}
|
||||
|
||||
impl InvalidateTableCacheHandler {
|
||||
impl InvalidateCacheHandler {
|
||||
pub fn new(cache_invalidator: CacheInvalidatorRef) -> Self {
|
||||
Self { cache_invalidator }
|
||||
}
|
||||
@@ -90,6 +90,7 @@
|
||||
pub mod catalog_name;
|
||||
pub mod datanode_table;
|
||||
pub mod flow;
|
||||
pub mod maintenance;
|
||||
pub mod node_address;
|
||||
mod schema_metadata_manager;
|
||||
pub mod schema_name;
|
||||
@@ -148,7 +149,7 @@ use crate::DatanodeId;
|
||||
pub const NAME_PATTERN: &str = r"[a-zA-Z_:-][a-zA-Z0-9_:\-\.@#]*";
|
||||
pub const MAINTENANCE_KEY: &str = "__maintenance";
|
||||
|
||||
const DATANODE_TABLE_KEY_PREFIX: &str = "__dn_table";
|
||||
pub const DATANODE_TABLE_KEY_PREFIX: &str = "__dn_table";
|
||||
pub const TABLE_INFO_KEY_PREFIX: &str = "__table_info";
|
||||
pub const VIEW_INFO_KEY_PREFIX: &str = "__view_info";
|
||||
pub const TABLE_NAME_KEY_PREFIX: &str = "__table_name";
|
||||
@@ -564,13 +565,13 @@ impl TableMetadataManager {
|
||||
let mut set = TxnOpGetResponseSet::from(&mut r.responses);
|
||||
let remote_table_info = on_create_table_info_failure(&mut set)?
|
||||
.context(error::UnexpectedSnafu {
|
||||
err_msg: "Reads the empty table info during the create table metadata",
|
||||
err_msg: "Reads the empty table info in comparing operation of creating table metadata",
|
||||
})?
|
||||
.into_inner();
|
||||
|
||||
let remote_view_info = on_create_view_info_failure(&mut set)?
|
||||
.context(error::UnexpectedSnafu {
|
||||
err_msg: "Reads the empty view info during the create view info",
|
||||
err_msg: "Reads the empty view info in comparing operation of creating view metadata",
|
||||
})?
|
||||
.into_inner();
|
||||
|
||||
@@ -643,13 +644,13 @@ impl TableMetadataManager {
|
||||
let mut set = TxnOpGetResponseSet::from(&mut r.responses);
|
||||
let remote_table_info = on_create_table_info_failure(&mut set)?
|
||||
.context(error::UnexpectedSnafu {
|
||||
err_msg: "Reads the empty table info during the create table metadata",
|
||||
err_msg: "Reads the empty table info in comparing operation of creating table metadata",
|
||||
})?
|
||||
.into_inner();
|
||||
|
||||
let remote_table_route = on_create_table_route_failure(&mut set)?
|
||||
.context(error::UnexpectedSnafu {
|
||||
err_msg: "Reads the empty table route during the create table metadata",
|
||||
err_msg: "Reads the empty table route in comparing operation of creating table metadata",
|
||||
})?
|
||||
.into_inner();
|
||||
|
||||
@@ -730,13 +731,13 @@ impl TableMetadataManager {
|
||||
for on_failure in on_failures {
|
||||
let remote_table_info = (on_failure.on_create_table_info_failure)(&mut set)?
|
||||
.context(error::UnexpectedSnafu {
|
||||
err_msg: "Reads the empty table info during the create table metadata",
|
||||
err_msg: "Reads the empty table info in comparing operation of creating table metadata",
|
||||
})?
|
||||
.into_inner();
|
||||
|
||||
let remote_table_route = (on_failure.on_create_table_route_failure)(&mut set)?
|
||||
.context(error::UnexpectedSnafu {
|
||||
err_msg: "Reads the empty table route during the create table metadata",
|
||||
err_msg: "Reads the empty table route in comparing operation of creating table metadata",
|
||||
})?
|
||||
.into_inner();
|
||||
|
||||
@@ -914,7 +915,7 @@ impl TableMetadataManager {
|
||||
let mut set = TxnOpGetResponseSet::from(&mut r.responses);
|
||||
let remote_table_info = on_update_table_info_failure(&mut set)?
|
||||
.context(error::UnexpectedSnafu {
|
||||
err_msg: "Reads the empty table info during the rename table metadata",
|
||||
err_msg: "Reads the empty table info in comparing operation of the rename table metadata",
|
||||
})?
|
||||
.into_inner();
|
||||
|
||||
@@ -960,7 +961,7 @@ impl TableMetadataManager {
|
||||
let mut set = TxnOpGetResponseSet::from(&mut r.responses);
|
||||
let remote_table_info = on_update_table_info_failure(&mut set)?
|
||||
.context(error::UnexpectedSnafu {
|
||||
err_msg: "Reads the empty table info during the updating table info",
|
||||
err_msg: "Reads the empty table info in comparing operation of the updating table info",
|
||||
})?
|
||||
.into_inner();
|
||||
|
||||
@@ -1011,7 +1012,7 @@ impl TableMetadataManager {
|
||||
let mut set = TxnOpGetResponseSet::from(&mut r.responses);
|
||||
let remote_view_info = on_update_view_info_failure(&mut set)?
|
||||
.context(error::UnexpectedSnafu {
|
||||
err_msg: "Reads the empty view info during the updating view info",
|
||||
err_msg: "Reads the empty view info in comparing operation of the updating view info",
|
||||
})?
|
||||
.into_inner();
|
||||
|
||||
@@ -1068,7 +1069,7 @@ impl TableMetadataManager {
|
||||
for on_failure in on_failures {
|
||||
let remote_table_info = (on_failure.on_update_table_info_failure)(&mut set)?
|
||||
.context(error::UnexpectedSnafu {
|
||||
err_msg: "Reads the empty table info during the updating table info",
|
||||
err_msg: "Reads the empty table info in comparing operation of the updating table info",
|
||||
})?
|
||||
.into_inner();
|
||||
|
||||
@@ -1120,7 +1121,7 @@ impl TableMetadataManager {
|
||||
let mut set = TxnOpGetResponseSet::from(&mut r.responses);
|
||||
let remote_table_route = on_update_table_route_failure(&mut set)?
|
||||
.context(error::UnexpectedSnafu {
|
||||
err_msg: "Reads the empty table route during the updating table route",
|
||||
err_msg: "Reads the empty table route in comparing operation of the updating table route",
|
||||
})?
|
||||
.into_inner();
|
||||
|
||||
@@ -1172,7 +1173,7 @@ impl TableMetadataManager {
|
||||
let mut set = TxnOpGetResponseSet::from(&mut r.responses);
|
||||
let remote_table_route = on_update_table_route_failure(&mut set)?
|
||||
.context(error::UnexpectedSnafu {
|
||||
err_msg: "Reads the empty table route during the updating leader region status",
|
||||
err_msg: "Reads the empty table route in comparing operation of the updating leader region status",
|
||||
})?
|
||||
.into_inner();
|
||||
|
||||
@@ -1260,7 +1261,8 @@ impl_metadata_value! {
|
||||
FlowNameValue,
|
||||
FlowRouteValue,
|
||||
TableFlowValue,
|
||||
NodeAddressValue
|
||||
NodeAddressValue,
|
||||
SchemaNameValue
|
||||
}
|
||||
|
||||
impl_optional_metadata_value! {
|
||||
|
||||
@@ -38,7 +38,7 @@ use crate::key::flow::flow_name::FlowNameManager;
|
||||
use crate::key::flow::flownode_flow::FlownodeFlowManager;
|
||||
pub use crate::key::flow::table_flow::{TableFlowManager, TableFlowManagerRef};
|
||||
use crate::key::txn_helper::TxnOpGetResponseSet;
|
||||
use crate::key::{FlowId, MetadataKey};
|
||||
use crate::key::{DeserializedValueWithBytes, FlowId, MetadataKey};
|
||||
use crate::kv_backend::txn::Txn;
|
||||
use crate::kv_backend::KvBackendRef;
|
||||
use crate::rpc::store::BatchDeleteRequest;
|
||||
@@ -197,7 +197,7 @@ impl FlowMetadataManager {
|
||||
on_create_flow_flow_name_failure(&mut set)?.with_context(|| {
|
||||
error::UnexpectedSnafu {
|
||||
err_msg: format!(
|
||||
"Reads the empty flow name during the creating flow, flow_id: {flow_id}"
|
||||
"Reads the empty flow name in comparing operation of the creating flow, flow_id: {flow_id}"
|
||||
),
|
||||
}
|
||||
})?;
|
||||
@@ -220,7 +220,7 @@ impl FlowMetadataManager {
|
||||
let remote_flow =
|
||||
on_create_flow_failure(&mut set)?.with_context(|| error::UnexpectedSnafu {
|
||||
err_msg: format!(
|
||||
"Reads the empty flow during the creating flow, flow_id: {flow_id}"
|
||||
"Reads the empty flow in comparing operation of creating flow, flow_id: {flow_id}"
|
||||
),
|
||||
})?;
|
||||
let op_name = "creating flow";
|
||||
@@ -230,6 +230,102 @@ impl FlowMetadataManager {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Updates the metadata for a flow, returning an error if the old metadata does not exist.
|
||||
pub async fn update_flow_metadata(
|
||||
&self,
|
||||
flow_id: FlowId,
|
||||
current_flow_info: &DeserializedValueWithBytes<FlowInfoValue>,
|
||||
new_flow_info: &FlowInfoValue,
|
||||
flow_routes: Vec<(FlowPartitionId, FlowRouteValue)>,
|
||||
) -> Result<()> {
|
||||
let (create_flow_flow_name_txn, on_create_flow_flow_name_failure) =
|
||||
self.flow_name_manager.build_update_txn(
|
||||
&new_flow_info.catalog_name,
|
||||
&new_flow_info.flow_name,
|
||||
flow_id,
|
||||
)?;
|
||||
|
||||
let (create_flow_txn, on_create_flow_failure) =
|
||||
self.flow_info_manager
|
||||
.build_update_txn(flow_id, current_flow_info, new_flow_info)?;
|
||||
|
||||
let create_flow_routes_txn = self
|
||||
.flow_route_manager
|
||||
.build_create_txn(flow_id, flow_routes.clone())?;
|
||||
|
||||
let create_flownode_flow_txn = self
|
||||
.flownode_flow_manager
|
||||
.build_create_txn(flow_id, new_flow_info.flownode_ids().clone());
|
||||
|
||||
let create_table_flow_txn = self.table_flow_manager.build_create_txn(
|
||||
flow_id,
|
||||
flow_routes
|
||||
.into_iter()
|
||||
.map(|(partition_id, route)| (partition_id, TableFlowValue { peer: route.peer }))
|
||||
.collect(),
|
||||
new_flow_info.source_table_ids(),
|
||||
)?;
|
||||
|
||||
let txn = Txn::merge_all(vec![
|
||||
create_flow_flow_name_txn,
|
||||
create_flow_txn,
|
||||
create_flow_routes_txn,
|
||||
create_flownode_flow_txn,
|
||||
create_table_flow_txn,
|
||||
]);
|
||||
info!(
|
||||
"Creating flow {}.{}({}), with {} txn operations",
|
||||
new_flow_info.catalog_name,
|
||||
new_flow_info.flow_name,
|
||||
flow_id,
|
||||
txn.max_operations()
|
||||
);
|
||||
|
||||
let mut resp = self.kv_backend.txn(txn).await?;
|
||||
if !resp.succeeded {
|
||||
let mut set = TxnOpGetResponseSet::from(&mut resp.responses);
|
||||
let remote_flow_flow_name =
|
||||
on_create_flow_flow_name_failure(&mut set)?.with_context(|| {
|
||||
error::UnexpectedSnafu {
|
||||
err_msg: format!(
|
||||
"Reads the empty flow name in comparing operation of the updating flow, flow_id: {flow_id}"
|
||||
),
|
||||
}
|
||||
})?;
|
||||
|
||||
if remote_flow_flow_name.flow_id() != flow_id {
|
||||
info!(
|
||||
"Trying to updating flow {}.{}({}), but flow({}) already exists with a different flow id",
|
||||
new_flow_info.catalog_name,
|
||||
new_flow_info.flow_name,
|
||||
flow_id,
|
||||
remote_flow_flow_name.flow_id()
|
||||
);
|
||||
|
||||
return error::UnexpectedSnafu {
|
||||
err_msg: format!(
|
||||
"Reads different flow id when updating flow({2}.{3}), prev flow id = {0}, updating with flow id = {1}",
|
||||
remote_flow_flow_name.flow_id(),
|
||||
flow_id,
|
||||
new_flow_info.catalog_name,
|
||||
new_flow_info.flow_name,
|
||||
),
|
||||
}.fail();
|
||||
}
|
||||
|
||||
let remote_flow =
|
||||
on_create_flow_failure(&mut set)?.with_context(|| error::UnexpectedSnafu {
|
||||
err_msg: format!(
|
||||
"Reads the empty flow in comparing operation of the updating flow, flow_id: {flow_id}"
|
||||
),
|
||||
})?;
|
||||
let op_name = "updating flow";
|
||||
ensure_values!(*remote_flow, new_flow_info.clone(), op_name);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn flow_metadata_keys(&self, flow_id: FlowId, flow_value: &FlowInfoValue) -> Vec<Vec<u8>> {
|
||||
let source_table_ids = flow_value.source_table_ids();
|
||||
let mut keys =
|
||||
@@ -560,4 +656,222 @@ mod tests {
|
||||
// Ensures all keys are deleted
|
||||
assert!(mem_kv.is_empty())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_update_flow_metadata() {
|
||||
let mem_kv = Arc::new(MemoryKvBackend::default());
|
||||
let flow_metadata_manager = FlowMetadataManager::new(mem_kv.clone());
|
||||
let flow_id = 10;
|
||||
let flow_value = test_flow_info_value(
|
||||
"flow",
|
||||
[(0, 1u64), (1, 2u64)].into(),
|
||||
vec![1024, 1025, 1026],
|
||||
);
|
||||
let flow_routes = vec![
|
||||
(
|
||||
1u32,
|
||||
FlowRouteValue {
|
||||
peer: Peer::empty(1),
|
||||
},
|
||||
),
|
||||
(
|
||||
2,
|
||||
FlowRouteValue {
|
||||
peer: Peer::empty(2),
|
||||
},
|
||||
),
|
||||
];
|
||||
flow_metadata_manager
|
||||
.create_flow_metadata(flow_id, flow_value.clone(), flow_routes.clone())
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let new_flow_value = {
|
||||
let mut tmp = flow_value.clone();
|
||||
tmp.raw_sql = "new".to_string();
|
||||
tmp
|
||||
};
|
||||
|
||||
// Update flow instead
|
||||
flow_metadata_manager
|
||||
.update_flow_metadata(
|
||||
flow_id,
|
||||
&DeserializedValueWithBytes::from_inner(flow_value.clone()),
|
||||
&new_flow_value,
|
||||
flow_routes.clone(),
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let got = flow_metadata_manager
|
||||
.flow_info_manager()
|
||||
.get(flow_id)
|
||||
.await
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
let routes = flow_metadata_manager
|
||||
.flow_route_manager()
|
||||
.routes(flow_id)
|
||||
.try_collect::<Vec<_>>()
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!(
|
||||
routes,
|
||||
vec![
|
||||
(
|
||||
FlowRouteKey::new(flow_id, 1),
|
||||
FlowRouteValue {
|
||||
peer: Peer::empty(1),
|
||||
},
|
||||
),
|
||||
(
|
||||
FlowRouteKey::new(flow_id, 2),
|
||||
FlowRouteValue {
|
||||
peer: Peer::empty(2),
|
||||
},
|
||||
),
|
||||
]
|
||||
);
|
||||
assert_eq!(got, new_flow_value);
|
||||
let flows = flow_metadata_manager
|
||||
.flownode_flow_manager()
|
||||
.flows(1)
|
||||
.try_collect::<Vec<_>>()
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!(flows, vec![(flow_id, 0)]);
|
||||
for table_id in [1024, 1025, 1026] {
|
||||
let nodes = flow_metadata_manager
|
||||
.table_flow_manager()
|
||||
.flows(table_id)
|
||||
.try_collect::<Vec<_>>()
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!(
|
||||
nodes,
|
||||
vec![
|
||||
(
|
||||
TableFlowKey::new(table_id, 1, flow_id, 1),
|
||||
TableFlowValue {
|
||||
peer: Peer::empty(1)
|
||||
}
|
||||
),
|
||||
(
|
||||
TableFlowKey::new(table_id, 2, flow_id, 2),
|
||||
TableFlowValue {
|
||||
peer: Peer::empty(2)
|
||||
}
|
||||
)
|
||||
]
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_update_flow_metadata_flow_replace_diff_id_err() {
|
||||
let mem_kv = Arc::new(MemoryKvBackend::default());
|
||||
let flow_metadata_manager = FlowMetadataManager::new(mem_kv);
|
||||
let flow_id = 10;
|
||||
let flow_value = test_flow_info_value("flow", [(0, 1u64)].into(), vec![1024, 1025, 1026]);
|
||||
let flow_routes = vec![
|
||||
(
|
||||
1u32,
|
||||
FlowRouteValue {
|
||||
peer: Peer::empty(1),
|
||||
},
|
||||
),
|
||||
(
|
||||
2,
|
||||
FlowRouteValue {
|
||||
peer: Peer::empty(2),
|
||||
},
|
||||
),
|
||||
];
|
||||
flow_metadata_manager
|
||||
.create_flow_metadata(flow_id, flow_value.clone(), flow_routes.clone())
|
||||
.await
|
||||
.unwrap();
|
||||
// update again with same flow id
|
||||
flow_metadata_manager
|
||||
.update_flow_metadata(
|
||||
flow_id,
|
||||
&DeserializedValueWithBytes::from_inner(flow_value.clone()),
|
||||
&flow_value,
|
||||
flow_routes.clone(),
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
// update again with wrong flow id, expected error
|
||||
let err = flow_metadata_manager
|
||||
.update_flow_metadata(
|
||||
flow_id + 1,
|
||||
&DeserializedValueWithBytes::from_inner(flow_value.clone()),
|
||||
&flow_value,
|
||||
flow_routes,
|
||||
)
|
||||
.await
|
||||
.unwrap_err();
|
||||
assert_matches!(err, error::Error::Unexpected { .. });
|
||||
assert!(err
|
||||
.to_string()
|
||||
.contains("Reads different flow id when updating flow"));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_update_flow_metadata_unexpected_err_prev_value_diff() {
|
||||
let mem_kv = Arc::new(MemoryKvBackend::default());
|
||||
let flow_metadata_manager = FlowMetadataManager::new(mem_kv);
|
||||
let flow_id = 10;
|
||||
let catalog_name = "greptime";
|
||||
let flow_value = test_flow_info_value("flow", [(0, 1u64)].into(), vec![1024, 1025, 1026]);
|
||||
let flow_routes = vec![
|
||||
(
|
||||
1u32,
|
||||
FlowRouteValue {
|
||||
peer: Peer::empty(1),
|
||||
},
|
||||
),
|
||||
(
|
||||
2,
|
||||
FlowRouteValue {
|
||||
peer: Peer::empty(2),
|
||||
},
|
||||
),
|
||||
];
|
||||
flow_metadata_manager
|
||||
.create_flow_metadata(flow_id, flow_value.clone(), flow_routes.clone())
|
||||
.await
|
||||
.unwrap();
|
||||
// Attempts an update whose claimed current value differs from what is actually stored.
|
||||
let another_sink_table_name = TableName {
|
||||
catalog_name: catalog_name.to_string(),
|
||||
schema_name: "my_schema".to_string(),
|
||||
table_name: "another_sink_table".to_string(),
|
||||
};
|
||||
let flow_value = FlowInfoValue {
|
||||
catalog_name: "greptime".to_string(),
|
||||
flow_name: "flow".to_string(),
|
||||
source_table_ids: vec![1024, 1025, 1026],
|
||||
sink_table_name: another_sink_table_name,
|
||||
flownode_ids: [(0, 1u64)].into(),
|
||||
raw_sql: "raw".to_string(),
|
||||
expire_after: Some(300),
|
||||
comment: "hi".to_string(),
|
||||
options: Default::default(),
|
||||
};
|
||||
let err = flow_metadata_manager
|
||||
.update_flow_metadata(
|
||||
flow_id,
|
||||
&DeserializedValueWithBytes::from_inner(flow_value.clone()),
|
||||
&flow_value,
|
||||
flow_routes.clone(),
|
||||
)
|
||||
.await
|
||||
.unwrap_err();
|
||||
assert!(
|
||||
err.to_string().contains("Reads the different value"),
|
||||
"error: {:?}",
|
||||
err
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -26,7 +26,7 @@ use crate::error::{self, Result};
|
||||
use crate::key::flow::FlowScoped;
|
||||
use crate::key::txn_helper::TxnOpGetResponseSet;
|
||||
use crate::key::{DeserializedValueWithBytes, FlowId, FlowPartitionId, MetadataKey, MetadataValue};
|
||||
use crate::kv_backend::txn::Txn;
|
||||
use crate::kv_backend::txn::{Compare, CompareOp, Txn, TxnOp};
|
||||
use crate::kv_backend::KvBackendRef;
|
||||
use crate::FlownodeId;
|
||||
|
||||
@@ -196,6 +196,19 @@ impl FlowInfoManager {
|
||||
.transpose()
|
||||
}
|
||||
|
||||
/// Returns the [FlowInfoValue], together with its original bytes, for the specified `flow_id`.
|
||||
pub async fn get_raw(
|
||||
&self,
|
||||
flow_id: FlowId,
|
||||
) -> Result<Option<DeserializedValueWithBytes<FlowInfoValue>>> {
|
||||
let key = FlowInfoKey::new(flow_id).to_bytes();
|
||||
self.kv_backend
|
||||
.get(&key)
|
||||
.await?
|
||||
.map(|x| DeserializedValueWithBytes::from_inner_slice(&x.value))
|
||||
.transpose()
|
||||
}
|
||||
|
||||
/// Builds a create flow transaction.
|
||||
/// It is expected that the `__flow/info/{flow_id}` wasn't occupied.
|
||||
/// Otherwise, the transaction will retrieve existing value.
|
||||
@@ -215,6 +228,36 @@ impl FlowInfoManager {
|
||||
TxnOpGetResponseSet::decode_with(TxnOpGetResponseSet::filter(key)),
|
||||
))
|
||||
}
|
||||
|
||||
/// Builds an update-flow transaction.
/// It expects the `__flow/info/{flow_id}` key to already exist and to hold `current_flow_value`;
/// the new value may be identical, so a replace is allowed even when nothing changes.
/// Otherwise, the transaction retrieves the existing value and fails.
|
||||
pub(crate) fn build_update_txn(
|
||||
&self,
|
||||
flow_id: FlowId,
|
||||
current_flow_value: &DeserializedValueWithBytes<FlowInfoValue>,
|
||||
new_flow_value: &FlowInfoValue,
|
||||
) -> Result<(
|
||||
Txn,
|
||||
impl FnOnce(&mut TxnOpGetResponseSet) -> FlowInfoDecodeResult,
|
||||
)> {
|
||||
let key = FlowInfoKey::new(flow_id).to_bytes();
|
||||
let raw_value = new_flow_value.try_as_raw_value()?;
|
||||
let prev_value = current_flow_value.get_raw_bytes();
|
||||
let txn = Txn::new()
|
||||
.when(vec![
|
||||
Compare::new(key.clone(), CompareOp::NotEqual, None),
|
||||
Compare::new(key.clone(), CompareOp::Equal, Some(prev_value)),
|
||||
])
|
||||
.and_then(vec![TxnOp::Put(key.clone(), raw_value)])
|
||||
.or_else(vec![TxnOp::Get(key.clone())]);
|
||||
|
||||
Ok((
|
||||
txn,
|
||||
TxnOpGetResponseSet::decode_with(TxnOpGetResponseSet::filter(key)),
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
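The update transaction above is a compare-and-swap: the write only happens when the key already exists and still holds the previous bytes, otherwise the current value is read back so the caller can report the mismatch. A self-contained sketch of the same guard over a plain map; this is not the real Txn/Compare API:

use std::collections::HashMap;

enum CasResult {
    Committed,
    // Current value returned for error reporting, like the or_else(Get) branch.
    Conflict(Option<Vec<u8>>),
}

fn compare_and_swap(
    kv: &mut HashMap<Vec<u8>, Vec<u8>>,
    key: &[u8],
    expected: &[u8],
    new_value: Vec<u8>,
) -> CasResult {
    match kv.get(key) {
        // Key exists and still holds the expected bytes: commit the update.
        Some(current) if current.as_slice() == expected => {
            kv.insert(key.to_vec(), new_value);
            CasResult::Committed
        }
        // Key missing or changed since it was read: report the conflict.
        other => CasResult::Conflict(other.cloned()),
    }
}

fn main() {
    let mut kv = HashMap::new();
    kv.insert(b"__flow/info/10".to_vec(), b"v1".to_vec());
    assert!(matches!(
        compare_and_swap(&mut kv, b"__flow/info/10", b"v1", b"v2".to_vec()),
        CasResult::Committed
    ));
    assert!(matches!(
        compare_and_swap(&mut kv, b"__flow/info/10", b"v1", b"v3".to_vec()),
        CasResult::Conflict(Some(_))
    ));
}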
#[cfg(test)]
|
||||
|
||||
@@ -26,7 +26,7 @@ use crate::key::txn_helper::TxnOpGetResponseSet;
|
||||
use crate::key::{
|
||||
BytesAdapter, DeserializedValueWithBytes, FlowId, MetadataKey, MetadataValue, NAME_PATTERN,
|
||||
};
|
||||
use crate::kv_backend::txn::Txn;
|
||||
use crate::kv_backend::txn::{Compare, CompareOp, Txn, TxnOp};
|
||||
use crate::kv_backend::KvBackendRef;
|
||||
use crate::range_stream::{PaginationStream, DEFAULT_PAGE_SIZE};
|
||||
use crate::rpc::store::RangeRequest;
|
||||
@@ -237,6 +237,37 @@ impl FlowNameManager {
|
||||
TxnOpGetResponseSet::decode_with(TxnOpGetResponseSet::filter(raw_key)),
|
||||
))
|
||||
}
|
||||
|
||||
/// Builds an update flow name transaction, which changes neither the name nor the id; it only checks that they are unchanged.
/// It's expected that the `__flow/name/{catalog}/{flow_name}` IS already occupied,
/// and that both the flow name and the flow id are the same.
/// Otherwise, the transaction retrieves the existing value (and fails).
|
||||
pub fn build_update_txn(
|
||||
&self,
|
||||
catalog_name: &str,
|
||||
flow_name: &str,
|
||||
flow_id: FlowId,
|
||||
) -> Result<(
|
||||
Txn,
|
||||
impl FnOnce(&mut TxnOpGetResponseSet) -> FlowNameDecodeResult,
|
||||
)> {
|
||||
let key = FlowNameKey::new(catalog_name, flow_name);
|
||||
let raw_key = key.to_bytes();
|
||||
let flow_flow_name_value = FlowNameValue::new(flow_id);
|
||||
let raw_value = flow_flow_name_value.try_as_raw_value()?;
|
||||
let txn = Txn::new()
|
||||
.when(vec![Compare::new(
|
||||
raw_key.clone(),
|
||||
CompareOp::Equal,
|
||||
Some(raw_value),
|
||||
)])
|
||||
.or_else(vec![TxnOp::Get(raw_key.clone())]);
|
||||
|
||||
Ok((
|
||||
txn,
|
||||
TxnOpGetResponseSet::decode_with(TxnOpGetResponseSet::filter(raw_key)),
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
|
||||
86
src/common/meta/src/key/maintenance.rs
Normal file
@@ -0,0 +1,86 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use crate::error::Result;
|
||||
use crate::key::MAINTENANCE_KEY;
|
||||
use crate::kv_backend::KvBackendRef;
|
||||
use crate::rpc::store::PutRequest;
|
||||
|
||||
pub type MaintenanceModeManagerRef = Arc<MaintenanceModeManager>;
|
||||
|
||||
/// The maintenance mode manager.
|
||||
///
|
||||
/// Used to enable or disable maintenance mode.
|
||||
#[derive(Clone)]
|
||||
pub struct MaintenanceModeManager {
|
||||
kv_backend: KvBackendRef,
|
||||
}
|
||||
|
||||
impl MaintenanceModeManager {
|
||||
pub fn new(kv_backend: KvBackendRef) -> Self {
|
||||
Self { kv_backend }
|
||||
}
|
||||
|
||||
/// Enables maintenance mode.
|
||||
pub async fn set_maintenance_mode(&self) -> Result<()> {
|
||||
let req = PutRequest {
|
||||
key: Vec::from(MAINTENANCE_KEY),
|
||||
value: vec![],
|
||||
prev_kv: false,
|
||||
};
|
||||
self.kv_backend.put(req).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Unsets maintenance mode.
|
||||
pub async fn unset_maintenance_mode(&self) -> Result<()> {
|
||||
self.kv_backend
|
||||
.delete(MAINTENANCE_KEY.as_bytes(), false)
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Returns true if maintenance mode is enabled.
|
||||
pub async fn maintenance_mode(&self) -> Result<bool> {
|
||||
self.kv_backend.exists(MAINTENANCE_KEY.as_bytes()).await
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
use crate::key::maintenance::MaintenanceModeManager;
|
||||
use crate::kv_backend::memory::MemoryKvBackend;
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_maintenance_mode_manager() {
|
||||
let maintenance_mode_manager = Arc::new(MaintenanceModeManager::new(Arc::new(
|
||||
MemoryKvBackend::new(),
|
||||
)));
|
||||
assert!(!maintenance_mode_manager.maintenance_mode().await.unwrap());
|
||||
maintenance_mode_manager
|
||||
.set_maintenance_mode()
|
||||
.await
|
||||
.unwrap();
|
||||
assert!(maintenance_mode_manager.maintenance_mode().await.unwrap());
|
||||
maintenance_mode_manager
|
||||
.unset_maintenance_mode()
|
||||
.await
|
||||
.unwrap();
|
||||
assert!(!maintenance_mode_manager.maintenance_mode().await.unwrap());
|
||||
}
|
||||
}
|
||||
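Because the key carries no payload, enabling maintenance mode is nothing more than creating the key, and checking it is an existence test. A hypothetical guard helper, reusing this file's imports, could look like the sketch below; the name and the failover use case are illustrative, not part of this change:

// Sketch only: callers that must pause destructive work while an operator
// intervenes can gate on the flag before acting.
async fn should_run_failover(maintenance: &MaintenanceModeManager) -> Result<bool> {
    // `maintenance_mode()` is simply an existence check on MAINTENANCE_KEY.
    Ok(!maintenance.maintenance_mode().await?)
}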
@@ -19,41 +19,39 @@ use std::sync::Arc;
|
||||
use snafu::OptionExt;
|
||||
use store_api::storage::TableId;
|
||||
|
||||
use crate::cache::{SchemaCacheRef, TableSchemaCacheRef};
|
||||
use crate::error::TableInfoNotFoundSnafu;
|
||||
use crate::key::schema_name::{SchemaManager, SchemaNameKey};
|
||||
use crate::key::table_info::{TableInfoManager, TableInfoManagerRef};
|
||||
use crate::kv_backend::KvBackendRef;
|
||||
use crate::{error, SchemaOptions};
|
||||
|
||||
pub type SchemaMetadataManagerRef = Arc<SchemaMetadataManager>;
|
||||
|
||||
pub struct SchemaMetadataManager {
|
||||
table_info_manager: TableInfoManagerRef,
|
||||
schema_manager: SchemaManager,
|
||||
table_id_schema_cache: TableSchemaCacheRef,
|
||||
schema_cache: SchemaCacheRef,
|
||||
#[cfg(any(test, feature = "testing"))]
|
||||
kv_backend: KvBackendRef,
|
||||
kv_backend: crate::kv_backend::KvBackendRef,
|
||||
}
|
||||
|
||||
impl SchemaMetadataManager {
|
||||
/// Creates a new schema metadata manager.
|
||||
#[cfg(not(any(test, feature = "testing")))]
|
||||
pub fn new(kv_backend: KvBackendRef) -> Self {
|
||||
let table_info_manager = Arc::new(TableInfoManager::new(kv_backend.clone()));
|
||||
let schema_manager = SchemaManager::new(kv_backend);
|
||||
pub fn new(table_id_schema_cache: TableSchemaCacheRef, schema_cache: SchemaCacheRef) -> Self {
|
||||
Self {
|
||||
table_info_manager,
|
||||
schema_manager,
|
||||
table_id_schema_cache,
|
||||
schema_cache,
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates a new schema metadata manager.
|
||||
#[cfg(any(test, feature = "testing"))]
|
||||
pub fn new(kv_backend: KvBackendRef) -> Self {
|
||||
let table_info_manager = Arc::new(TableInfoManager::new(kv_backend.clone()));
|
||||
let schema_manager = SchemaManager::new(kv_backend.clone());
|
||||
pub fn new(
|
||||
kv_backend: crate::kv_backend::KvBackendRef,
|
||||
table_id_schema_cache: TableSchemaCacheRef,
|
||||
schema_cache: SchemaCacheRef,
|
||||
) -> Self {
|
||||
Self {
|
||||
table_info_manager,
|
||||
schema_manager,
|
||||
table_id_schema_cache,
|
||||
schema_cache,
|
||||
kv_backend,
|
||||
}
|
||||
}
|
||||
@@ -62,20 +60,16 @@ impl SchemaMetadataManager {
|
||||
pub async fn get_schema_options_by_table_id(
|
||||
&self,
|
||||
table_id: TableId,
|
||||
) -> error::Result<Option<SchemaOptions>> {
|
||||
let table_info = self
|
||||
.table_info_manager
|
||||
) -> error::Result<Option<Arc<SchemaOptions>>> {
|
||||
let schema_name = self
|
||||
.table_id_schema_cache
|
||||
.get(table_id)
|
||||
.await?
|
||||
.with_context(|| TableInfoNotFoundSnafu {
|
||||
table: format!("table id: {}", table_id),
|
||||
})?;
|
||||
|
||||
let key = SchemaNameKey::new(
|
||||
&table_info.table_info.catalog_name,
|
||||
&table_info.table_info.schema_name,
|
||||
);
|
||||
self.schema_manager.get(key).await
|
||||
self.schema_cache.get_by_ref(&schema_name).await
|
||||
}
|
||||
|
||||
#[cfg(any(test, feature = "testing"))]
|
||||
@@ -97,17 +91,19 @@ impl SchemaMetadataManager {
|
||||
meta: Default::default(),
|
||||
table_type: TableType::Base,
|
||||
});
|
||||
let (txn, _) = self
|
||||
.table_info_manager
|
||||
let table_info_manager =
|
||||
crate::key::table_info::TableInfoManager::new(self.kv_backend.clone());
|
||||
let (txn, _) = table_info_manager
|
||||
.build_create_txn(table_id, &value)
|
||||
.unwrap();
|
||||
let resp = self.kv_backend.txn(txn).await.unwrap();
|
||||
assert!(resp.succeeded, "Failed to create table metadata");
|
||||
let key = SchemaNameKey {
|
||||
let key = crate::key::schema_name::SchemaNameKey {
|
||||
catalog: catalog_name,
|
||||
schema: schema_name,
|
||||
};
|
||||
self.schema_manager
|
||||
|
||||
crate::key::schema_name::SchemaManager::new(self.kv_backend.clone())
|
||||
.create(key, schema_value, false)
|
||||
.await
|
||||
.expect("Failed to create schema metadata");
|
||||
|
||||
@@ -15,16 +15,20 @@
|
||||
use std::collections::HashMap;
|
||||
use std::fmt::Display;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use common_time::DatabaseTimeToLive;
|
||||
use futures::stream::BoxStream;
|
||||
use humantime_serde::re::humantime;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
|
||||
use super::txn_helper::TxnOpGetResponseSet;
|
||||
use super::DeserializedValueWithBytes;
|
||||
use crate::ensure_values;
|
||||
use crate::error::{self, Error, InvalidMetadataSnafu, ParseOptionSnafu, Result};
|
||||
use crate::key::{MetadataKey, SCHEMA_NAME_KEY_PATTERN, SCHEMA_NAME_KEY_PREFIX};
|
||||
use crate::kv_backend::txn::Txn;
|
||||
use crate::kv_backend::KvBackendRef;
|
||||
use crate::range_stream::{PaginationStream, DEFAULT_PAGE_SIZE};
|
||||
use crate::rpc::store::RangeRequest;
|
||||
@@ -53,15 +57,13 @@ impl Default for SchemaNameKey<'_> {
|
||||
#[derive(Debug, Default, Clone, PartialEq, Serialize, Deserialize)]
|
||||
pub struct SchemaNameValue {
|
||||
#[serde(default)]
|
||||
#[serde(with = "humantime_serde")]
|
||||
pub ttl: Option<Duration>,
|
||||
pub ttl: Option<DatabaseTimeToLive>,
|
||||
}
|
||||
|
||||
impl Display for SchemaNameValue {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
if let Some(ttl) = self.ttl {
|
||||
let ttl = humantime::format_duration(ttl);
|
||||
write!(f, "ttl='{ttl}'")?;
|
||||
if let Some(ttl) = self.ttl.map(|i| i.to_string()) {
|
||||
write!(f, "ttl='{}'", ttl)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
@@ -92,11 +94,8 @@ impl TryFrom<&HashMap<String, String>> for SchemaNameValue {
|
||||
impl From<SchemaNameValue> for HashMap<String, String> {
|
||||
fn from(value: SchemaNameValue) -> Self {
|
||||
let mut opts = HashMap::new();
|
||||
if let Some(ttl) = value.ttl {
|
||||
opts.insert(
|
||||
OPT_KEY_TTL.to_string(),
|
||||
format!("{}", humantime::format_duration(ttl)),
|
||||
);
|
||||
if let Some(ttl) = value.ttl.map(|ttl| ttl.to_string()) {
|
||||
opts.insert(OPT_KEY_TTL.to_string(), ttl);
|
||||
}
|
||||
opts
|
||||
}
|
||||
@@ -167,10 +166,13 @@ impl<'a> TryFrom<&'a str> for SchemaNameKey<'a> {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct SchemaManager {
|
||||
kv_backend: KvBackendRef,
|
||||
}
|
||||
|
||||
pub type SchemaNameDecodeResult = Result<Option<DeserializedValueWithBytes<SchemaNameValue>>>;
|
||||
|
||||
impl SchemaManager {
|
||||
pub fn new(kv_backend: KvBackendRef) -> Self {
|
||||
Self { kv_backend }
|
||||
@@ -204,11 +206,15 @@ impl SchemaManager {
|
||||
self.kv_backend.exists(&raw_key).await
|
||||
}
|
||||
|
||||
pub async fn get(&self, schema: SchemaNameKey<'_>) -> Result<Option<SchemaNameValue>> {
|
||||
pub async fn get(
|
||||
&self,
|
||||
schema: SchemaNameKey<'_>,
|
||||
) -> Result<Option<DeserializedValueWithBytes<SchemaNameValue>>> {
|
||||
let raw_key = schema.to_bytes();
|
||||
let value = self.kv_backend.get(&raw_key).await?;
|
||||
value
|
||||
.and_then(|v| SchemaNameValue::try_from_raw_value(v.value.as_ref()).transpose())
|
||||
self.kv_backend
|
||||
.get(&raw_key)
|
||||
.await?
|
||||
.map(|x| DeserializedValueWithBytes::from_inner_slice(&x.value))
|
||||
.transpose()
|
||||
}
|
||||
|
||||
@@ -220,6 +226,54 @@ impl SchemaManager {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub(crate) fn build_update_txn(
|
||||
&self,
|
||||
schema: SchemaNameKey<'_>,
|
||||
current_schema_value: &DeserializedValueWithBytes<SchemaNameValue>,
|
||||
new_schema_value: &SchemaNameValue,
|
||||
) -> Result<(
|
||||
Txn,
|
||||
impl FnOnce(&mut TxnOpGetResponseSet) -> SchemaNameDecodeResult,
|
||||
)> {
|
||||
let raw_key = schema.to_bytes();
|
||||
let raw_value = current_schema_value.get_raw_bytes();
|
||||
let new_raw_value: Vec<u8> = new_schema_value.try_as_raw_value()?;
|
||||
|
||||
let txn = Txn::compare_and_put(raw_key.clone(), raw_value, new_raw_value);
|
||||
|
||||
Ok((
|
||||
txn,
|
||||
TxnOpGetResponseSet::decode_with(TxnOpGetResponseSet::filter(raw_key)),
|
||||
))
|
||||
}
|
||||
|
||||
/// Updates a [SchemaNameKey].
|
||||
pub async fn update(
|
||||
&self,
|
||||
schema: SchemaNameKey<'_>,
|
||||
current_schema_value: &DeserializedValueWithBytes<SchemaNameValue>,
|
||||
new_schema_value: &SchemaNameValue,
|
||||
) -> Result<()> {
|
||||
let (txn, on_failure) =
|
||||
self.build_update_txn(schema, current_schema_value, new_schema_value)?;
|
||||
let mut r = self.kv_backend.txn(txn).await?;
|
||||
|
||||
if !r.succeeded {
|
||||
let mut set = TxnOpGetResponseSet::from(&mut r.responses);
|
||||
let remote_schema_value = on_failure(&mut set)?
|
||||
.context(error::UnexpectedSnafu {
|
||||
err_msg:
|
||||
"Reads the empty schema name value in comparing operation of updating schema name value",
|
||||
})?
|
||||
.into_inner();
|
||||
|
||||
let op_name = "the updating schema name value";
|
||||
ensure_values!(&remote_schema_value, new_schema_value, op_name);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Returns a schema stream that lists all schemas belonging to the target `catalog`.
|
||||
pub fn schema_names(&self, catalog: &str) -> BoxStream<'static, Result<String>> {
|
||||
let start_key = SchemaNameKey::range_start_key(catalog);
|
||||
@@ -254,6 +308,7 @@ impl<'a> From<&'a SchemaName> for SchemaNameKey<'a> {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::time::Duration;
|
||||
|
||||
use super::*;
|
||||
use crate::kv_backend::memory::MemoryKvBackend;
|
||||
@@ -264,9 +319,14 @@ mod tests {
|
||||
assert_eq!("", schema_value.to_string());
|
||||
|
||||
let schema_value = SchemaNameValue {
|
||||
ttl: Some(Duration::from_secs(9)),
|
||||
ttl: Some(Duration::from_secs(9).into()),
|
||||
};
|
||||
assert_eq!("ttl='9s'", schema_value.to_string());
|
||||
|
||||
let schema_value = SchemaNameValue {
|
||||
ttl: Some(Duration::from_secs(0).into()),
|
||||
};
|
||||
assert_eq!("ttl='forever'", schema_value.to_string());
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -279,17 +339,36 @@ mod tests {
|
||||
assert_eq!(key, parsed);
|
||||
|
||||
let value = SchemaNameValue {
|
||||
ttl: Some(Duration::from_secs(10)),
|
||||
ttl: Some(Duration::from_secs(10).into()),
|
||||
};
|
||||
let mut opts: HashMap<String, String> = HashMap::new();
|
||||
opts.insert("ttl".to_string(), "10s".to_string());
|
||||
let from_value = SchemaNameValue::try_from(&opts).unwrap();
|
||||
assert_eq!(value, from_value);
|
||||
|
||||
let parsed = SchemaNameValue::try_from_raw_value("{\"ttl\":\"10s\"}".as_bytes()).unwrap();
|
||||
let parsed = SchemaNameValue::try_from_raw_value(
|
||||
serde_json::json!({"ttl": "10s"}).to_string().as_bytes(),
|
||||
)
|
||||
.unwrap();
|
||||
assert_eq!(Some(value), parsed);
|
||||
|
||||
let forever = SchemaNameValue {
|
||||
ttl: Some(Default::default()),
|
||||
};
|
||||
let parsed = SchemaNameValue::try_from_raw_value(
|
||||
serde_json::json!({"ttl": "forever"}).to_string().as_bytes(),
|
||||
)
|
||||
.unwrap();
|
||||
assert_eq!(Some(forever), parsed);
|
||||
|
||||
let instant_err = SchemaNameValue::try_from_raw_value(
|
||||
serde_json::json!({"ttl": "instant"}).to_string().as_bytes(),
|
||||
);
|
||||
assert!(instant_err.is_err());
|
||||
|
||||
let none = SchemaNameValue::try_from_raw_value("null".as_bytes()).unwrap();
|
||||
assert!(none.is_none());
|
||||
|
||||
let err_empty = SchemaNameValue::try_from_raw_value("".as_bytes());
|
||||
assert!(err_empty.is_err());
|
||||
}
|
||||
@@ -306,4 +385,52 @@ mod tests {
|
||||
|
||||
assert!(!manager.exists(wrong_schema_key).await.unwrap());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_update_schema_value() {
|
||||
let manager = SchemaManager::new(Arc::new(MemoryKvBackend::default()));
|
||||
let schema_key = SchemaNameKey::new("my-catalog", "my-schema");
|
||||
manager.create(schema_key, None, false).await.unwrap();
|
||||
|
||||
let current_schema_value = manager.get(schema_key).await.unwrap().unwrap();
|
||||
let new_schema_value = SchemaNameValue {
|
||||
ttl: Some(Duration::from_secs(10).into()),
|
||||
};
|
||||
manager
|
||||
.update(schema_key, ¤t_schema_value, &new_schema_value)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
// Update with the same value, should be ok
|
||||
manager
|
||||
.update(schema_key, ¤t_schema_value, &new_schema_value)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let new_schema_value = SchemaNameValue {
|
||||
ttl: Some(Duration::from_secs(40).into()),
|
||||
};
|
||||
let incorrect_schema_value = SchemaNameValue {
|
||||
ttl: Some(Duration::from_secs(20).into()),
|
||||
}
|
||||
.try_as_raw_value()
|
||||
.unwrap();
|
||||
let incorrect_schema_value =
|
||||
DeserializedValueWithBytes::from_inner_slice(&incorrect_schema_value).unwrap();
|
||||
|
||||
manager
|
||||
.update(schema_key, &incorrect_schema_value, &new_schema_value)
|
||||
.await
|
||||
.unwrap_err();
|
||||
|
||||
let current_schema_value = manager.get(schema_key).await.unwrap().unwrap();
|
||||
let new_schema_value = SchemaNameValue { ttl: None };
|
||||
manager
|
||||
.update(schema_key, ¤t_schema_value, &new_schema_value)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let current_schema_value = manager.get(schema_key).await.unwrap().unwrap();
|
||||
assert_eq!(new_schema_value, *current_schema_value);
|
||||
}
|
||||
}
|
||||
|
||||
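Since the TTL now round-trips through `DatabaseTimeToLive`, the schema options map keeps using plain humantime strings. A small additional check, written here only as a sketch that reuses this file's imports and mirrors the existing tests, would be:

#[test]
fn ttl_option_round_trip_sketch() {
    // Hypothetical extra test, not part of the diff: a ttl of 30 seconds is
    // rendered as the humantime string "30s" and parses back to the same value.
    let value = SchemaNameValue {
        ttl: Some(Duration::from_secs(30).into()),
    };
    let opts: HashMap<String, String> = value.clone().into();
    assert_eq!(opts.get("ttl").map(String::as_str), Some("30s"));
    assert_eq!(SchemaNameValue::try_from(&opts).unwrap(), value);
}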
@@ -136,6 +136,13 @@ pub struct Txn {
|
||||
c_else: bool,
|
||||
}
|
||||
|
||||
#[cfg(any(test, feature = "testing"))]
|
||||
impl Txn {
|
||||
pub fn req(&self) -> &TxnRequest {
|
||||
&self.req
|
||||
}
|
||||
}
|
||||
|
||||
impl Txn {
|
||||
pub fn merge_all<T: IntoIterator<Item = Txn>>(values: T) -> Self {
|
||||
values
|
||||
|
||||
@@ -15,24 +15,27 @@
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::result;
|
||||
|
||||
use api::v1::alter_database_expr::Kind as PbAlterDatabaseKind;
|
||||
use api::v1::meta::ddl_task_request::Task;
|
||||
use api::v1::meta::{
|
||||
AlterTableTask as PbAlterTableTask, AlterTableTasks as PbAlterTableTasks,
|
||||
CreateDatabaseTask as PbCreateDatabaseTask, CreateFlowTask as PbCreateFlowTask,
|
||||
CreateTableTask as PbCreateTableTask, CreateTableTasks as PbCreateTableTasks,
|
||||
CreateViewTask as PbCreateViewTask, DdlTaskRequest as PbDdlTaskRequest,
|
||||
DdlTaskResponse as PbDdlTaskResponse, DropDatabaseTask as PbDropDatabaseTask,
|
||||
DropFlowTask as PbDropFlowTask, DropTableTask as PbDropTableTask,
|
||||
DropTableTasks as PbDropTableTasks, DropViewTask as PbDropViewTask, Partition, ProcedureId,
|
||||
AlterDatabaseTask as PbAlterDatabaseTask, AlterTableTask as PbAlterTableTask,
|
||||
AlterTableTasks as PbAlterTableTasks, CreateDatabaseTask as PbCreateDatabaseTask,
|
||||
CreateFlowTask as PbCreateFlowTask, CreateTableTask as PbCreateTableTask,
|
||||
CreateTableTasks as PbCreateTableTasks, CreateViewTask as PbCreateViewTask,
|
||||
DdlTaskRequest as PbDdlTaskRequest, DdlTaskResponse as PbDdlTaskResponse,
|
||||
DropDatabaseTask as PbDropDatabaseTask, DropFlowTask as PbDropFlowTask,
|
||||
DropTableTask as PbDropTableTask, DropTableTasks as PbDropTableTasks,
|
||||
DropViewTask as PbDropViewTask, Partition, ProcedureId,
|
||||
TruncateTableTask as PbTruncateTableTask,
|
||||
};
|
||||
use api::v1::{
|
||||
AlterExpr, CreateDatabaseExpr, CreateFlowExpr, CreateTableExpr, CreateViewExpr,
|
||||
DropDatabaseExpr, DropFlowExpr, DropTableExpr, DropViewExpr, ExpireAfter,
|
||||
QueryContext as PbQueryContext, TruncateTableExpr,
|
||||
AlterDatabaseExpr, AlterTableExpr, CreateDatabaseExpr, CreateFlowExpr, CreateTableExpr,
|
||||
CreateViewExpr, DropDatabaseExpr, DropFlowExpr, DropTableExpr, DropViewExpr, ExpireAfter,
|
||||
Option as PbOption, QueryContext as PbQueryContext, TruncateTableExpr,
|
||||
};
|
||||
use base64::engine::general_purpose;
|
||||
use base64::Engine as _;
|
||||
use common_time::DatabaseTimeToLive;
|
||||
use prost::Message;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_with::{serde_as, DefaultOnNull};
|
||||
@@ -42,7 +45,7 @@ use table::metadata::{RawTableInfo, TableId};
|
||||
use table::table_name::TableName;
|
||||
use table::table_reference::TableReference;
|
||||
|
||||
use crate::error::{self, Result};
|
||||
use crate::error::{self, InvalidSetDatabaseOptionSnafu, InvalidUnsetDatabaseOptionSnafu, Result};
|
||||
use crate::key::FlowId;
|
||||
|
||||
/// DDL tasks
|
||||
@@ -57,6 +60,7 @@ pub enum DdlTask {
|
||||
AlterLogicalTables(Vec<AlterTableTask>),
|
||||
CreateDatabase(CreateDatabaseTask),
|
||||
DropDatabase(DropDatabaseTask),
|
||||
AlterDatabase(AlterDatabaseTask),
|
||||
CreateFlow(CreateFlowTask),
|
||||
DropFlow(DropFlowTask),
|
||||
CreateView(CreateViewTask),
|
||||
@@ -99,7 +103,7 @@ impl DdlTask {
|
||||
}
|
||||
|
||||
/// Creates a [`DdlTask`] to alter several logical tables.
|
||||
pub fn new_alter_logical_tables(table_data: Vec<AlterExpr>) -> Self {
|
||||
pub fn new_alter_logical_tables(table_data: Vec<AlterTableExpr>) -> Self {
|
||||
DdlTask::AlterLogicalTables(
|
||||
table_data
|
||||
.into_iter()
|
||||
@@ -149,8 +153,13 @@ impl DdlTask {
|
||||
})
|
||||
}
|
||||
|
||||
/// Creates a [`DdlTask`] to alter a database.
|
||||
pub fn new_alter_database(alter_expr: AlterDatabaseExpr) -> Self {
|
||||
DdlTask::AlterDatabase(AlterDatabaseTask { alter_expr })
|
||||
}
|
||||
|
||||
/// Creates a [`DdlTask`] to alter a table.
|
||||
pub fn new_alter_table(alter_table: AlterExpr) -> Self {
|
||||
pub fn new_alter_table(alter_table: AlterTableExpr) -> Self {
|
||||
DdlTask::AlterTable(AlterTableTask { alter_table })
|
||||
}
|
||||
|
||||
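A sketch of constructing the new task variant; `AlterDatabaseExpr` is already imported from `api::v1` above, its field names here (`catalog_name`, `schema_name`) are assumed from the protobuf definition, and the values are placeholders:

fn alter_database_task_sketch() -> DdlTask {
    // Illustrative only; `kind` (set/unset database options) would normally be
    // filled in from the parsed ALTER DATABASE statement.
    DdlTask::new_alter_database(AlterDatabaseExpr {
        catalog_name: "greptime".to_string(),
        schema_name: "public".to_string(),
        ..Default::default()
    })
}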
@@ -223,6 +232,9 @@ impl TryFrom<Task> for DdlTask {
|
||||
Task::DropDatabaseTask(drop_database) => {
|
||||
Ok(DdlTask::DropDatabase(drop_database.try_into()?))
|
||||
}
|
||||
Task::AlterDatabaseTask(alter_database) => {
|
||||
Ok(DdlTask::AlterDatabase(alter_database.try_into()?))
|
||||
}
|
||||
Task::CreateFlowTask(create_flow) => Ok(DdlTask::CreateFlow(create_flow.try_into()?)),
|
||||
Task::DropFlowTask(drop_flow) => Ok(DdlTask::DropFlow(drop_flow.try_into()?)),
|
||||
Task::CreateViewTask(create_view) => Ok(DdlTask::CreateView(create_view.try_into()?)),
|
||||
@@ -272,6 +284,7 @@ impl TryFrom<SubmitDdlTaskRequest> for PbDdlTaskRequest {
|
||||
}
|
||||
DdlTask::CreateDatabase(task) => Task::CreateDatabaseTask(task.try_into()?),
|
||||
DdlTask::DropDatabase(task) => Task::DropDatabaseTask(task.try_into()?),
|
||||
DdlTask::AlterDatabase(task) => Task::AlterDatabaseTask(task.try_into()?),
|
||||
DdlTask::CreateFlow(task) => Task::CreateFlowTask(task.into()),
|
||||
DdlTask::DropFlow(task) => Task::DropFlowTask(task.into()),
|
||||
DdlTask::CreateView(task) => Task::CreateViewTask(task.try_into()?),
|
||||
@@ -680,7 +693,8 @@ impl<'de> Deserialize<'de> for CreateTableTask {
|
||||
|
||||
#[derive(Debug, PartialEq, Clone)]
|
||||
pub struct AlterTableTask {
|
||||
pub alter_table: AlterExpr,
|
||||
// TODO(CookiePieWw): Replace proto struct with user-defined struct
|
||||
pub alter_table: AlterTableExpr,
|
||||
}
|
||||
|
||||
impl AlterTableTask {
|
||||
@@ -932,6 +946,121 @@ impl TryFrom<DropDatabaseTask> for PbDropDatabaseTask {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq, Clone)]
|
||||
pub struct AlterDatabaseTask {
|
||||
pub alter_expr: AlterDatabaseExpr,
|
||||
}
|
||||
|
||||
impl TryFrom<AlterDatabaseTask> for PbAlterDatabaseTask {
|
||||
type Error = error::Error;
|
||||
|
||||
fn try_from(task: AlterDatabaseTask) -> Result<Self> {
|
||||
Ok(PbAlterDatabaseTask {
|
||||
task: Some(task.alter_expr),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl TryFrom<PbAlterDatabaseTask> for AlterDatabaseTask {
|
||||
type Error = error::Error;
|
||||
|
||||
fn try_from(pb: PbAlterDatabaseTask) -> Result<Self> {
|
||||
let alter_expr = pb.task.context(error::InvalidProtoMsgSnafu {
|
||||
err_msg: "expected alter database",
|
||||
})?;
|
||||
|
||||
Ok(AlterDatabaseTask { alter_expr })
|
||||
}
|
||||
}
|
||||
|
||||
impl TryFrom<PbAlterDatabaseKind> for AlterDatabaseKind {
|
||||
type Error = error::Error;
|
||||
|
||||
fn try_from(pb: PbAlterDatabaseKind) -> Result<Self> {
|
||||
match pb {
|
||||
PbAlterDatabaseKind::SetDatabaseOptions(options) => {
|
||||
Ok(AlterDatabaseKind::SetDatabaseOptions(SetDatabaseOptions(
|
||||
options
|
||||
.set_database_options
|
||||
.into_iter()
|
||||
.map(SetDatabaseOption::try_from)
|
||||
.collect::<Result<Vec<_>>>()?,
|
||||
)))
|
||||
}
|
||||
PbAlterDatabaseKind::UnsetDatabaseOptions(options) => Ok(
|
||||
AlterDatabaseKind::UnsetDatabaseOptions(UnsetDatabaseOptions(
|
||||
options
|
||||
.keys
|
||||
.iter()
|
||||
.map(|key| UnsetDatabaseOption::try_from(key.as_str()))
|
||||
.collect::<Result<Vec<_>>>()?,
|
||||
)),
|
||||
),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const TTL_KEY: &str = "ttl";
|
||||
|
||||
impl TryFrom<PbOption> for SetDatabaseOption {
|
||||
type Error = error::Error;
|
||||
|
||||
fn try_from(PbOption { key, value }: PbOption) -> Result<Self> {
|
||||
match key.to_ascii_lowercase().as_str() {
|
||||
TTL_KEY => {
|
||||
let ttl = DatabaseTimeToLive::from_humantime_or_str(&value)
|
||||
.map_err(|_| InvalidSetDatabaseOptionSnafu { key, value }.build())?;
|
||||
|
||||
Ok(SetDatabaseOption::Ttl(ttl))
|
||||
}
|
||||
_ => InvalidSetDatabaseOptionSnafu { key, value }.fail(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
|
||||
pub enum SetDatabaseOption {
|
||||
Ttl(DatabaseTimeToLive),
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
|
||||
pub enum UnsetDatabaseOption {
|
||||
Ttl,
|
||||
}
|
||||
|
||||
impl TryFrom<&str> for UnsetDatabaseOption {
|
||||
type Error = error::Error;
|
||||
|
||||
fn try_from(key: &str) -> Result<Self> {
|
||||
match key.to_ascii_lowercase().as_str() {
|
||||
TTL_KEY => Ok(UnsetDatabaseOption::Ttl),
|
||||
_ => InvalidUnsetDatabaseOptionSnafu { key }.fail(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
|
||||
pub struct SetDatabaseOptions(pub Vec<SetDatabaseOption>);
|
||||
|
||||
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
|
||||
pub struct UnsetDatabaseOptions(pub Vec<UnsetDatabaseOption>);
|
||||
|
||||
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
|
||||
pub enum AlterDatabaseKind {
|
||||
SetDatabaseOptions(SetDatabaseOptions),
|
||||
UnsetDatabaseOptions(UnsetDatabaseOptions),
|
||||
}
|
||||
|
||||
impl AlterDatabaseTask {
|
||||
pub fn catalog(&self) -> &str {
|
||||
&self.alter_expr.catalog_name
|
||||
}
|
||||
|
||||
pub fn schema(&self) -> &str {
&self.alter_expr.schema_name
|
||||
}
|
||||
}
|
||||
|
||||
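The conversion above is where database options are validated on this path: keys are lowercased before matching and only `ttl` is recognized. A minimal sketch, reusing this file's imports (`PbOption` is `api::v1::Option`) with placeholder values:

fn parse_ttl_option_sketch() -> Result<SetDatabaseOption> {
    // "TTL" matches because the key is lowercased; any other key fails with
    // InvalidSetDatabaseOption, and an unparsable value is rejected as well.
    SetDatabaseOption::try_from(PbOption {
        key: "TTL".to_string(),
        value: "7d".to_string(),
    })
}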
/// Create flow
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct CreateFlowTask {
|
||||
@@ -1118,7 +1247,7 @@ impl From<QueryContext> for PbQueryContext {
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
use api::v1::{AlterExpr, ColumnDef, CreateTableExpr, SemanticType};
|
||||
use api::v1::{AlterTableExpr, ColumnDef, CreateTableExpr, SemanticType};
|
||||
use datatypes::schema::{ColumnSchema, RawSchema, SchemaBuilder};
|
||||
use store_api::metric_engine_consts::METRIC_ENGINE_NAME;
|
||||
use store_api::storage::ConcreteDataType;
|
||||
@@ -1146,7 +1275,7 @@ mod tests {
|
||||
#[test]
|
||||
fn test_basic_ser_de_alter_table_task() {
|
||||
let task = AlterTableTask {
|
||||
alter_table: AlterExpr::default(),
|
||||
alter_table: AlterTableExpr::default(),
|
||||
};
|
||||
|
||||
let output = serde_json::to_vec(&task).unwrap();
|
||||
|
||||
@@ -245,6 +245,14 @@ pub enum Error {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Invalid vector string: {}", vec_str))]
|
||||
InvalidVectorString {
|
||||
vec_str: String,
|
||||
source: DataTypeError,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
}
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
@@ -273,7 +281,8 @@ impl ErrorExt for Error {
|
||||
| Error::IntoVector { source, .. }
|
||||
| Error::FromScalarValue { source, .. }
|
||||
| Error::ConvertArrowSchema { source, .. }
|
||||
| Error::FromArrowArray { source, .. } => source.status_code(),
|
||||
| Error::FromArrowArray { source, .. }
|
||||
| Error::InvalidVectorString { source, .. } => source.status_code(),
|
||||
|
||||
Error::MissingTableMutationHandler { .. }
|
||||
| Error::MissingProcedureServiceHandler { .. }
|
||||
|
||||
@@ -20,6 +20,7 @@ pin-project.workspace = true
|
||||
serde.workspace = true
|
||||
serde_json.workspace = true
|
||||
snafu.workspace = true
|
||||
tokio.workspace = true
|
||||
|
||||
[dev-dependencies]
|
||||
tokio.workspace = true
|
||||
|
||||
173
src/common/recordbatch/src/cursor.rs
Normal file
@@ -0,0 +1,173 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use futures::StreamExt;
|
||||
use tokio::sync::Mutex;
|
||||
|
||||
use crate::error::Result;
|
||||
use crate::recordbatch::merge_record_batches;
|
||||
use crate::{RecordBatch, SendableRecordBatchStream};
|
||||
|
||||
struct Inner {
|
||||
stream: SendableRecordBatchStream,
|
||||
current_row_index: usize,
|
||||
current_batch: Option<RecordBatch>,
|
||||
total_rows_in_current_batch: usize,
|
||||
}
|
||||
|
||||
/// A cursor on RecordBatchStream that fetches data batch by batch
|
||||
pub struct RecordBatchStreamCursor {
|
||||
inner: Mutex<Inner>,
|
||||
}
|
||||
|
||||
impl RecordBatchStreamCursor {
|
||||
pub fn new(stream: SendableRecordBatchStream) -> RecordBatchStreamCursor {
|
||||
Self {
|
||||
inner: Mutex::new(Inner {
|
||||
stream,
|
||||
current_row_index: 0,
|
||||
current_batch: None,
|
||||
total_rows_in_current_batch: 0,
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
/// Takes `size` rows from the `RecordBatchStream` and creates a new
/// `RecordBatch` from those rows.
|
||||
pub async fn take(&self, size: usize) -> Result<RecordBatch> {
|
||||
let mut remaining_rows_to_take = size;
|
||||
let mut accumulated_rows = Vec::new();
|
||||
|
||||
let mut inner = self.inner.lock().await;
|
||||
|
||||
while remaining_rows_to_take > 0 {
|
||||
// Ensure we have a current batch or fetch the next one
|
||||
if inner.current_batch.is_none()
|
||||
|| inner.current_row_index >= inner.total_rows_in_current_batch
|
||||
{
|
||||
match inner.stream.next().await {
|
||||
Some(Ok(batch)) => {
|
||||
inner.total_rows_in_current_batch = batch.num_rows();
|
||||
inner.current_batch = Some(batch);
|
||||
inner.current_row_index = 0;
|
||||
}
|
||||
Some(Err(e)) => return Err(e),
|
||||
None => {
|
||||
// Stream is exhausted
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If we still have no batch after attempting to fetch
|
||||
let current_batch = match &inner.current_batch {
|
||||
Some(batch) => batch,
|
||||
None => break,
|
||||
};
|
||||
|
||||
// Calculate how many rows we can take from this batch
|
||||
let rows_to_take_from_batch = remaining_rows_to_take
|
||||
.min(inner.total_rows_in_current_batch - inner.current_row_index);
|
||||
|
||||
// Slice the current batch to get the desired rows
|
||||
let taken_batch =
|
||||
current_batch.slice(inner.current_row_index, rows_to_take_from_batch)?;
|
||||
|
||||
// Add the taken batch to accumulated rows
|
||||
accumulated_rows.push(taken_batch);
|
||||
|
||||
// Update cursor and remaining rows
|
||||
inner.current_row_index += rows_to_take_from_batch;
|
||||
remaining_rows_to_take -= rows_to_take_from_batch;
|
||||
}
|
||||
|
||||
// If no rows were accumulated, return empty
|
||||
if accumulated_rows.is_empty() {
|
||||
return Ok(RecordBatch::new_empty(inner.stream.schema()));
|
||||
}
|
||||
|
||||
// If only one batch was accumulated, return it directly
|
||||
if accumulated_rows.len() == 1 {
|
||||
return Ok(accumulated_rows.remove(0));
|
||||
}
|
||||
|
||||
// Merge multiple batches
|
||||
merge_record_batches(inner.stream.schema(), &accumulated_rows)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::schema::{ColumnSchema, Schema};
|
||||
use datatypes::vectors::StringVector;
|
||||
|
||||
use super::*;
|
||||
use crate::RecordBatches;
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_cursor() {
|
||||
let schema = Arc::new(Schema::new(vec![ColumnSchema::new(
|
||||
"a",
|
||||
ConcreteDataType::string_datatype(),
|
||||
false,
|
||||
)]));
|
||||
|
||||
let rbs = RecordBatches::try_from_columns(
|
||||
schema.clone(),
|
||||
vec![Arc::new(StringVector::from(vec!["hello", "world"])) as _],
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let cursor = RecordBatchStreamCursor::new(rbs.as_stream());
|
||||
let result_rb = cursor.take(1).await.expect("take from cursor failed");
|
||||
assert_eq!(result_rb.num_rows(), 1);
|
||||
|
||||
let result_rb = cursor.take(1).await.expect("take from cursor failed");
|
||||
assert_eq!(result_rb.num_rows(), 1);
|
||||
|
||||
let result_rb = cursor.take(1).await.expect("take from cursor failed");
|
||||
assert_eq!(result_rb.num_rows(), 0);
|
||||
|
||||
let rb = RecordBatch::new(
|
||||
schema.clone(),
|
||||
vec![Arc::new(StringVector::from(vec!["hello", "world"])) as _],
|
||||
)
|
||||
.unwrap();
|
||||
let rbs2 =
|
||||
RecordBatches::try_new(schema.clone(), vec![rb.clone(), rb.clone(), rb]).unwrap();
|
||||
let cursor = RecordBatchStreamCursor::new(rbs2.as_stream());
|
||||
let result_rb = cursor.take(3).await.expect("take from cursor failed");
|
||||
assert_eq!(result_rb.num_rows(), 3);
|
||||
let result_rb = cursor.take(2).await.expect("take from cursor failed");
|
||||
assert_eq!(result_rb.num_rows(), 2);
|
||||
let result_rb = cursor.take(2).await.expect("take from cursor failed");
|
||||
assert_eq!(result_rb.num_rows(), 1);
|
||||
let result_rb = cursor.take(2).await.expect("take from cursor failed");
|
||||
assert_eq!(result_rb.num_rows(), 0);
|
||||
|
||||
let rb = RecordBatch::new(
|
||||
schema.clone(),
|
||||
vec![Arc::new(StringVector::from(vec!["hello", "world"])) as _],
|
||||
)
|
||||
.unwrap();
|
||||
let rbs3 =
|
||||
RecordBatches::try_new(schema.clone(), vec![rb.clone(), rb.clone(), rb]).unwrap();
|
||||
let cursor = RecordBatchStreamCursor::new(rbs3.as_stream());
|
||||
let result_rb = cursor.take(10).await.expect("take from cursor failed");
|
||||
assert_eq!(result_rb.num_rows(), 6);
|
||||
}
|
||||
}
|
||||
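A usage sketch for the cursor above, reusing this file's imports: page through any `SendableRecordBatchStream` until `take` returns an empty batch. The page size is an arbitrary placeholder.

async fn drain_in_pages(stream: SendableRecordBatchStream) -> Result<Vec<RecordBatch>> {
    let cursor = RecordBatchStreamCursor::new(stream);
    let mut pages = Vec::new();
    loop {
        // `take` returns fewer rows than requested only when the stream ends.
        let page = cursor.take(1024).await?;
        if page.num_rows() == 0 {
            break;
        }
        pages.push(page);
    }
    Ok(pages)
}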
@@ -161,6 +161,20 @@ pub enum Error {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
#[snafu(display("Stream timeout"))]
|
||||
StreamTimeout {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
#[snafu(source)]
|
||||
error: tokio::time::error::Elapsed,
|
||||
},
|
||||
#[snafu(display("RecordBatch slice index overflow: {visit_index} > {size}"))]
|
||||
RecordBatchSliceIndexOverflow {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
size: usize,
|
||||
visit_index: usize,
|
||||
},
|
||||
}
|
||||
|
||||
impl ErrorExt for Error {
|
||||
@@ -175,7 +189,8 @@ impl ErrorExt for Error {
|
||||
| Error::Format { .. }
|
||||
| Error::ToArrowScalar { .. }
|
||||
| Error::ProjectArrowRecordBatch { .. }
|
||||
| Error::PhysicalExpr { .. } => StatusCode::Internal,
|
||||
| Error::PhysicalExpr { .. }
|
||||
| Error::RecordBatchSliceIndexOverflow { .. } => StatusCode::Internal,
|
||||
|
||||
Error::PollStream { .. } => StatusCode::EngineExecuteQuery,
|
||||
|
||||
@@ -190,6 +205,8 @@ impl ErrorExt for Error {
|
||||
Error::SchemaConversion { source, .. } | Error::CastVector { source, .. } => {
|
||||
source.status_code()
|
||||
}
|
||||
|
||||
Error::StreamTimeout { .. } => StatusCode::Cancelled,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -15,6 +15,7 @@
|
||||
#![feature(never_type)]
|
||||
|
||||
pub mod adapter;
|
||||
pub mod cursor;
|
||||
pub mod error;
|
||||
pub mod filter;
|
||||
mod recordbatch;
|
||||
|
||||
@@ -23,7 +23,7 @@ use datatypes::value::Value;
|
||||
use datatypes::vectors::{Helper, VectorRef};
|
||||
use serde::ser::{Error, SerializeStruct};
|
||||
use serde::{Serialize, Serializer};
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
|
||||
use crate::error::{
|
||||
self, CastVectorSnafu, ColumnNotExistsSnafu, DataTypesSnafu, ProjectArrowRecordBatchSnafu,
|
||||
@@ -194,6 +194,19 @@ impl RecordBatch {
|
||||
.map(|t| t.to_string())
|
||||
.unwrap_or("failed to pretty display a record batch".to_string())
|
||||
}
|
||||
|
||||
/// Returns a sliced record batch that starts at `offset` and contains `len` rows.
|
||||
pub fn slice(&self, offset: usize, len: usize) -> Result<RecordBatch> {
|
||||
ensure!(
|
||||
offset + len <= self.num_rows(),
|
||||
error::RecordBatchSliceIndexOverflowSnafu {
|
||||
size: self.num_rows(),
|
||||
visit_index: offset + len
|
||||
}
|
||||
);
|
||||
let columns = self.columns.iter().map(|vector| vector.slice(offset, len));
|
||||
RecordBatch::new(self.schema.clone(), columns)
|
||||
}
|
||||
}
|
||||
|
||||
impl Serialize for RecordBatch {
|
||||
@@ -256,6 +269,36 @@ impl Iterator for RecordBatchRowIterator<'_> {
|
||||
}
|
||||
}
|
||||
|
||||
/// Merges multiple record batches into a single one.
|
||||
pub fn merge_record_batches(schema: SchemaRef, batches: &[RecordBatch]) -> Result<RecordBatch> {
|
||||
let batches_len = batches.len();
|
||||
if batches_len == 0 {
|
||||
return Ok(RecordBatch::new_empty(schema));
|
||||
}
|
||||
|
||||
let n_rows = batches.iter().map(|b| b.num_rows()).sum();
|
||||
let n_columns = schema.num_columns();
|
||||
// Collect arrays from each batch
|
||||
let mut merged_columns = Vec::with_capacity(n_columns);
|
||||
|
||||
for col_idx in 0..n_columns {
|
||||
let mut acc = schema.column_schemas()[col_idx]
|
||||
.data_type
|
||||
.create_mutable_vector(n_rows);
|
||||
|
||||
for batch in batches {
|
||||
let column = batch.column(col_idx);
|
||||
acc.extend_slice_of(column.as_ref(), 0, column.len())
|
||||
.context(error::DataTypesSnafu)?;
|
||||
}
|
||||
|
||||
merged_columns.push(acc.to_vector());
|
||||
}
|
||||
|
||||
// Create a new RecordBatch with merged columns
|
||||
RecordBatch::new(schema, merged_columns)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
@@ -375,4 +418,80 @@ mod tests {
|
||||
|
||||
assert!(record_batch_iter.next().is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_record_batch_slice() {
|
||||
let column_schemas = vec![
|
||||
ColumnSchema::new("numbers", ConcreteDataType::uint32_datatype(), false),
|
||||
ColumnSchema::new("strings", ConcreteDataType::string_datatype(), true),
|
||||
];
|
||||
let schema = Arc::new(Schema::new(column_schemas));
|
||||
let columns: Vec<VectorRef> = vec![
|
||||
Arc::new(UInt32Vector::from_slice(vec![1, 2, 3, 4])),
|
||||
Arc::new(StringVector::from(vec![
|
||||
None,
|
||||
Some("hello"),
|
||||
Some("greptime"),
|
||||
None,
|
||||
])),
|
||||
];
|
||||
let recordbatch = RecordBatch::new(schema, columns).unwrap();
|
||||
let recordbatch = recordbatch.slice(1, 2).expect("recordbatch slice");
|
||||
let mut record_batch_iter = recordbatch.rows();
|
||||
assert_eq!(
|
||||
vec![Value::UInt32(2), Value::String("hello".into())],
|
||||
record_batch_iter
|
||||
.next()
|
||||
.unwrap()
|
||||
.into_iter()
|
||||
.collect::<Vec<Value>>()
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
vec![Value::UInt32(3), Value::String("greptime".into())],
|
||||
record_batch_iter
|
||||
.next()
|
||||
.unwrap()
|
||||
.into_iter()
|
||||
.collect::<Vec<Value>>()
|
||||
);
|
||||
|
||||
assert!(record_batch_iter.next().is_none());
|
||||
|
||||
assert!(recordbatch.slice(1, 5).is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_merge_record_batch() {
|
||||
let column_schemas = vec![
|
||||
ColumnSchema::new("numbers", ConcreteDataType::uint32_datatype(), false),
|
||||
ColumnSchema::new("strings", ConcreteDataType::string_datatype(), true),
|
||||
];
|
||||
let schema = Arc::new(Schema::new(column_schemas));
|
||||
let columns: Vec<VectorRef> = vec![
|
||||
Arc::new(UInt32Vector::from_slice(vec![1, 2, 3, 4])),
|
||||
Arc::new(StringVector::from(vec![
|
||||
None,
|
||||
Some("hello"),
|
||||
Some("greptime"),
|
||||
None,
|
||||
])),
|
||||
];
|
||||
let recordbatch = RecordBatch::new(schema.clone(), columns).unwrap();
|
||||
|
||||
let columns: Vec<VectorRef> = vec![
|
||||
Arc::new(UInt32Vector::from_slice(vec![1, 2, 3, 4])),
|
||||
Arc::new(StringVector::from(vec![
|
||||
None,
|
||||
Some("hello"),
|
||||
Some("greptime"),
|
||||
None,
|
||||
])),
|
||||
];
|
||||
let recordbatch2 = RecordBatch::new(schema.clone(), columns).unwrap();
|
||||
|
||||
let merged = merge_record_batches(schema.clone(), &[recordbatch, recordbatch2])
|
||||
.expect("merge recordbatch");
|
||||
assert_eq!(merged.num_rows(), 8);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -13,6 +13,8 @@ chrono.workspace = true
|
||||
chrono-tz = "0.8"
|
||||
common-error.workspace = true
|
||||
common-macro.workspace = true
|
||||
humantime.workspace = true
|
||||
humantime-serde.workspace = true
|
||||
once_cell.workspace = true
|
||||
serde = { version = "1.0", features = ["derive"] }
|
||||
serde_json.workspace = true
|
||||
|
||||
@@ -93,12 +93,28 @@ pub enum Error {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to parse duration"))]
|
||||
ParseDuration {
|
||||
#[snafu(source)]
|
||||
error: humantime::DurationError,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Database's TTL can't be `instant`"))]
|
||||
InvalidDatabaseTtl {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
}
|
||||
|
||||
impl ErrorExt for Error {
|
||||
fn status_code(&self) -> StatusCode {
|
||||
match self {
|
||||
Error::ParseDateStr { .. }
|
||||
| Error::ParseDuration { .. }
|
||||
| Error::InvalidDatabaseTtl { .. }
|
||||
| Error::ParseTimestamp { .. }
|
||||
| Error::InvalidTimezoneOffset { .. }
|
||||
| Error::Format { .. }
|
||||
|
||||
@@ -22,6 +22,7 @@ pub mod time;
|
||||
pub mod timestamp;
|
||||
pub mod timestamp_millis;
|
||||
pub mod timezone;
|
||||
pub mod ttl;
|
||||
pub mod util;
|
||||
|
||||
pub use date::Date;
|
||||
@@ -32,3 +33,4 @@ pub use range::RangeMillis;
|
||||
pub use timestamp::Timestamp;
|
||||
pub use timestamp_millis::TimestampMillis;
|
||||
pub use timezone::Timezone;
|
||||
pub use ttl::{DatabaseTimeToLive, TimeToLive, FOREVER, INSTANT};
|