Compare commits


4 Commits

| Author | SHA1 | Message | Date |
| ------ | ---- | ------- | ---- |
| Ruihang Xia | 038bc4fe6e | revert toml format (Signed-off-by: Ruihang Xia <waynestxia@gmail.com>) | 2024-03-14 00:46:00 +08:00 |
| Ruihang Xia | 6d07c422d8 | Merge branch 'main' into fix-proto-clear (Signed-off-by: Ruihang Xia <waynestxia@gmail.com>) | 2024-03-14 00:36:28 +08:00 |
| Ruihang Xia | 6c14ece23f | accomplish test assertion (Signed-off-by: Ruihang Xia <waynestxia@gmail.com>) | 2024-03-14 00:32:49 +08:00 |
| Ruihang Xia | 89c51d9b87 | reset Sample (Signed-off-by: Ruihang Xia <waynestxia@gmail.com>) | 2024-03-13 23:32:22 +08:00 |
363 changed files with 4419 additions and 13982 deletions

View File

@@ -70,7 +70,7 @@ runs:
- name: Build greptime binary
shell: pwsh
run: cargo build --profile ${{ inputs.cargo-profile }} --features ${{ inputs.features }} --target ${{ inputs.arch }} --bin greptime
run: cargo build --profile ${{ inputs.cargo-profile }} --features ${{ inputs.features }} --target ${{ inputs.arch }}
- name: Upload artifacts
uses: ./.github/actions/upload-artifacts

View File

@@ -40,4 +40,3 @@ jobs:
uses: JamesIves/github-pages-deploy-action@v4
with:
folder: target/doc
single-commit: true

View File

@@ -117,7 +117,7 @@ jobs:
artifacts-dir: bins
version: current
fuzztest:
fuzztest:
name: Fuzz Test
needs: build
runs-on: ubuntu-latest
@@ -148,7 +148,7 @@ jobs:
- name: Unzip binaries
run: tar -xvf ./bins.tar.gz
- name: Run GreptimeDB
run: |
run: |
./bins/greptime standalone start&
- name: Fuzz Test
uses: ./.github/actions/fuzz-test
@@ -279,10 +279,6 @@ jobs:
with:
# Shares cross multiple jobs
shared-key: "coverage-test"
- name: Docker Cache
uses: ScribeMD/docker-cache@0.3.7
with:
key: docker-${{ runner.os }}-coverage
- name: Install latest nextest release
uses: taiki-e/install-action@nextest
- name: Install cargo-llvm-cov

View File

@@ -1,21 +0,0 @@
name: Auto Unassign
on:
schedule:
- cron: '4 2 * * *'
workflow_dispatch:
permissions:
contents: read
issues: write
pull-requests: write
jobs:
auto-unassign:
name: Auto Unassign
runs-on: ubuntu-latest
steps:
- name: Auto Unassign
uses: tisonspieces/auto-unassign@main
with:
token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
repository: ${{ github.repository }}

CODE_OF_CONDUCT.md (new file, 132 changed lines)
View File

@@ -0,0 +1,132 @@
# Contributor Covenant Code of Conduct
## Our Pledge
We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, caste, color, religion, or sexual
identity and orientation.
We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.
## Our Standards
Examples of behavior that contributes to a positive environment for our
community include:
* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
and learning from the experience
* Focusing on what is best not just for us as individuals, but for the overall
community
Examples of unacceptable behavior include:
* The use of sexualized language or imagery, and sexual attention or advances of
any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email address,
without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Enforcement Responsibilities
Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.
Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.
## Scope
This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at
info@greptime.com.
All complaints will be reviewed and investigated promptly and fairly.
All community leaders are obligated to respect the privacy and security of the
reporter of any incident.
## Enforcement Guidelines
Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:
### 1. Correction
**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.
**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.
### 2. Warning
**Community Impact**: A violation through a single incident or series of
actions.
**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or permanent
ban.
### 3. Temporary Ban
**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.
**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.
### 4. Permanent Ban
**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.
**Consequence**: A permanent ban from any sort of public interaction within the
community.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.1, available at
[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1].
Community Impact Guidelines were inspired by
[Mozilla's code of conduct enforcement ladder][Mozilla CoC].
For answers to common questions about this code of conduct, see the FAQ at
[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at
[https://www.contributor-covenant.org/translations][translations].
[homepage]: https://www.contributor-covenant.org
[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html
[Mozilla CoC]: https://github.com/mozilla/diversity
[FAQ]: https://www.contributor-covenant.org/faq
[translations]: https://www.contributor-covenant.org/translations

Cargo.lock (generated; 393 changed lines)

File diff suppressed because it is too large

View File

@@ -62,7 +62,7 @@ members = [
resolver = "2"
[workspace.package]
version = "0.7.1"
version = "0.7.0"
edition = "2021"
license = "Apache-2.0"
@@ -103,14 +103,13 @@ etcd-client = "0.12"
fst = "0.4.7"
futures = "0.3"
futures-util = "0.3"
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "06f6297ff3cab578a1589741b504342fbad70453" }
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "96f1f0404f421ee560a4310c73c5071e49168168" }
humantime-serde = "1.1"
itertools = "0.10"
lazy_static = "1.4"
meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "80b72716dcde47ec4161478416a5c6c21343364d" }
mockall = "0.11.4"
moka = "0.12"
notify = "6.1"
num_cpus = "1.16"
once_cell = "1.18"
opentelemetry-proto = { git = "https://github.com/waynexia/opentelemetry-rust.git", rev = "33841b38dda79b15f2024952be5f32533325ca02", features = [
@@ -126,7 +125,7 @@ prost = "0.12"
raft-engine = { version = "0.4.1", default-features = false }
rand = "0.8"
regex = "1.8"
regex-automata = { version = "0.4" }
regex-automata = { version = "0.2", features = ["transducer"] }
reqwest = { version = "0.11", default-features = false, features = [
"json",
"rustls-tls-native-roots",
@@ -134,7 +133,6 @@ reqwest = { version = "0.11", default-features = false, features = [
] }
rskafka = "0.5"
rust_decimal = "1.33"
schemars = "0.8"
serde = { version = "1.0", features = ["derive"] }
serde_json = { version = "1.0", features = ["float_roundtrip"] }
serde_with = "3"

README.md (219 changed lines)
View File

@@ -6,154 +6,145 @@
</picture>
</p>
<h1 align="center">Cloud-scale, Fast and Efficient Time Series Database</h1>
<div align="center">
<h3 align="center">
<a href="https://greptime.com/product/cloud">GreptimeCloud</a> |
<a href="https://docs.greptime.com/">User guide</a> |
<a href="https://greptimedb.rs/">API Docs</a> |
<a href="https://github.com/GreptimeTeam/greptimedb/issues/3412">Roadmap 2024</a>
</h4>
The next-generation hybrid time-series/analytics processing database in the cloud
</h3>
<a href="https://github.com/GreptimeTeam/greptimedb/releases/latest">
<img src="https://img.shields.io/github/v/release/GreptimeTeam/greptimedb.svg" alt="Version"/>
</a>
<a href="https://github.com/GreptimeTeam/greptimedb/releases/latest">
<img src="https://img.shields.io/github/release-date/GreptimeTeam/greptimedb.svg" alt="Releases"/>
</a>
<a href="https://hub.docker.com/r/greptime/greptimedb/">
<img src="https://img.shields.io/docker/pulls/greptime/greptimedb.svg" alt="Docker Pulls"/>
</a>
<a href="https://github.com/GreptimeTeam/greptimedb/actions/workflows/develop.yml">
<img src="https://github.com/GreptimeTeam/greptimedb/actions/workflows/develop.yml/badge.svg" alt="GitHub Actions"/>
</a>
<a href="https://codecov.io/gh/GrepTimeTeam/greptimedb">
<img src="https://codecov.io/gh/GrepTimeTeam/greptimedb/branch/main/graph/badge.svg?token=FITFDI3J3C" alt="Codecov"/>
</a>
<a href="https://github.com/greptimeTeam/greptimedb/blob/main/LICENSE">
<img src="https://img.shields.io/github/license/greptimeTeam/greptimedb" alt="License"/>
</a>
<p align="center">
<a href="https://codecov.io/gh/GrepTimeTeam/greptimedb"><img src="https://codecov.io/gh/GrepTimeTeam/greptimedb/branch/main/graph/badge.svg?token=FITFDI3J3C"></img></a>
&nbsp;
<a href="https://github.com/GreptimeTeam/greptimedb/actions/workflows/develop.yml"><img src="https://github.com/GreptimeTeam/greptimedb/actions/workflows/develop.yml/badge.svg" alt="CI"></img></a>
&nbsp;
<a href="https://github.com/greptimeTeam/greptimedb/blob/main/LICENSE"><img src="https://img.shields.io/github/license/greptimeTeam/greptimedb"></a>
</p>
<br/>
<p align="center">
<a href="https://twitter.com/greptime"><img src="https://img.shields.io/badge/twitter-follow_us-1d9bf0.svg"></a>
&nbsp;
<a href="https://www.linkedin.com/company/greptime/"><img src="https://img.shields.io/badge/linkedin-connect_with_us-0a66c2.svg"></a>
&nbsp;
<a href="https://greptime.com/slack"><img src="https://img.shields.io/badge/slack-GreptimeDB-0abd59?logo=slack" alt="slack" /></a>
</p>
<a href="https://greptime.com/slack">
<img src="https://img.shields.io/badge/slack-GreptimeDB-0abd59?logo=slack&style=for-the-badge" alt="Slack"/>
</a>
<a href="https://twitter.com/greptime">
<img src="https://img.shields.io/badge/twitter-follow_us-1d9bf0.svg?style=for-the-badge" alt="Twitter"/>
</a>
<a href="https://www.linkedin.com/company/greptime/">
<img src="https://img.shields.io/badge/linkedin-connect_with_us-0a66c2.svg?style=for-the-badge" alt="LinkedIn"/>
</a>
</div>
## What is GreptimeDB
## Introduction
GreptimeDB is an open-source time-series database focusing on efficiency, scalability, and analytical capabilities.
It's designed to work on infrastructure of the cloud era, and users benefit from its elasticity and commodity storage.
**GreptimeDB** is an open-source time-series database focusing on efficiency, scalability, and analytical capabilities.
Designed to work on infrastructure of the cloud era, GreptimeDB benefits users with its elasticity and commodity storage, offering a fast and cost-effective **alternative to InfluxDB** and a **long-term storage for Prometheus**.
Our core developers have been building time-series data platforms for years. Based on their best practices, GreptimeDB is born to give you:
## Why GreptimeDB
- Optimized columnar layout for handling time-series data; compacted, compressed, and stored on various storage backends, particularly cloud object storage with 50x cost efficiency.
- Fully open-source distributed cluster architecture that harnesses the power of cloud-native elastic computing resources.
- Seamless scalability from a standalone binary at edge to a robust, highly available distributed cluster in cloud, with a transparent experience for both developers and administrators.
- Native SQL and PromQL for queries, and Python scripting to facilitate complex analytical tasks.
- Flexible indexing capabilities and a distributed, parallel-processing query engine that tackles high-cardinality issues.
- Widely adopted database protocols and APIs, including MySQL, PostgreSQL, and Prometheus Remote Storage, etc.
Our core developers have been building time-series data platforms for years. Based on our best practices, GreptimeDB is born to give you:
## Quick Start
* **Easy horizontal scaling**
Seamless scalability from a standalone binary at edge to a robust, highly available distributed cluster in cloud, with a transparent experience for both developers and administrators.
* **Analyzing time-series data**
Query your time-series data with SQL and PromQL. Use Python scripts to facilitate complex analytical tasks.
* **Cloud-native distributed database**
Fully open-source distributed cluster architecture that harnesses the power of cloud-native elastic computing resources.
* **Performant and Cost-effective**
Flexible indexing capabilities and a distributed, parallel-processing query engine that tackles high-cardinality issues. Optimized columnar layout for handling time-series data; compacted, compressed, and stored on various storage backends, particularly cloud object storage, with 50x cost efficiency.
* **Compatible with InfluxDB, Prometheus and more protocols**
Widely adopted database protocols and APIs, including MySQL, PostgreSQL, and Prometheus Remote Storage, etc. [Read more](https://docs.greptime.com/user-guide/clients/overview).
## Try GreptimeDB
### 1. [GreptimePlay](https://greptime.com/playground)
### [GreptimePlay](https://greptime.com/playground)
Try out the features of GreptimeDB right from your browser.
### 2. [GreptimeCloud](https://console.greptime.cloud/)
### Build
Start instantly with a free cluster.
#### Build from Source
### 3. Docker Image
To compile GreptimeDB from source, you'll need:
To install GreptimeDB locally, the recommended way is via Docker:
- C/C++ Toolchain: provides basic tools for compiling and linking. This is
available as `build-essential` on Ubuntu and under similar names on other platforms.
- Rust: the easiest way to install Rust is to use
[`rustup`](https://rustup.rs/), which will check our `rust-toolchain` file and
install the correct Rust version for you.
- Protobuf: `protoc` is required for compiling `.proto` files. `protobuf` is
available from the major package managers on macOS and Linux distributions. You can
find installation instructions [here](https://grpc.io/docs/protoc-installation/).
**Note that the `protoc` version needs to be >= 3.15** because we use the `optional`
keyword. You can check it with `protoc --version`.
- python3-dev or python3-devel (optional; only needed if you want to run scripts
in CPython, which also requires enabling the `pyo3_backend` feature when compiling, either with `cargo run -F pyo3_backend` or by adding `pyo3_backend` to `features.default` in src/script/Cargo.toml, like `default = ["python", "pyo3_backend"]`): this installs the Python shared library required for running the Python
scripting engine in CPython mode. It is available as `python3-dev` on
Ubuntu (install it with `sudo apt install python3-dev`) and as
`python3-devel` on RPM-based distributions (e.g. Fedora, Red Hat, SUSE). macOS's
`Python3` package ships this shared library by default. More details on compiling with PyO3 can be found in [PyO3](https://pyo3.rs/v0.18.1/building_and_distribution#configuring-the-python-version)'s documentation.
```shell
docker pull greptime/greptimedb
#### Build with Docker
A docker image with necessary dependencies is provided:
```
docker build --network host -f docker/Dockerfile -t greptimedb .
```
Start a GreptimeDB container with:
### Run
Start GreptimeDB from source code, in standalone mode:
```shell
docker run --rm --name greptime --net=host greptime/greptimedb standalone start
```
Read more about [Installation](https://docs.greptime.com/getting-started/installation/overview) on docs.
## Getting Started
* [Quickstart](https://docs.greptime.com/getting-started/quick-start/overview)
* [Write Data](https://docs.greptime.com/user-guide/clients/overview)
* [Query Data](https://docs.greptime.com/user-guide/query-data/overview)
* [Operations](https://docs.greptime.com/user-guide/operations/overview)
## Build
Check the prerequisite:
* [Rust toolchain](https://www.rust-lang.org/tools/install) (nightly)
* [Protobuf compiler](https://grpc.io/docs/protoc-installation/) (>= 3.15)
* Python toolchain (optional): Required only if built with the PyO3 backend. More details on compiling with PyO3 can be found in its [documentation](https://pyo3.rs/v0.18.1/building_and_distribution#configuring-the-python-version).
Build GreptimeDB binary:
```shell
make
```
Run a standalone server:
```shell
cargo run -- standalone start
```
## Extension
Or if you built from docker:
```
docker run -p 4002:4002 -v "$(pwd):/tmp/greptimedb" greptime/greptimedb standalone start
```
Please see the online document site for more installation options and [operations info](https://docs.greptime.com/user-guide/operations/overview).
### Get started
Read the [complete getting started guide](https://docs.greptime.com/getting-started/overview) on our [official document site](https://docs.greptime.com/).
To write and query data, GreptimeDB is compatible with multiple [protocols and clients](https://docs.greptime.com/user-guide/clients/overview).
## Resources
### Installation
- [Pre-built Binaries](https://greptime.com/download):
For Linux and macOS, you can easily download pre-built binaries including official releases and nightly builds that are ready to use.
In most cases, downloading the version without PyO3 is sufficient. However, if you plan to run scripts in CPython (and use Python packages like NumPy and Pandas), you will need to download the version with PyO3 and install a Python interpreter whose version matches the one the PyO3 build was compiled against.
We recommend using virtualenv for the installation process to manage multiple Python versions.
- [Docker Images](https://hub.docker.com/r/greptime/greptimedb) (**recommended**): pre-built
Docker images; this is the easiest way to try GreptimeDB. By default it runs CPython scripts with `pyo3_backend` enabled.
- [`gtctl`](https://github.com/GreptimeTeam/gtctl): the command-line tool for
Kubernetes deployment
### Documentation
- GreptimeDB [User Guide](https://docs.greptime.com/user-guide/concepts/overview)
- GreptimeDB [Developer
Guide](https://docs.greptime.com/developer-guide/overview.html)
- GreptimeDB [internal code document](https://greptimedb.rs)
### Dashboard
- [The dashboard UI for GreptimeDB](https://github.com/GreptimeTeam/dashboard)
### SDK
- [GreptimeDB C++ Client](https://github.com/GreptimeTeam/greptimedb-client-cpp)
- [GreptimeDB Erlang Client](https://github.com/GreptimeTeam/greptimedb-client-erl)
- [GreptimeDB Go Ingester](https://github.com/GreptimeTeam/greptimedb-ingester-go)
- [GreptimeDB Java Ingester](https://github.com/GreptimeTeam/greptimedb-ingester-java)
- [GreptimeDB C++ Ingester](https://github.com/GreptimeTeam/greptimedb-ingester-cpp)
- [GreptimeDB Erlang Ingester](https://github.com/GreptimeTeam/greptimedb-ingester-erl)
- [GreptimeDB Rust Ingester](https://github.com/GreptimeTeam/greptimedb-ingester-rust)
- [GreptimeDB JavaScript Ingester](https://github.com/GreptimeTeam/greptime-ingester-js)
- [GreptimeDB Python Client](https://github.com/GreptimeTeam/greptimedb-client-py) (WIP)
- [GreptimeDB Rust Client](https://github.com/GreptimeTeam/greptimedb-client-rust)
- [GreptimeDB JavaScript Client](https://github.com/GreptimeTeam/greptime-js-sdk)
### Grafana Dashboard
Our official Grafana dashboard is available in the [grafana](grafana/README.md) directory.
Our official Grafana dashboard is available in the [grafana](./grafana/README.md) directory.
## Project Status
The current version has not yet reached General Availability version standards.
In line with our Greptime 2024 Roadmap, we plan to achieve a production-level
version with the update to v1.0 in August. [[Join Force]](https://github.com/GreptimeTeam/greptimedb/issues/3412)
This project is in its early stage and under heavy development. We move fast and
break things. Benchmarks on the development branch may not represent its potential
performance. We release pre-built binaries constantly for functional
evaluation. Do not use it in production at the moment.
For future plans, check out [GreptimeDB roadmap](https://github.com/GreptimeTeam/greptimedb/issues/669).
## Community
@@ -163,12 +154,12 @@ and what went wrong. If you have any questions or if you would like to get invol
community, please check out:
- GreptimeDB Community on [Slack](https://greptime.com/slack)
- GreptimeDB [GitHub Discussions forum](https://github.com/GreptimeTeam/greptimedb/discussions)
- Greptime official [website](https://greptime.com)
- GreptimeDB GitHub [Discussions](https://github.com/GreptimeTeam/greptimedb/discussions)
- Greptime official [Website](https://greptime.com)
In addition, you may:
- View our official [Blog](https://greptime.com/blogs/)
- View our official [Blog](https://greptime.com/blogs/index)
- Connect with us on [LinkedIn](https://www.linkedin.com/company/greptime/)
- Follow us on [Twitter](https://twitter.com/greptime)
@@ -179,7 +170,7 @@ open contributions and allowing you to use the software however you want.
## Contributing
Please refer to [contribution guidelines](CONTRIBUTING.md) and [internal concepts docs](https://docs.greptime.com/contributor-guide/overview.html) for more information.
Please refer to [contribution guidelines](CONTRIBUTING.md) for more information.
## Acknowledgement

View File

@@ -14,5 +14,6 @@ clap.workspace = true
client.workspace = true
futures-util.workspace = true
indicatif = "0.17.1"
itertools.workspace = true
parquet.workspace = true
tokio.workspace = true

View File

@@ -140,9 +140,9 @@ intermediate_path = ""
[region_engine.mito.memtable]
# Memtable type.
# - "partition_tree": partition tree memtable
# - "experimental": experimental memtable
# - "time_series": time-series memtable (deprecated)
type = "partition_tree"
type = "experimental"
# The max number of keys in one shard.
index_max_keys_per_shard = 8192
# The max rows of data inside the actively writing buffer in one shard.

View File

@@ -246,9 +246,9 @@ intermediate_path = ""
[region_engine.mito.memtable]
# Memtable type.
# - "partition_tree": partition tree memtable
# - "experimental": experimental memtable
# - "time_series": time-series memtable (deprecated)
type = "partition_tree"
type = "experimental"
# The max number of keys in one shard.
index_max_keys_per_shard = 8192
# The max rows of data inside the actively writing buffer in one shard.
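
To illustrate how a `[region_engine.mito.memtable]` block like the two above maps onto typed settings, here is a minimal serde sketch; `MemtableConfig` and its fields are hypothetical stand-ins rather than the engine's real configuration types, and it assumes the `serde` (with `derive`) and `toml` crates as dependencies.

```rust
use serde::Deserialize;

// Hypothetical config struct; it mirrors only the two keys shown in the diff above.
#[derive(Debug, Deserialize)]
struct MemtableConfig {
    // "partition_tree" or "time_series", per the comments in the config files.
    r#type: String,
    // The max number of keys in one shard.
    index_max_keys_per_shard: u32,
}

fn main() {
    let toml_src = r#"
        type = "partition_tree"
        index_max_keys_per_shard = 8192
    "#;
    let cfg: MemtableConfig = toml::from_str(toml_src).expect("valid memtable config");
    println!("{cfg:?}");
}
```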

View File

@@ -1,50 +0,0 @@
# TSBS benchmark - v0.7.0
## Environment
### Local
| | |
| ------ | ---------------------------------- |
| CPU | AMD Ryzen 7 7735HS (8 core 3.2GHz) |
| Memory | 32GB |
| Disk | SOLIDIGM SSDPFKNU010TZ |
| OS | Ubuntu 22.04.2 LTS |
### Amazon EC2
| | |
| ------- | -------------- |
| Machine | c5d.2xlarge |
| CPU | 8 core |
| Memory | 16GB |
| Disk | 50GB (GP3) |
| OS | Ubuntu 22.04.1 |
## Write performance
| Environment | Ingest rate (rows/s) |
| ------------------ | --------------------- |
| Local | 3695814.64 |
| EC2 c5d.2xlarge | 2987166.64 |
## Query performance
| Query type | Local (ms) | EC2 c5d.2xlarge (ms) |
| --------------------- | ---------- | ---------------------- |
| cpu-max-all-1 | 30.56 | 54.74 |
| cpu-max-all-8 | 52.69 | 70.50 |
| double-groupby-1 | 664.30 | 1366.63 |
| double-groupby-5 | 1391.26 | 2141.71 |
| double-groupby-all | 2828.94 | 3389.59 |
| groupby-orderby-limit | 718.92 | 1213.90 |
| high-cpu-1 | 29.21 | 52.98 |
| high-cpu-all | 5514.12 | 7194.91 |
| lastpoint | 7571.40 | 9423.41 |
| single-groupby-1-1-1 | 19.09 | 7.77 |
| single-groupby-1-1-12 | 27.28 | 51.64 |
| single-groupby-1-8-1 | 31.85 | 11.64 |
| single-groupby-5-1-1 | 16.14 | 9.67 |
| single-groupby-5-1-12 | 27.21 | 53.62 |
| single-groupby-5-8-1 | 39.62 | 14.96 |

View File

@@ -19,12 +19,6 @@ includes = [
"*.py",
]
excludes = [
# copied sources
"src/common/base/src/readable_size.rs",
"src/servers/src/repeated_field.rs",
]
[properties]
inceptionYear = 2023
copyrightOwner = "Greptime Team"

View File

@@ -27,7 +27,7 @@ function retry_fetch() {
echo "Failed to download $url"
echo "You may try to set http_proxy and https_proxy environment variables."
if [[ -z "$GITHUB_PROXY_URL" ]]; then
echo "You may try to set GITHUB_PROXY_URL=http://mirror.ghproxy.com/https://github.com/"
echo "You may try to set GITHUB_PROXY_URL=http://mirror.ghproxy.com/"
fi
exit 1
}
@@ -39,7 +39,7 @@ function retry_fetch() {
retry_fetch "${GITHUB_URL}/GreptimeTeam/dashboard/releases/download/${RELEASE_VERSION}/sha256.txt" sha256.txt
# Download the tar file containing the built dashboard assets.
retry_fetch "${GITHUB_URL}/GreptimeTeam/dashboard/releases/download/${RELEASE_VERSION}/build.tar.gz" build.tar.gz
retry_fetch "${GITHUB_URL}/GreptimeTeam/dashboard/releases/download/$RELEASE_VERSION/build.tar.gz" build.tar.gz
# Verify the checksums match; exit if they don't.
case "$(uname -s)" in

View File

@@ -18,6 +18,7 @@ greptime-proto.workspace = true
paste = "1.0"
prost.workspace = true
snafu.workspace = true
tonic.workspace = true
[build-dependencies]
tonic-build = "0.9"

View File

@@ -707,6 +707,7 @@ pub fn pb_values_to_vector_ref(data_type: &ConcreteDataType, values: Values) ->
}
pub fn pb_values_to_values(data_type: &ConcreteDataType, values: Values) -> Vec<Value> {
// TODO(fys): use macros to optimize code
match data_type {
ConcreteDataType::Int64(_) => values
.i64_values
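
The hunk above only shows the head of `pb_values_to_values`, which dispatches on the declared data type to pick the matching per-type column out of a protobuf `Values` struct. A minimal self-contained sketch of that dispatch pattern, using hypothetical stand-in types rather than the real `greptime-proto` definitions:

```rust
// Hypothetical stand-ins for the protobuf-generated types used above.
#[derive(Default)]
struct Values {
    i64_values: Vec<i64>,
    f64_values: Vec<f64>,
}

enum DataType {
    Int64,
    Float64,
}

#[derive(Debug)]
enum Value {
    Int64(i64),
    Float64(f64),
}

// Pick the per-type column that matches the declared data type,
// then wrap each element in the dynamically typed `Value` enum.
fn pb_values_to_values(data_type: &DataType, values: Values) -> Vec<Value> {
    match data_type {
        DataType::Int64 => values.i64_values.into_iter().map(Value::Int64).collect(),
        DataType::Float64 => values.f64_values.into_iter().map(Value::Float64).collect(),
    }
}

fn main() {
    let vals = Values { i64_values: vec![1, 2, 3], ..Default::default() };
    println!("{:?}", pb_values_to_values(&DataType::Int64, vals));
}
```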

View File

@@ -16,9 +16,8 @@ api.workspace = true
async-trait.workspace = true
common-error.workspace = true
common-macro.workspace = true
common-telemetry.workspace = true
digest = "0.10"
notify.workspace = true
hex = { version = "0.4" }
secrecy = { version = "0.8", features = ["serde", "alloc"] }
sha1 = "0.10"
snafu.workspace = true

View File

@@ -22,9 +22,6 @@ use snafu::{ensure, OptionExt};
use crate::error::{IllegalParamSnafu, InvalidConfigSnafu, Result, UserPasswordMismatchSnafu};
use crate::user_info::DefaultUserInfo;
use crate::user_provider::static_user_provider::{StaticUserProvider, STATIC_USER_PROVIDER};
use crate::user_provider::watch_file_user_provider::{
WatchFileUserProvider, WATCH_FILE_USER_PROVIDER,
};
use crate::{UserInfoRef, UserProviderRef};
pub(crate) const DEFAULT_USERNAME: &str = "greptime";
@@ -43,12 +40,9 @@ pub fn user_provider_from_option(opt: &String) -> Result<UserProviderRef> {
match name {
STATIC_USER_PROVIDER => {
let provider =
StaticUserProvider::new(content).map(|p| Arc::new(p) as UserProviderRef)?;
StaticUserProvider::try_from(content).map(|p| Arc::new(p) as UserProviderRef)?;
Ok(provider)
}
WATCH_FILE_USER_PROVIDER => {
WatchFileUserProvider::new(content).map(|p| Arc::new(p) as UserProviderRef)
}
_ => InvalidConfigSnafu {
value: name.to_string(),
msg: "Invalid UserProviderOption",

View File

@@ -64,13 +64,6 @@ pub enum Error {
username: String,
},
#[snafu(display("Failed to initialize a watcher for file {}", path))]
FileWatch {
path: String,
#[snafu(source)]
error: notify::Error,
},
#[snafu(display("User is not authorized to perform this action"))]
PermissionDenied { location: Location },
}
@@ -80,7 +73,6 @@ impl ErrorExt for Error {
match self {
Error::InvalidConfig { .. } => StatusCode::InvalidArguments,
Error::IllegalParam { .. } => StatusCode::InvalidArguments,
Error::FileWatch { .. } => StatusCode::InvalidArguments,
Error::InternalState { .. } => StatusCode::Unexpected,
Error::Io { .. } => StatusCode::Internal,
Error::AuthBackend { .. } => StatusCode::Internal,
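
For readers unfamiliar with the snafu machinery used throughout these auth changes (`#[snafu(display(...))]` variants, `ensure!`, and the generated `...Snafu` context selectors), here is a minimal self-contained sketch of the pattern; the `InvalidConfig` variant and `parse_provider_option` helper are hypothetical, not the crate's real error type, and the snippet assumes the `snafu` crate (0.7+, which generates `Snafu`-suffixed selectors as seen above).

```rust
use snafu::{ensure, Snafu};

#[derive(Debug, Snafu)]
enum Error {
    // The display string can interpolate the variant's fields.
    #[snafu(display("Invalid config value {}: {}", value, msg))]
    InvalidConfig { value: String, msg: String },
}

type Result<T> = std::result::Result<T, Error>;

// Hypothetical helper mirroring the `<option>:<value>` validation style above.
fn parse_provider_option(opt: &str) -> Result<(&str, &str)> {
    // `ensure!` returns early through the generated InvalidConfigSnafu context
    // selector when the condition is false; &str fields convert via Into<String>.
    ensure!(
        opt.contains(':'),
        InvalidConfigSnafu {
            value: opt,
            msg: "must be in format `<option>:<value>`",
        }
    );
    Ok(opt.split_once(':').expect("checked above"))
}

fn main() {
    assert!(parse_provider_option("file:/etc/greptimedb/users").is_ok());
    assert!(parse_provider_option("bogus").is_err());
}
```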

View File

@@ -13,24 +13,10 @@
// limitations under the License.
pub(crate) mod static_user_provider;
pub(crate) mod watch_file_user_provider;
use std::collections::HashMap;
use std::fs::File;
use std::io;
use std::io::BufRead;
use std::path::Path;
use secrecy::ExposeSecret;
use snafu::{ensure, OptionExt, ResultExt};
use crate::common::{Identity, Password};
use crate::error::{
IllegalParamSnafu, InvalidConfigSnafu, IoSnafu, Result, UnsupportedPasswordTypeSnafu,
UserNotFoundSnafu, UserPasswordMismatchSnafu,
};
use crate::user_info::DefaultUserInfo;
use crate::{auth_mysql, UserInfoRef};
use crate::error::Result;
use crate::UserInfoRef;
#[async_trait::async_trait]
pub trait UserProvider: Send + Sync {
@@ -58,88 +44,3 @@ pub trait UserProvider: Send + Sync {
Ok(user_info)
}
}
fn load_credential_from_file(filepath: &str) -> Result<Option<HashMap<String, Vec<u8>>>> {
// check valid path
let path = Path::new(filepath);
if !path.exists() {
return Ok(None);
}
ensure!(
path.is_file(),
InvalidConfigSnafu {
value: filepath,
msg: "UserProvider file must be a file",
}
);
let file = File::open(path).context(IoSnafu)?;
let credential = io::BufReader::new(file)
.lines()
.map_while(std::result::Result::ok)
.filter_map(|line| {
if let Some((k, v)) = line.split_once('=') {
Some((k.to_string(), v.as_bytes().to_vec()))
} else {
None
}
})
.collect::<HashMap<String, Vec<u8>>>();
ensure!(
!credential.is_empty(),
InvalidConfigSnafu {
value: filepath,
msg: "UserProvider's file must contains at least one valid credential",
}
);
Ok(Some(credential))
}
fn authenticate_with_credential(
users: &HashMap<String, Vec<u8>>,
input_id: Identity<'_>,
input_pwd: Password<'_>,
) -> Result<UserInfoRef> {
match input_id {
Identity::UserId(username, _) => {
ensure!(
!username.is_empty(),
IllegalParamSnafu {
msg: "blank username"
}
);
let save_pwd = users.get(username).context(UserNotFoundSnafu {
username: username.to_string(),
})?;
match input_pwd {
Password::PlainText(pwd) => {
ensure!(
!pwd.expose_secret().is_empty(),
IllegalParamSnafu {
msg: "blank password"
}
);
if save_pwd == pwd.expose_secret().as_bytes() {
Ok(DefaultUserInfo::with_name(username))
} else {
UserPasswordMismatchSnafu {
username: username.to_string(),
}
.fail()
}
}
Password::MysqlNativePassword(auth_data, salt) => {
auth_mysql(auth_data, salt, username, save_pwd)
.map(|_| DefaultUserInfo::with_name(username))
}
Password::PgMD5(_, _) => UnsupportedPasswordTypeSnafu {
password_type: "pg_md5",
}
.fail(),
}
}
}
}

View File

@@ -13,34 +13,60 @@
// limitations under the License.
use std::collections::HashMap;
use std::fs::File;
use std::io;
use std::io::BufRead;
use std::path::Path;
use async_trait::async_trait;
use snafu::OptionExt;
use secrecy::ExposeSecret;
use snafu::{ensure, OptionExt, ResultExt};
use crate::error::{InvalidConfigSnafu, Result};
use crate::user_provider::{authenticate_with_credential, load_credential_from_file};
use crate::{Identity, Password, UserInfoRef, UserProvider};
use crate::error::{
Error, IllegalParamSnafu, InvalidConfigSnafu, IoSnafu, Result, UnsupportedPasswordTypeSnafu,
UserNotFoundSnafu, UserPasswordMismatchSnafu,
};
use crate::user_info::DefaultUserInfo;
use crate::{auth_mysql, Identity, Password, UserInfoRef, UserProvider};
pub(crate) const STATIC_USER_PROVIDER: &str = "static_user_provider";
pub(crate) struct StaticUserProvider {
users: HashMap<String, Vec<u8>>,
}
impl TryFrom<&str> for StaticUserProvider {
type Error = Error;
impl StaticUserProvider {
pub(crate) fn new(value: &str) -> Result<Self> {
fn try_from(value: &str) -> Result<Self> {
let (mode, content) = value.split_once(':').context(InvalidConfigSnafu {
value: value.to_string(),
msg: "StaticUserProviderOption must be in format `<option>:<value>`",
})?;
return match mode {
"file" => {
let users = load_credential_from_file(content)?
.context(InvalidConfigSnafu {
value: content.to_string(),
msg: "StaticFileUserProvider must be a valid file path",
})?;
Ok(StaticUserProvider { users })
// check valid path
let path = Path::new(content);
ensure!(path.exists() && path.is_file(), InvalidConfigSnafu {
value: content.to_string(),
msg: "StaticUserProviderOption file must be a valid file path",
});
let file = File::open(path).context(IoSnafu)?;
let credential = io::BufReader::new(file)
.lines()
.map_while(std::result::Result::ok)
.filter_map(|line| {
if let Some((k, v)) = line.split_once('=') {
Some((k.to_string(), v.as_bytes().to_vec()))
} else {
None
}
})
.collect::<HashMap<String, Vec<u8>>>();
ensure!(!credential.is_empty(), InvalidConfigSnafu {
value: content.to_string(),
msg: "StaticUserProviderOption file must contains at least one valid credential",
});
Ok(StaticUserProvider { users: credential, })
}
"cmd" => content
.split(',')
@@ -57,19 +83,66 @@ impl StaticUserProvider {
value: mode.to_string(),
msg: "StaticUserProviderOption must be in format `file:<path>` or `cmd:<values>`",
}
.fail(),
.fail(),
};
}
}
pub(crate) struct StaticUserProvider {
users: HashMap<String, Vec<u8>>,
}
#[async_trait]
impl UserProvider for StaticUserProvider {
fn name(&self) -> &str {
STATIC_USER_PROVIDER
}
async fn authenticate(&self, id: Identity<'_>, pwd: Password<'_>) -> Result<UserInfoRef> {
authenticate_with_credential(&self.users, id, pwd)
async fn authenticate(
&self,
input_id: Identity<'_>,
input_pwd: Password<'_>,
) -> Result<UserInfoRef> {
match input_id {
Identity::UserId(username, _) => {
ensure!(
!username.is_empty(),
IllegalParamSnafu {
msg: "blank username"
}
);
let save_pwd = self.users.get(username).context(UserNotFoundSnafu {
username: username.to_string(),
})?;
match input_pwd {
Password::PlainText(pwd) => {
ensure!(
!pwd.expose_secret().is_empty(),
IllegalParamSnafu {
msg: "blank password"
}
);
return if save_pwd == pwd.expose_secret().as_bytes() {
Ok(DefaultUserInfo::with_name(username))
} else {
UserPasswordMismatchSnafu {
username: username.to_string(),
}
.fail()
};
}
Password::MysqlNativePassword(auth_data, salt) => {
auth_mysql(auth_data, salt, username, save_pwd)
.map(|_| DefaultUserInfo::with_name(username))
}
Password::PgMD5(_, _) => UnsupportedPasswordTypeSnafu {
password_type: "pg_md5",
}
.fail(),
}
}
}
}
async fn authorize(
@@ -108,7 +181,7 @@ pub mod test {
#[tokio::test]
async fn test_authorize() {
let user_info = DefaultUserInfo::with_name("root");
let provider = StaticUserProvider::new("cmd:root=123456,admin=654321").unwrap();
let provider = StaticUserProvider::try_from("cmd:root=123456,admin=654321").unwrap();
provider
.authorize("catalog", "schema", &user_info)
.await
@@ -117,7 +190,7 @@ pub mod test {
#[tokio::test]
async fn test_inline_provider() {
let provider = StaticUserProvider::new("cmd:root=123456,admin=654321").unwrap();
let provider = StaticUserProvider::try_from("cmd:root=123456,admin=654321").unwrap();
test_authenticate(&provider, "root", "123456").await;
test_authenticate(&provider, "admin", "654321").await;
}
@@ -141,7 +214,7 @@ admin=654321",
}
let param = format!("file:{file_path}");
let provider = StaticUserProvider::new(param.as_str()).unwrap();
let provider = StaticUserProvider::try_from(param.as_str()).unwrap();
test_authenticate(&provider, "root", "123456").await;
test_authenticate(&provider, "admin", "654321").await;
}
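
The change above swaps the provider's bespoke `new(&str)` constructor for the standard `TryFrom<&str>` trait, so call sites (including the tests) switch to `try_from`. A minimal self-contained sketch of that pattern, assuming the workspace's 2021 edition where `TryFrom` is in the prelude; `ProviderOption` is a hypothetical stand-in, not the crate-private `StaticUserProvider`:

```rust
/// Hypothetical stand-in that parses option strings of the same shape the
/// provider above accepts, e.g. `file:<path>` or `cmd:root=123456,admin=654321`.
#[derive(Debug, PartialEq)]
struct ProviderOption {
    mode: String,
    content: String,
}

impl TryFrom<&str> for ProviderOption {
    type Error = String;

    fn try_from(value: &str) -> Result<Self, Self::Error> {
        // Same `<option>:<value>` split the real provider performs.
        let (mode, content) = value
            .split_once(':')
            .ok_or_else(|| format!("`{value}` must be in format `<option>:<value>`"))?;
        Ok(ProviderOption {
            mode: mode.to_string(),
            content: content.to_string(),
        })
    }
}

fn main() {
    // Call sites move from a bespoke `new` to the standard conversion trait.
    let opt = ProviderOption::try_from("cmd:root=123456,admin=654321").unwrap();
    assert_eq!(opt.mode, "cmd");
    assert_eq!(opt.content, "root=123456,admin=654321");
    assert!(ProviderOption::try_from("not-an-option").is_err());
}
```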

View File

@@ -1,215 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashMap;
use std::path::Path;
use std::sync::mpsc::channel;
use std::sync::{Arc, Mutex};
use async_trait::async_trait;
use common_telemetry::{info, warn};
use notify::{EventKind, RecursiveMode, Watcher};
use snafu::{ensure, ResultExt};
use crate::error::{FileWatchSnafu, InvalidConfigSnafu, Result};
use crate::user_info::DefaultUserInfo;
use crate::user_provider::{authenticate_with_credential, load_credential_from_file};
use crate::{Identity, Password, UserInfoRef, UserProvider};
pub(crate) const WATCH_FILE_USER_PROVIDER: &str = "watch_file_user_provider";
type WatchedCredentialRef = Arc<Mutex<Option<HashMap<String, Vec<u8>>>>>;
/// A user provider that reads user credential from a file and watches the file for changes.
///
/// Empty file is invalid; but file not exist means every user can be authenticated.
pub(crate) struct WatchFileUserProvider {
users: WatchedCredentialRef,
}
impl WatchFileUserProvider {
pub fn new(filepath: &str) -> Result<Self> {
let credential = load_credential_from_file(filepath)?;
let users = Arc::new(Mutex::new(credential));
let this = WatchFileUserProvider {
users: users.clone(),
};
let (tx, rx) = channel::<notify::Result<notify::Event>>();
let mut debouncer =
notify::recommended_watcher(tx).context(FileWatchSnafu { path: "<none>" })?;
let mut dir = Path::new(filepath).to_path_buf();
ensure!(
dir.pop(),
InvalidConfigSnafu {
value: filepath,
msg: "UserProvider path must be a file path",
}
);
debouncer
.watch(&dir, RecursiveMode::NonRecursive)
.context(FileWatchSnafu { path: filepath })?;
let filepath = filepath.to_string();
std::thread::spawn(move || {
let filename = Path::new(&filepath).file_name();
let _hold = debouncer;
while let Ok(res) = rx.recv() {
if let Ok(event) = res {
let is_this_file = event.paths.iter().any(|p| p.file_name() == filename);
let is_relevant_event = matches!(
event.kind,
EventKind::Modify(_) | EventKind::Create(_) | EventKind::Remove(_)
);
if is_this_file && is_relevant_event {
info!(?event.kind, "User provider file {} changed", &filepath);
match load_credential_from_file(&filepath) {
Ok(credential) => {
let mut users =
users.lock().expect("users credential must be valid");
#[cfg(not(test))]
info!("User provider file {filepath} reloaded");
#[cfg(test)]
info!("User provider file {filepath} reloaded: {credential:?}");
*users = credential;
}
Err(err) => {
warn!(
?err,
"Fail to load credential from file {filepath}; keep the old one",
)
}
}
}
}
}
});
Ok(this)
}
}
#[async_trait]
impl UserProvider for WatchFileUserProvider {
fn name(&self) -> &str {
WATCH_FILE_USER_PROVIDER
}
async fn authenticate(&self, id: Identity<'_>, password: Password<'_>) -> Result<UserInfoRef> {
let users = self.users.lock().expect("users credential must be valid");
if let Some(users) = users.as_ref() {
authenticate_with_credential(users, id, password)
} else {
match id {
Identity::UserId(id, _) => {
warn!(id, "User provider file not exist, allow all users");
Ok(DefaultUserInfo::with_name(id))
}
}
}
}
async fn authorize(&self, _: &str, _: &str, _: &UserInfoRef) -> Result<()> {
// default allow all
Ok(())
}
}
#[cfg(test)]
pub mod test {
use std::time::{Duration, Instant};
use common_test_util::temp_dir::create_temp_dir;
use tokio::time::sleep;
use crate::user_provider::watch_file_user_provider::WatchFileUserProvider;
use crate::user_provider::{Identity, Password};
use crate::UserProvider;
async fn test_authenticate(
provider: &dyn UserProvider,
username: &str,
password: &str,
ok: bool,
timeout: Option<Duration>,
) {
if let Some(timeout) = timeout {
let deadline = Instant::now().checked_add(timeout).unwrap();
loop {
let re = provider
.authenticate(
Identity::UserId(username, None),
Password::PlainText(password.to_string().into()),
)
.await;
if re.is_ok() == ok {
break;
} else if Instant::now() < deadline {
sleep(Duration::from_millis(100)).await;
} else {
panic!("timeout (username: {username}, password: {password}, expected: {ok})");
}
}
} else {
let re = provider
.authenticate(
Identity::UserId(username, None),
Password::PlainText(password.to_string().into()),
)
.await;
assert_eq!(
re.is_ok(),
ok,
"username: {}, password: {}",
username,
password
);
}
}
#[tokio::test]
async fn test_file_provider() {
common_telemetry::init_default_ut_logging();
let dir = create_temp_dir("test_file_provider");
let file_path = format!("{}/test_file_provider", dir.path().to_str().unwrap());
// write a tmp file
assert!(std::fs::write(&file_path, "root=123456\nadmin=654321\n").is_ok());
let provider = WatchFileUserProvider::new(file_path.as_str()).unwrap();
let timeout = Duration::from_secs(60);
test_authenticate(&provider, "root", "123456", true, None).await;
test_authenticate(&provider, "admin", "654321", true, None).await;
test_authenticate(&provider, "root", "654321", false, None).await;
// update the tmp file
assert!(std::fs::write(&file_path, "root=654321\n").is_ok());
test_authenticate(&provider, "root", "123456", false, Some(timeout)).await;
test_authenticate(&provider, "root", "654321", true, Some(timeout)).await;
test_authenticate(&provider, "admin", "654321", false, Some(timeout)).await;
// remove the tmp file
assert!(std::fs::remove_file(&file_path).is_ok());
test_authenticate(&provider, "root", "123456", true, Some(timeout)).await;
test_authenticate(&provider, "root", "654321", true, Some(timeout)).await;
test_authenticate(&provider, "admin", "654321", true, Some(timeout)).await;
// recreate the tmp file
assert!(std::fs::write(&file_path, "root=123456\n").is_ok());
test_authenticate(&provider, "root", "123456", true, Some(timeout)).await;
test_authenticate(&provider, "root", "654321", false, Some(timeout)).await;
test_authenticate(&provider, "admin", "654321", false, Some(timeout)).await;
}
}

View File

@@ -12,16 +12,19 @@ workspace = true
[dependencies]
api.workspace = true
arc-swap = "1.0"
arrow.workspace = true
arrow-schema.workspace = true
async-stream.workspace = true
async-trait = "0.1"
common-catalog.workspace = true
common-error.workspace = true
common-grpc.workspace = true
common-macro.workspace = true
common-meta.workspace = true
common-query.workspace = true
common-recordbatch.workspace = true
common-runtime.workspace = true
common-telemetry.workspace = true
common-time.workspace = true
common-version.workspace = true
@@ -34,9 +37,12 @@ itertools.workspace = true
lazy_static.workspace = true
meta-client.workspace = true
moka = { workspace = true, features = ["future", "sync"] }
parking_lot = "0.12"
partition.workspace = true
paste = "1.0"
prometheus.workspace = true
regex.workspace = true
serde.workspace = true
serde_json.workspace = true
session.workspace = true
snafu.workspace = true

View File

@@ -12,8 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
pub mod columns;
pub mod key_column_usage;
mod columns;
mod key_column_usage;
mod memory_table;
mod partitions;
mod predicate;

View File

@@ -48,16 +48,16 @@ pub(super) struct InformationSchemaColumns {
catalog_manager: Weak<dyn CatalogManager>,
}
pub const TABLE_CATALOG: &str = "table_catalog";
pub const TABLE_SCHEMA: &str = "table_schema";
pub const TABLE_NAME: &str = "table_name";
pub const COLUMN_NAME: &str = "column_name";
pub const DATA_TYPE: &str = "data_type";
pub const SEMANTIC_TYPE: &str = "semantic_type";
pub const COLUMN_DEFAULT: &str = "column_default";
pub const IS_NULLABLE: &str = "is_nullable";
const TABLE_CATALOG: &str = "table_catalog";
const TABLE_SCHEMA: &str = "table_schema";
const TABLE_NAME: &str = "table_name";
const COLUMN_NAME: &str = "column_name";
const DATA_TYPE: &str = "data_type";
const SEMANTIC_TYPE: &str = "semantic_type";
const COLUMN_DEFAULT: &str = "column_default";
const IS_NULLABLE: &str = "is_nullable";
const COLUMN_TYPE: &str = "column_type";
pub const COLUMN_COMMENT: &str = "column_comment";
const COLUMN_COMMENT: &str = "column_comment";
const INIT_CAPACITY: usize = 42;
impl InformationSchemaColumns {

View File

@@ -37,16 +37,13 @@ use crate::error::{
use crate::information_schema::{InformationTable, Predicates};
use crate::CatalogManager;
pub const CONSTRAINT_SCHEMA: &str = "constraint_schema";
pub const CONSTRAINT_NAME: &str = "constraint_name";
// It's always `def` in MySQL
pub const TABLE_CATALOG: &str = "table_catalog";
// The real catalog name for this key column.
pub const REAL_TABLE_CATALOG: &str = "real_table_catalog";
pub const TABLE_SCHEMA: &str = "table_schema";
pub const TABLE_NAME: &str = "table_name";
pub const COLUMN_NAME: &str = "column_name";
pub const ORDINAL_POSITION: &str = "ordinal_position";
const CONSTRAINT_SCHEMA: &str = "constraint_schema";
const CONSTRAINT_NAME: &str = "constraint_name";
const TABLE_CATALOG: &str = "table_catalog";
const TABLE_SCHEMA: &str = "table_schema";
const TABLE_NAME: &str = "table_name";
const COLUMN_NAME: &str = "column_name";
const ORDINAL_POSITION: &str = "ordinal_position";
const INIT_CAPACITY: usize = 42;
/// The virtual table implementation for `information_schema.KEY_COLUMN_USAGE`.
@@ -79,11 +76,6 @@ impl InformationSchemaKeyColumnUsage {
),
ColumnSchema::new(CONSTRAINT_NAME, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(TABLE_CATALOG, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(
REAL_TABLE_CATALOG,
ConcreteDataType::string_datatype(),
false,
),
ColumnSchema::new(TABLE_SCHEMA, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(TABLE_NAME, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(COLUMN_NAME, ConcreteDataType::string_datatype(), false),
@@ -166,7 +158,6 @@ struct InformationSchemaKeyColumnUsageBuilder {
constraint_schema: StringVectorBuilder,
constraint_name: StringVectorBuilder,
table_catalog: StringVectorBuilder,
real_table_catalog: StringVectorBuilder,
table_schema: StringVectorBuilder,
table_name: StringVectorBuilder,
column_name: StringVectorBuilder,
@@ -188,7 +179,6 @@ impl InformationSchemaKeyColumnUsageBuilder {
constraint_schema: StringVectorBuilder::with_capacity(INIT_CAPACITY),
constraint_name: StringVectorBuilder::with_capacity(INIT_CAPACITY),
table_catalog: StringVectorBuilder::with_capacity(INIT_CAPACITY),
real_table_catalog: StringVectorBuilder::with_capacity(INIT_CAPACITY),
table_schema: StringVectorBuilder::with_capacity(INIT_CAPACITY),
table_name: StringVectorBuilder::with_capacity(INIT_CAPACITY),
column_name: StringVectorBuilder::with_capacity(INIT_CAPACITY),
@@ -233,7 +223,6 @@ impl InformationSchemaKeyColumnUsageBuilder {
&predicates,
&schema_name,
"TIME INDEX",
&catalog_name,
&schema_name,
&table_name,
&column.name,
@@ -242,7 +231,6 @@ impl InformationSchemaKeyColumnUsageBuilder {
}
if keys.contains(&idx) {
primary_constraints.push((
catalog_name.clone(),
schema_name.clone(),
table_name.clone(),
column.name.clone(),
@@ -256,14 +244,13 @@ impl InformationSchemaKeyColumnUsageBuilder {
}
}
for (i, (catalog_name, schema_name, table_name, column_name)) in
for (i, (schema_name, table_name, column_name)) in
primary_constraints.into_iter().enumerate()
{
self.add_key_column_usage(
&predicates,
&schema_name,
"PRIMARY",
&catalog_name,
&schema_name,
&table_name,
&column_name,
@@ -282,7 +269,6 @@ impl InformationSchemaKeyColumnUsageBuilder {
predicates: &Predicates,
constraint_schema: &str,
constraint_name: &str,
table_catalog: &str,
table_schema: &str,
table_name: &str,
column_name: &str,
@@ -291,7 +277,6 @@ impl InformationSchemaKeyColumnUsageBuilder {
let row = [
(CONSTRAINT_SCHEMA, &Value::from(constraint_schema)),
(CONSTRAINT_NAME, &Value::from(constraint_name)),
(REAL_TABLE_CATALOG, &Value::from(table_catalog)),
(TABLE_SCHEMA, &Value::from(table_schema)),
(TABLE_NAME, &Value::from(table_name)),
(COLUMN_NAME, &Value::from(column_name)),
@@ -306,7 +291,6 @@ impl InformationSchemaKeyColumnUsageBuilder {
self.constraint_schema.push(Some(constraint_schema));
self.constraint_name.push(Some(constraint_name));
self.table_catalog.push(Some("def"));
self.real_table_catalog.push(Some(table_catalog));
self.table_schema.push(Some(table_schema));
self.table_name.push(Some(table_name));
self.column_name.push(Some(column_name));
@@ -326,7 +310,6 @@ impl InformationSchemaKeyColumnUsageBuilder {
Arc::new(self.constraint_schema.finish()),
Arc::new(self.constraint_name.finish()),
Arc::new(self.table_catalog.finish()),
Arc::new(self.real_table_catalog.finish()),
Arc::new(self.table_schema.finish()),
Arc::new(self.table_name.finish()),
Arc::new(self.column_name.finish()),

View File

@@ -14,15 +14,13 @@
use std::sync::Arc;
use common_catalog::consts::{METRIC_ENGINE, MITO_ENGINE};
use common_catalog::consts::MITO_ENGINE;
use datatypes::prelude::{ConcreteDataType, VectorRef};
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use datatypes::vectors::{Int64Vector, StringVector};
use crate::information_schema::table_names::*;
const NO_VALUE: &str = "NO";
/// Find the schema and columns by the table_name, only valid for memory tables.
/// Safety: the user MUST ensure the table schema exists, panic otherwise.
pub fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>) {
@@ -61,15 +59,14 @@ pub fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>) {
"SAVEPOINTS",
]),
vec![
Arc::new(StringVector::from(vec![MITO_ENGINE, METRIC_ENGINE])),
Arc::new(StringVector::from(vec!["DEFAULT", "YES"])),
Arc::new(StringVector::from(vec![MITO_ENGINE])),
Arc::new(StringVector::from(vec!["DEFAULT"])),
Arc::new(StringVector::from(vec![
"Storage engine for time-series data",
"Storage engine for observability scenarios, which is adept at handling a large number of small tables, making it particularly suitable for cloud-native monitoring",
])),
Arc::new(StringVector::from(vec![NO_VALUE, NO_VALUE])),
Arc::new(StringVector::from(vec![NO_VALUE, NO_VALUE])),
Arc::new(StringVector::from(vec![NO_VALUE, NO_VALUE])),
Arc::new(StringVector::from(vec!["NO"])),
Arc::new(StringVector::from(vec!["NO"])),
Arc::new(StringVector::from(vec!["NO"])),
],
),

View File

@@ -23,14 +23,15 @@ use common_catalog::consts::{
};
use common_catalog::format_full_table_name;
use common_error::ext::BoxedError;
use common_meta::cache_invalidator::{CacheInvalidator, Context, MultiCacheInvalidator};
use common_meta::instruction::CacheIdent;
use common_meta::cache_invalidator::{CacheInvalidator, CacheInvalidatorRef, Context};
use common_meta::error::Result as MetaResult;
use common_meta::key::catalog_name::CatalogNameKey;
use common_meta::key::schema_name::SchemaNameKey;
use common_meta::key::table_info::TableInfoValue;
use common_meta::key::table_name::TableNameKey;
use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
use common_meta::kv_backend::KvBackendRef;
use common_meta::table_name::TableName;
use futures_util::stream::BoxStream;
use futures_util::{StreamExt, TryStreamExt};
use moka::future::{Cache as AsyncCache, CacheBuilder};
@@ -38,13 +39,14 @@ use moka::sync::Cache;
use partition::manager::{PartitionRuleManager, PartitionRuleManagerRef};
use snafu::prelude::*;
use table::dist_table::DistTable;
use table::metadata::TableId;
use table::table::numbers::{NumbersTable, NUMBERS_TABLE_NAME};
use table::TableRef;
use crate::error::Error::{GetTableCache, TableCacheNotGet};
use crate::error::{
InvalidTableInfoInCatalogSnafu, ListCatalogsSnafu, ListSchemasSnafu, ListTablesSnafu, Result,
TableCacheNotGetSnafu, TableMetadataManagerSnafu,
self as catalog_err, ListCatalogsSnafu, ListSchemasSnafu, ListTablesSnafu,
Result as CatalogResult, TableCacheNotGetSnafu, TableMetadataManagerSnafu,
};
use crate::information_schema::InformationSchemaProvider;
use crate::CatalogManager;
@@ -56,6 +58,10 @@ use crate::CatalogManager;
/// comes from `SystemCatalog`, which is static and read-only.
#[derive(Clone)]
pub struct KvBackendCatalogManager {
// TODO(LFC): Maybe use a real implementation for Standalone mode.
// Now we use `NoopKvCacheInvalidator` for Standalone mode. In Standalone mode, the KV backend
// is implemented by RaftEngine. Maybe we need a cache for it?
cache_invalidator: CacheInvalidatorRef,
partition_manager: PartitionRuleManagerRef,
table_metadata_manager: TableMetadataManagerRef,
/// A sub-CatalogManager that handles system tables
@@ -63,33 +69,33 @@ pub struct KvBackendCatalogManager {
table_cache: AsyncCache<String, TableRef>,
}
struct TableCacheInvalidator {
table_cache: AsyncCache<String, TableRef>,
}
impl TableCacheInvalidator {
pub fn new(table_cache: AsyncCache<String, TableRef>) -> Self {
Self { table_cache }
}
fn make_table(table_info_value: TableInfoValue) -> CatalogResult<TableRef> {
let table_info = table_info_value
.table_info
.try_into()
.context(catalog_err::InvalidTableInfoInCatalogSnafu)?;
Ok(DistTable::table(Arc::new(table_info)))
}
#[async_trait::async_trait]
impl CacheInvalidator for TableCacheInvalidator {
async fn invalidate(
&self,
_ctx: &Context,
caches: Vec<CacheIdent>,
) -> common_meta::error::Result<()> {
for cache in caches {
if let CacheIdent::TableName(table_name) = cache {
let table_cache_key = format_full_table_name(
&table_name.catalog_name,
&table_name.schema_name,
&table_name.table_name,
);
self.table_cache.invalidate(&table_cache_key).await;
}
}
impl CacheInvalidator for KvBackendCatalogManager {
async fn invalidate_table_id(&self, ctx: &Context, table_id: TableId) -> MetaResult<()> {
self.cache_invalidator
.invalidate_table_id(ctx, table_id)
.await
}
async fn invalidate_table_name(&self, ctx: &Context, table_name: TableName) -> MetaResult<()> {
let table_cache_key = format_full_table_name(
&table_name.catalog_name,
&table_name.schema_name,
&table_name.table_name,
);
self.cache_invalidator
.invalidate_table_name(ctx, table_name)
.await?;
self.table_cache.invalidate(&table_cache_key).await;
Ok(())
}
}
@@ -100,21 +106,11 @@ const TABLE_CACHE_TTL: Duration = Duration::from_secs(10 * 60);
const TABLE_CACHE_TTI: Duration = Duration::from_secs(5 * 60);
impl KvBackendCatalogManager {
pub async fn new(
backend: KvBackendRef,
multi_cache_invalidator: Arc<MultiCacheInvalidator>,
) -> Arc<Self> {
let table_cache: AsyncCache<String, TableRef> = CacheBuilder::new(TABLE_CACHE_MAX_CAPACITY)
.time_to_live(TABLE_CACHE_TTL)
.time_to_idle(TABLE_CACHE_TTI)
.build();
multi_cache_invalidator
.add_invalidator(Arc::new(TableCacheInvalidator::new(table_cache.clone())))
.await;
pub fn new(backend: KvBackendRef, cache_invalidator: CacheInvalidatorRef) -> Arc<Self> {
Arc::new_cyclic(|me| Self {
partition_manager: Arc::new(PartitionRuleManager::new(backend.clone())),
table_metadata_manager: Arc::new(TableMetadataManager::new(backend)),
cache_invalidator,
system_catalog: SystemCatalog {
catalog_manager: me.clone(),
catalog_cache: Cache::new(CATALOG_CACHE_MAX_CAPACITY),
@@ -123,7 +119,10 @@ impl KvBackendCatalogManager {
me.clone(),
)),
},
table_cache,
table_cache: CacheBuilder::new(TABLE_CACHE_MAX_CAPACITY)
.time_to_live(TABLE_CACHE_TTL)
.time_to_idle(TABLE_CACHE_TTI)
.build(),
})
}
@@ -142,11 +141,12 @@ impl CatalogManager for KvBackendCatalogManager {
self
}
async fn catalog_names(&self) -> Result<Vec<String>> {
async fn catalog_names(&self) -> CatalogResult<Vec<String>> {
let stream = self
.table_metadata_manager
.catalog_manager()
.catalog_names();
.catalog_names()
.await;
let keys = stream
.try_collect::<Vec<_>>()
@@ -157,11 +157,12 @@ impl CatalogManager for KvBackendCatalogManager {
Ok(keys)
}
async fn schema_names(&self, catalog: &str) -> Result<Vec<String>> {
async fn schema_names(&self, catalog: &str) -> CatalogResult<Vec<String>> {
let stream = self
.table_metadata_manager
.schema_manager()
.schema_names(catalog);
.schema_names(catalog)
.await;
let mut keys = stream
.try_collect::<BTreeSet<_>>()
.await
@@ -173,11 +174,12 @@ impl CatalogManager for KvBackendCatalogManager {
Ok(keys.into_iter().collect())
}
async fn table_names(&self, catalog: &str, schema: &str) -> Result<Vec<String>> {
async fn table_names(&self, catalog: &str, schema: &str) -> CatalogResult<Vec<String>> {
let stream = self
.table_metadata_manager
.table_name_manager()
.tables(catalog, schema);
.tables(catalog, schema)
.await;
let mut tables = stream
.try_collect::<Vec<_>>()
.await
@@ -191,7 +193,7 @@ impl CatalogManager for KvBackendCatalogManager {
Ok(tables.into_iter().collect())
}
async fn catalog_exists(&self, catalog: &str) -> Result<bool> {
async fn catalog_exists(&self, catalog: &str) -> CatalogResult<bool> {
self.table_metadata_manager
.catalog_manager()
.exists(CatalogNameKey::new(catalog))
@@ -199,7 +201,7 @@ impl CatalogManager for KvBackendCatalogManager {
.context(TableMetadataManagerSnafu)
}
async fn schema_exists(&self, catalog: &str, schema: &str) -> Result<bool> {
async fn schema_exists(&self, catalog: &str, schema: &str) -> CatalogResult<bool> {
if self.system_catalog.schema_exist(schema) {
return Ok(true);
}
@@ -211,7 +213,7 @@ impl CatalogManager for KvBackendCatalogManager {
.context(TableMetadataManagerSnafu)
}
async fn table_exists(&self, catalog: &str, schema: &str, table: &str) -> Result<bool> {
async fn table_exists(&self, catalog: &str, schema: &str, table: &str) -> CatalogResult<bool> {
if self.system_catalog.table_exist(schema, table) {
return Ok(true);
}
@@ -230,7 +232,7 @@ impl CatalogManager for KvBackendCatalogManager {
catalog: &str,
schema: &str,
table_name: &str,
) -> Result<Option<TableRef>> {
) -> CatalogResult<Option<TableRef>> {
if let Some(table) = self.system_catalog.table(catalog, schema, table_name) {
return Ok(Some(table));
}
@@ -264,7 +266,7 @@ impl CatalogManager for KvBackendCatalogManager {
}
.fail();
};
build_table(table_info_value)
make_table(table_info_value)
};
match self
@@ -287,7 +289,7 @@ impl CatalogManager for KvBackendCatalogManager {
&'a self,
catalog: &'a str,
schema: &'a str,
) -> BoxStream<'a, Result<TableRef>> {
) -> BoxStream<'a, CatalogResult<TableRef>> {
let sys_tables = try_stream!({
// System tables
let sys_table_names = self.system_catalog.table_names(schema);
@@ -302,6 +304,7 @@ impl CatalogManager for KvBackendCatalogManager {
.table_metadata_manager
.table_name_manager()
.tables(catalog, schema)
.await
.map_ok(|(_, v)| v.table_id());
const BATCH_SIZE: usize = 128;
let user_tables = try_stream!({
@@ -311,7 +314,7 @@ impl CatalogManager for KvBackendCatalogManager {
while let Some(table_ids) = table_id_chunks.next().await {
let table_ids = table_ids
.into_iter()
.collect::<std::result::Result<Vec<_>, _>>()
.collect::<Result<Vec<_>, _>>()
.map_err(BoxedError::new)
.context(ListTablesSnafu { catalog, schema })?;
@@ -323,7 +326,7 @@ impl CatalogManager for KvBackendCatalogManager {
.context(TableMetadataManagerSnafu)?;
for table_info_value in table_info_values.into_values() {
yield build_table(table_info_value)?;
yield make_table(table_info_value)?;
}
}
});
@@ -332,14 +335,6 @@ impl CatalogManager for KvBackendCatalogManager {
}
}
fn build_table(table_info_value: TableInfoValue) -> Result<TableRef> {
let table_info = table_info_value
.table_info
.try_into()
.context(InvalidTableInfoInCatalogSnafu)?;
Ok(DistTable::table(Arc::new(table_info)))
}
// TODO: This struct can hold a static map of all system tables when
// the upper layer (e.g., procedure) can inform the catalog manager
// that a new catalog is created.

View File

@@ -19,10 +19,10 @@ use std::any::Any;
use std::fmt::{Debug, Formatter};
use std::sync::Arc;
use api::v1::CreateTableExpr;
use futures::future::BoxFuture;
use futures_util::stream::BoxStream;
use table::metadata::TableId;
use table::requests::CreateTableRequest;
use table::TableRef;
use crate::error::Result;
@@ -75,9 +75,9 @@ pub type OpenSystemTableHook =
/// Register system table request:
/// - When the system table is already created and registered, the hook will be called
/// with the table ref after opening the system table
/// - When the system table does not exist, create and register the table by `create_table_expr` and call `open_hook` with the created table.
/// - When the system table does not exist, create and register the table by create_table_request and call open_hook with the created table.
pub struct RegisterSystemTableRequest {
pub create_table_expr: CreateTableExpr,
pub create_table_request: CreateTableRequest,
pub open_hook: Option<OpenSystemTableHook>,
}
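A hedged sketch of building such a request in its `CreateTableRequest` form; `scripts_table_request()` is a hypothetical helper, and the open hook is left out rather than guessing its exact closure shape:

```rust
// Sketch only: `scripts_table_request()` is a hypothetical builder for the
// system table's CreateTableRequest; no hook is attached here.
let request = RegisterSystemTableRequest {
    create_table_request: scripts_table_request(),
    open_hook: None,
};
```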

View File

@@ -16,6 +16,7 @@ arc-swap = "1.6"
arrow-flight.workspace = true
async-stream.workspace = true
async-trait.workspace = true
common-base.workspace = true
common-catalog.workspace = true
common-error.workspace = true
common-grpc.workspace = true
@@ -24,6 +25,10 @@ common-meta.workspace = true
common-query.workspace = true
common-recordbatch.workspace = true
common-telemetry.workspace = true
common-time.workspace = true
datafusion.workspace = true
datatypes.workspace = true
derive_builder.workspace = true
enum_dispatch = "0.3"
futures-util.workspace = true
lazy_static.workspace = true
@@ -32,7 +37,9 @@ parking_lot = "0.12"
prometheus.workspace = true
prost.workspace = true
rand.workspace = true
serde.workspace = true
serde_json.workspace = true
session.workspace = true
snafu.workspace = true
tokio.workspace = true
tokio-stream = { workspace = true, features = ["net"] }

View File

@@ -14,7 +14,7 @@
use std::sync::Arc;
use api::v1::region::{QueryRequest, RegionRequest};
use api::v1::region::{QueryRequest, RegionRequest, RegionResponse};
use api::v1::ResponseHeader;
use arc_swap::ArcSwapOption;
use arrow_flight::Ticket;
@@ -23,7 +23,7 @@ use async_trait::async_trait;
use common_error::ext::{BoxedError, ErrorExt};
use common_error::status_code::StatusCode;
use common_grpc::flight::{FlightDecoder, FlightMessage};
use common_meta::datanode_manager::{Datanode, HandleResponse};
use common_meta::datanode_manager::{AffectedRows, Datanode};
use common_meta::error::{self as meta_error, Result as MetaResult};
use common_recordbatch::error::ExternalSnafu;
use common_recordbatch::{RecordBatchStreamWrapper, SendableRecordBatchStream};
@@ -46,7 +46,7 @@ pub struct RegionRequester {
#[async_trait]
impl Datanode for RegionRequester {
async fn handle(&self, request: RegionRequest) -> MetaResult<HandleResponse> {
async fn handle(&self, request: RegionRequest) -> MetaResult<AffectedRows> {
self.handle_inner(request).await.map_err(|err| {
if err.should_retry() {
meta_error::Error::RetryLater {
@@ -165,7 +165,7 @@ impl RegionRequester {
Ok(Box::pin(record_batch_stream))
}
async fn handle_inner(&self, request: RegionRequest) -> Result<HandleResponse> {
async fn handle_inner(&self, request: RegionRequest) -> Result<AffectedRows> {
let request_type = request
.body
.as_ref()
@@ -178,7 +178,10 @@ impl RegionRequester {
let mut client = self.client.raw_region_client()?;
let response = client
let RegionResponse {
header,
affected_rows,
} = client
.handle(request)
.await
.map_err(|e| {
@@ -192,20 +195,19 @@ impl RegionRequester {
})?
.into_inner();
check_response_header(&response.header)?;
check_response_header(header)?;
Ok(HandleResponse::from_region_response(response))
Ok(affected_rows as _)
}
pub async fn handle(&self, request: RegionRequest) -> Result<HandleResponse> {
pub async fn handle(&self, request: RegionRequest) -> Result<AffectedRows> {
self.handle_inner(request).await
}
}
pub fn check_response_header(header: &Option<ResponseHeader>) -> Result<()> {
pub fn check_response_header(header: Option<ResponseHeader>) -> Result<()> {
let status = header
.as_ref()
.and_then(|header| header.status.as_ref())
.and_then(|header| header.status)
.context(IllegalDatabaseResponseSnafu {
err_msg: "either response header or status is missing",
})?;
@@ -219,7 +221,7 @@ pub fn check_response_header(header: &Option<ResponseHeader>) -> Result<()> {
})?;
ServerSnafu {
code,
msg: status.err_msg.clone(),
msg: status.err_msg,
}
.fail()
}
@@ -234,19 +236,19 @@ mod test {
#[test]
fn test_check_response_header() {
let result = check_response_header(&None);
let result = check_response_header(None);
assert!(matches!(
result.unwrap_err(),
IllegalDatabaseResponse { .. }
));
let result = check_response_header(&Some(ResponseHeader { status: None }));
let result = check_response_header(Some(ResponseHeader { status: None }));
assert!(matches!(
result.unwrap_err(),
IllegalDatabaseResponse { .. }
));
let result = check_response_header(&Some(ResponseHeader {
let result = check_response_header(Some(ResponseHeader {
status: Some(PbStatus {
status_code: StatusCode::Success as u32,
err_msg: String::default(),
@@ -254,7 +256,7 @@ mod test {
}));
assert!(result.is_ok());
let result = check_response_header(&Some(ResponseHeader {
let result = check_response_header(Some(ResponseHeader {
status: Some(PbStatus {
status_code: u32::MAX,
err_msg: String::default(),
@@ -265,7 +267,7 @@ mod test {
IllegalDatabaseResponse { .. }
));
let result = check_response_header(&Some(ResponseHeader {
let result = check_response_header(Some(ResponseHeader {
status: Some(PbStatus {
status_code: StatusCode::Internal as u32,
err_msg: "blabla".to_string(),

View File

@@ -16,6 +16,7 @@ tokio-console = ["common-telemetry/tokio-console"]
workspace = true
[dependencies]
anymap = "1.0.0-beta.2"
async-trait.workspace = true
auth.workspace = true
catalog.workspace = true
@@ -51,6 +52,7 @@ meta-client.workspace = true
meta-srv.workspace = true
mito2.workspace = true
nu-ansi-term = "0.46"
partition.workspace = true
plugins.workspace = true
prometheus.workspace = true
prost.workspace = true

View File

@@ -13,8 +13,5 @@
// limitations under the License.
fn main() {
// Trigger this script if the git branch/commit changes
println!("cargo:rerun-if-changed=.git/refs/heads");
common_version::setup_build_info();
}

View File

@@ -106,15 +106,9 @@ impl TableMetadataBencher {
.await
.unwrap();
let start = Instant::now();
let table_info = table_info.unwrap();
let table_id = table_info.table_info.ident.table_id;
let _ = self
.table_metadata_manager
.delete_table_metadata(
table_id,
&table_info.table_name(),
table_route.unwrap().region_routes().unwrap(),
)
.delete_table_metadata(&table_info.unwrap(), &table_route.unwrap())
.await;
start.elapsed()
},

View File

@@ -22,7 +22,6 @@ use catalog::kvbackend::{
use client::{Client, Database, OutputData, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_base::Plugins;
use common_error::ext::ErrorExt;
use common_meta::cache_invalidator::MultiCacheInvalidator;
use common_query::Output;
use common_recordbatch::RecordBatches;
use common_telemetry::logging;
@@ -253,11 +252,9 @@ async fn create_query_engine(meta_addr: &str) -> Result<DatafusionQueryEngine> {
let cached_meta_backend =
Arc::new(CachedMetaKvBackendBuilder::new(meta_client.clone()).build());
let multi_cache_invalidator = Arc::new(MultiCacheInvalidator::with_invalidators(vec![
cached_meta_backend.clone(),
]));
let catalog_list =
KvBackendCatalogManager::new(cached_meta_backend.clone(), multi_cache_invalidator).await;
KvBackendCatalogManager::new(cached_meta_backend.clone(), cached_meta_backend);
let plugins: Plugins = Default::default();
let state = Arc::new(QueryEngineState::new(
catalog_list,

View File

@@ -16,10 +16,9 @@ use std::sync::Arc;
use std::time::Duration;
use async_trait::async_trait;
use catalog::kvbackend::{CachedMetaKvBackendBuilder, KvBackendCatalogManager};
use catalog::kvbackend::CachedMetaKvBackendBuilder;
use clap::Parser;
use client::client_manager::DatanodeClients;
use common_meta::cache_invalidator::MultiCacheInvalidator;
use common_meta::heartbeat::handler::parse_mailbox_message::ParseMailboxMessageHandler;
use common_meta::heartbeat::handler::HandlerGroupExecutor;
use common_telemetry::logging;
@@ -248,19 +247,11 @@ impl StartCommand {
.cache_tti(cache_tti)
.build();
let cached_meta_backend = Arc::new(cached_meta_backend);
let multi_cache_invalidator = Arc::new(MultiCacheInvalidator::with_invalidators(vec![
cached_meta_backend.clone(),
]));
let catalog_manager = KvBackendCatalogManager::new(
cached_meta_backend.clone(),
multi_cache_invalidator.clone(),
)
.await;
let executor = HandlerGroupExecutor::new(vec![
Arc::new(ParseMailboxMessageHandler),
Arc::new(InvalidateTableCacheHandler::new(
multi_cache_invalidator.clone(),
cached_meta_backend.clone(),
)),
]);
@@ -272,12 +263,11 @@ impl StartCommand {
let mut instance = FrontendBuilder::new(
cached_meta_backend.clone(),
catalog_manager,
Arc::new(DatanodeClients::default()),
meta_client,
)
.with_cache_invalidator(cached_meta_backend)
.with_plugin(plugins.clone())
.with_cache_invalidator(multi_cache_invalidator)
.with_heartbeat_task(heartbeat_task)
.try_build()
.await

View File

@@ -16,11 +16,10 @@ use std::sync::Arc;
use std::{fs, path};
use async_trait::async_trait;
use catalog::kvbackend::KvBackendCatalogManager;
use clap::Parser;
use common_catalog::consts::MIN_USER_TABLE_ID;
use common_config::{metadata_store_dir, KvBackendConfig};
use common_meta::cache_invalidator::{CacheInvalidatorRef, MultiCacheInvalidator};
use common_meta::cache_invalidator::DummyCacheInvalidator;
use common_meta::datanode_manager::DatanodeManagerRef;
use common_meta::ddl::table_meta::{TableMetadataAllocator, TableMetadataAllocatorRef};
use common_meta::ddl::ProcedureExecutorRef;
@@ -400,10 +399,6 @@ impl StartCommand {
.await
.context(StartFrontendSnafu)?;
let multi_cache_invalidator = Arc::new(MultiCacheInvalidator::default());
let catalog_manager =
KvBackendCatalogManager::new(kv_backend.clone(), multi_cache_invalidator.clone()).await;
let builder =
DatanodeBuilder::new(dn_opts, fe_plugins.clone()).with_kv_backend(kv_backend.clone());
let datanode = builder.build().await.context(StartDatanodeSnafu)?;
@@ -427,27 +422,22 @@ impl StartCommand {
let table_meta_allocator = Arc::new(TableMetadataAllocator::new(
table_id_sequence,
wal_options_allocator.clone(),
table_metadata_manager.table_name_manager().clone(),
));
let ddl_task_executor = Self::create_ddl_task_executor(
table_metadata_manager,
procedure_manager.clone(),
datanode_manager.clone(),
multi_cache_invalidator,
table_meta_allocator,
)
.await?;
let mut frontend = FrontendBuilder::new(
kv_backend,
catalog_manager,
datanode_manager,
ddl_task_executor,
)
.with_plugin(fe_plugins.clone())
.try_build()
.await
.context(StartFrontendSnafu)?;
let mut frontend = FrontendBuilder::new(kv_backend, datanode_manager, ddl_task_executor)
.with_plugin(fe_plugins.clone())
.try_build()
.await
.context(StartFrontendSnafu)?;
let servers = Services::new(fe_opts.clone(), Arc::new(frontend.clone()), fe_plugins)
.build()
@@ -469,18 +459,16 @@ impl StartCommand {
table_metadata_manager: TableMetadataManagerRef,
procedure_manager: ProcedureManagerRef,
datanode_manager: DatanodeManagerRef,
cache_invalidator: CacheInvalidatorRef,
table_meta_allocator: TableMetadataAllocatorRef,
) -> Result<ProcedureExecutorRef> {
let procedure_executor: ProcedureExecutorRef = Arc::new(
DdlManager::try_new(
procedure_manager,
datanode_manager,
cache_invalidator,
Arc::new(DummyCacheInvalidator),
table_metadata_manager,
table_meta_allocator,
Arc::new(MemoryRegionKeeper::default()),
true,
)
.context(InitDdlManagerSnafu)?,
);

View File

@@ -1,6 +1,20 @@
// Copyright (c) 2017-present, PingCAP, Inc. Licensed under Apache-2.0.
// This file is copied from https://github.com/tikv/raft-engine/blob/0.3.0/src/util.rs
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// This file is copied from https://github.com/tikv/raft-engine/blob/8dd2a39f359ff16f5295f35343f626e0c10132fa/src/util.rs
use std::fmt::{self, Debug, Display, Write};
use std::ops::{Div, Mul};

View File

@@ -55,10 +55,10 @@ pub fn build_db_string(catalog: &str, schema: &str) -> String {
/// schema name
/// - if `[<catalog>-]` is provided, we split database name with `-` and use
/// `<catalog>` and `<schema>`.
pub fn parse_catalog_and_schema_from_db_string(db: &str) -> (String, String) {
pub fn parse_catalog_and_schema_from_db_string(db: &str) -> (&str, &str) {
match parse_optional_catalog_and_schema_from_db_string(db) {
(Some(catalog), schema) => (catalog, schema),
(None, schema) => (DEFAULT_CATALOG_NAME.to_string(), schema),
(None, schema) => (DEFAULT_CATALOG_NAME, schema),
}
}
@@ -66,12 +66,12 @@ pub fn parse_catalog_and_schema_from_db_string(db: &str) -> (String, String) {
///
/// Similar to [`parse_catalog_and_schema_from_db_string`] but returns an optional
/// catalog if it's not provided in the database name.
pub fn parse_optional_catalog_and_schema_from_db_string(db: &str) -> (Option<String>, String) {
pub fn parse_optional_catalog_and_schema_from_db_string(db: &str) -> (Option<&str>, &str) {
let parts = db.splitn(2, '-').collect::<Vec<&str>>();
if parts.len() == 2 {
(Some(parts[0].to_lowercase()), parts[1].to_lowercase())
(Some(parts[0]), parts[1])
} else {
(None, db.to_lowercase())
(None, db)
}
}
@@ -88,37 +88,32 @@ mod tests {
#[test]
fn test_parse_catalog_and_schema() {
assert_eq!(
(DEFAULT_CATALOG_NAME.to_string(), "fullschema".to_string()),
(DEFAULT_CATALOG_NAME, "fullschema"),
parse_catalog_and_schema_from_db_string("fullschema")
);
assert_eq!(
("catalog".to_string(), "schema".to_string()),
("catalog", "schema"),
parse_catalog_and_schema_from_db_string("catalog-schema")
);
assert_eq!(
("catalog".to_string(), "schema1-schema2".to_string()),
("catalog", "schema1-schema2"),
parse_catalog_and_schema_from_db_string("catalog-schema1-schema2")
);
assert_eq!(
(None, "fullschema".to_string()),
(None, "fullschema"),
parse_optional_catalog_and_schema_from_db_string("fullschema")
);
assert_eq!(
(Some("catalog".to_string()), "schema".to_string()),
(Some("catalog"), "schema"),
parse_optional_catalog_and_schema_from_db_string("catalog-schema")
);
assert_eq!(
(Some("catalog".to_string()), "schema".to_string()),
parse_optional_catalog_and_schema_from_db_string("CATALOG-SCHEMA")
);
assert_eq!(
(Some("catalog".to_string()), "schema1-schema2".to_string()),
(Some("catalog"), "schema1-schema2"),
parse_optional_catalog_and_schema_from_db_string("catalog-schema1-schema2")
);
}
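The `(&str, &str)` variant returns slices borrowed from the input and, as the owned-`String` side's `CATALOG-SCHEMA` assertion shows, it no longer lowercases the parts. A minimal usage sketch:

```rust
// Minimal sketch: the returned slices borrow from `db`, so nothing is
// allocated and the original casing is preserved.
let db = "greptime-public";
let (catalog, schema) = parse_catalog_and_schema_from_db_string(db);
assert_eq!(("greptime", "public"), (catalog, schema));

// Without a `-` separator the default catalog is used.
assert_eq!(
    (DEFAULT_CATALOG_NAME, "public"),
    parse_catalog_and_schema_from_db_string("public")
);
```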

View File

@@ -9,6 +9,7 @@ workspace = true
[dependencies]
common-base.workspace = true
humantime-serde.workspace = true
num_cpus.workspace = true
serde.workspace = true
sysinfo.workspace = true

View File

@@ -8,6 +8,7 @@ license.workspace = true
workspace = true
[dependencies]
arrow.workspace = true
bigdecimal.workspace = true
common-error.workspace = true
common-macro.workspace = true

View File

@@ -11,6 +11,7 @@ workspace = true
api.workspace = true
arc-swap = "1.0"
async-trait.workspace = true
chrono-tz = "0.6"
common-base.workspace = true
common-catalog.workspace = true
common-error.workspace = true
@@ -23,6 +24,7 @@ common-time.workspace = true
common-version.workspace = true
datafusion.workspace = true
datatypes.workspace = true
libc = "0.2"
num = "0.4"
num-traits = "0.2"
once_cell.workspace = true

View File

@@ -18,7 +18,6 @@ use async_trait::async_trait;
use common_base::AffectedRows;
use common_meta::rpc::procedure::{MigrateRegionRequest, ProcedureStateResponse};
use common_query::error::Result;
use common_query::Output;
use session::context::QueryContextRef;
use store_api::storage::RegionId;
use table::requests::{CompactTableRequest, DeleteRequest, FlushTableRequest, InsertRequest};
@@ -27,7 +26,7 @@ use table::requests::{CompactTableRequest, DeleteRequest, FlushTableRequest, Ins
#[async_trait]
pub trait TableMutationHandler: Send + Sync {
/// Inserts rows into the table.
async fn insert(&self, request: InsertRequest, ctx: QueryContextRef) -> Result<Output>;
async fn insert(&self, request: InsertRequest, ctx: QueryContextRef) -> Result<AffectedRows>;
/// Delete rows from the table.
async fn delete(&self, request: DeleteRequest, ctx: QueryContextRef) -> Result<AffectedRows>;

View File

@@ -23,7 +23,7 @@ use datatypes::prelude::VectorRef;
use datatypes::types::TimestampType;
use datatypes::value::Value;
use datatypes::vectors::{
Int64Vector, StringVector, TimestampMicrosecondVector, TimestampMillisecondVector,
StringVector, TimestampMicrosecondVector, TimestampMillisecondVector,
TimestampNanosecondVector, TimestampSecondVector, Vector,
};
use snafu::{ensure, OptionExt};
@@ -43,7 +43,6 @@ fn convert_to_timezone(arg: &str) -> Option<Timezone> {
fn convert_to_timestamp(arg: &Value) -> Option<Timestamp> {
match arg {
Value::Timestamp(ts) => Some(*ts),
Value::Int64(i) => Some(Timestamp::new_millisecond(*i)),
_ => None,
}
}
@@ -67,8 +66,6 @@ impl Function for ToTimezoneFunction {
fn signature(&self) -> Signature {
helper::one_of_sigs2(
vec![
ConcreteDataType::int32_datatype(),
ConcreteDataType::int64_datatype(),
ConcreteDataType::timestamp_second_datatype(),
ConcreteDataType::timestamp_millisecond_datatype(),
ConcreteDataType::timestamp_microsecond_datatype(),
@@ -89,45 +86,39 @@ impl Function for ToTimezoneFunction {
}
);
// TODO: maybe support epoch timestamp? https://github.com/GreptimeTeam/greptimedb/issues/3477
let ts = columns[0].data_type().as_timestamp().with_context(|| {
UnsupportedInputDataTypeSnafu {
function: NAME,
datatypes: columns.iter().map(|c| c.data_type()).collect::<Vec<_>>(),
}
})?;
let array = columns[0].to_arrow_array();
let times = match columns[0].data_type() {
ConcreteDataType::Int64(_) | ConcreteDataType::Int32(_) => {
let vector = Int64Vector::try_from_arrow_array(array).unwrap();
let times = match ts {
TimestampType::Second(_) => {
let vector = TimestampSecondVector::try_from_arrow_array(array).unwrap();
(0..vector.len())
.map(|i| convert_to_timestamp(&vector.get(i)))
.collect::<Vec<_>>()
}
ConcreteDataType::Timestamp(ts) => match ts {
TimestampType::Second(_) => {
let vector = TimestampSecondVector::try_from_arrow_array(array).unwrap();
(0..vector.len())
.map(|i| convert_to_timestamp(&vector.get(i)))
.collect::<Vec<_>>()
}
TimestampType::Millisecond(_) => {
let vector = TimestampMillisecondVector::try_from_arrow_array(array).unwrap();
(0..vector.len())
.map(|i| convert_to_timestamp(&vector.get(i)))
.collect::<Vec<_>>()
}
TimestampType::Microsecond(_) => {
let vector = TimestampMicrosecondVector::try_from_arrow_array(array).unwrap();
(0..vector.len())
.map(|i| convert_to_timestamp(&vector.get(i)))
.collect::<Vec<_>>()
}
TimestampType::Nanosecond(_) => {
let vector = TimestampNanosecondVector::try_from_arrow_array(array).unwrap();
(0..vector.len())
.map(|i| convert_to_timestamp(&vector.get(i)))
.collect::<Vec<_>>()
}
},
_ => UnsupportedInputDataTypeSnafu {
function: NAME,
datatypes: columns.iter().map(|c| c.data_type()).collect::<Vec<_>>(),
TimestampType::Millisecond(_) => {
let vector = TimestampMillisecondVector::try_from_arrow_array(array).unwrap();
(0..vector.len())
.map(|i| convert_to_timestamp(&vector.get(i)))
.collect::<Vec<_>>()
}
TimestampType::Microsecond(_) => {
let vector = TimestampMicrosecondVector::try_from_arrow_array(array).unwrap();
(0..vector.len())
.map(|i| convert_to_timestamp(&vector.get(i)))
.collect::<Vec<_>>()
}
TimestampType::Nanosecond(_) => {
let vector = TimestampNanosecondVector::try_from_arrow_array(array).unwrap();
(0..vector.len())
.map(|i| convert_to_timestamp(&vector.get(i)))
.collect::<Vec<_>>()
}
.fail()?,
};
let tzs = {
@@ -162,7 +153,7 @@ mod tests {
use datatypes::timestamp::{
TimestampMicrosecond, TimestampMillisecond, TimestampNanosecond, TimestampSecond,
};
use datatypes::vectors::{Int64Vector, StringVector};
use datatypes::vectors::StringVector;
use super::*;
@@ -266,48 +257,4 @@ mod tests {
let expect_times: VectorRef = Arc::new(StringVector::from(results));
assert_eq!(expect_times, vector);
}
#[test]
fn test_numerical_to_timezone() {
let f = ToTimezoneFunction;
let results = vec![
Some("1969-12-31 19:00:00.001"),
None,
Some("1970-01-01 03:00:00.001"),
None,
Some("2024-03-26 23:01:50"),
None,
Some("2024-03-27 06:02:00"),
None,
];
let times: Vec<Option<i64>> = vec![
Some(1),
None,
Some(1),
None,
Some(1711508510000),
None,
Some(1711508520000),
None,
];
let ts_vector: Int64Vector = Int64Vector::from_owned_iterator(times.into_iter());
let tzs = vec![
Some("America/New_York"),
None,
Some("Europe/Moscow"),
None,
Some("America/New_York"),
None,
Some("Europe/Moscow"),
None,
];
let args: Vec<VectorRef> = vec![
Arc::new(ts_vector),
Arc::new(StringVector::from(tzs.clone())),
];
let vector = f.eval(FunctionContext::default(), &args).unwrap();
assert_eq!(8, vector.len());
let expect_times: VectorRef = Arc::new(StringVector::from(results));
assert_eq!(expect_times, vector);
}
}

View File

@@ -35,7 +35,6 @@ impl FunctionState {
use common_base::AffectedRows;
use common_meta::rpc::procedure::{MigrateRegionRequest, ProcedureStateResponse};
use common_query::error::Result;
use common_query::Output;
use session::context::QueryContextRef;
use store_api::storage::RegionId;
use table::requests::{
@@ -71,8 +70,8 @@ impl FunctionState {
&self,
_request: InsertRequest,
_ctx: QueryContextRef,
) -> Result<Output> {
Ok(Output::new_with_affected_rows(ROWS))
) -> Result<AffectedRows> {
Ok(ROWS)
}
async fn delete(

View File

@@ -9,10 +9,12 @@ workspace = true
[dependencies]
async-trait.workspace = true
common-error.workspace = true
common-runtime.workspace = true
common-telemetry.workspace = true
reqwest.workspace = true
serde.workspace = true
serde_json.workspace = true
tokio.workspace = true
uuid.workspace = true

View File

@@ -9,11 +9,13 @@ workspace = true
[dependencies]
api.workspace = true
async-trait.workspace = true
common-base.workspace = true
common-catalog.workspace = true
common-error.workspace = true
common-macro.workspace = true
common-query.workspace = true
common-telemetry.workspace = true
common-time.workspace = true
datatypes.workspace = true
snafu.workspace = true

View File

@@ -10,6 +10,8 @@ workspace = true
[dependencies]
api.workspace = true
arrow-flight.workspace = true
async-trait = "0.1"
backtrace = "0.3"
common-base.workspace = true
common-error.workspace = true
common-macro.workspace = true
@@ -18,8 +20,10 @@ common-runtime.workspace = true
common-telemetry.workspace = true
common-time.workspace = true
dashmap.workspace = true
datafusion.workspace = true
datatypes.workspace = true
flatbuffers = "23.1"
futures = "0.3"
lazy_static.workspace = true
prost.workspace = true
snafu.workspace = true

View File

@@ -56,18 +56,6 @@ pub(crate) fn process_range_fn(args: TokenStream, input: TokenStream) -> TokenSt
} = &sig;
let arg_types = ok!(extract_input_types(inputs));
// with format like Float64Array
let array_types = arg_types
.iter()
.map(|ty| {
if let Type::Reference(TypeReference { elem, .. }) = ty {
elem.as_ref().clone()
} else {
ty.clone()
}
})
.collect::<Vec<_>>();
// build the struct and its impl block
// only do this when `display_name` is specified
if let Ok(display_name) = get_ident(&arg_map, "display_name", arg_span) {
@@ -76,8 +64,6 @@ pub(crate) fn process_range_fn(args: TokenStream, input: TokenStream) -> TokenSt
vis,
ok!(get_ident(&arg_map, "name", arg_span)),
display_name,
array_types,
ok!(get_ident(&arg_map, "ret", arg_span)),
);
result.extend(struct_code);
}
@@ -104,8 +90,6 @@ fn build_struct(
vis: Visibility,
name: Ident,
display_name_ident: Ident,
array_types: Vec<Type>,
return_array_type: Ident,
) -> TokenStream {
let display_name = display_name_ident.to_string();
quote! {
@@ -130,12 +114,18 @@ fn build_struct(
}
}
// TODO(ruihang): this should be parameterized
// time index column and value column
fn input_type() -> Vec<DataType> {
vec![#( RangeArray::convert_data_type(#array_types::new_null(0).data_type().clone()), )*]
vec![
RangeArray::convert_data_type(DataType::Timestamp(TimeUnit::Millisecond, None)),
RangeArray::convert_data_type(DataType::Float64),
]
}
// TODO(ruihang): this should be parameterized
fn return_type() -> DataType {
#return_array_type::new_null(0).data_type().clone()
DataType::Float64
}
}
}
@@ -170,7 +160,6 @@ fn build_calc_fn(
.map(|name| Ident::new(&format!("{}_range_array", name), name.span()))
.collect::<Vec<_>>();
let first_range_array_name = range_array_names.first().unwrap().clone();
let first_param_name = param_names.first().unwrap().clone();
quote! {
impl #name {
@@ -179,29 +168,13 @@ fn build_calc_fn(
#( let #range_array_names = RangeArray::try_new(extract_array(&input[#param_numbers])?.to_data().into())?; )*
// check arrays len
{
let len_first = #first_range_array_name.len();
#(
if len_first != #range_array_names.len() {
return Err(DataFusionError::Execution(format!("RangeArray have different lengths in PromQL function {}: array1={}, array2={}", #name::name(), len_first, #range_array_names.len())));
}
)*
}
// TODO(ruihang): add ensure!()
let mut result_array = Vec::new();
for index in 0..#first_range_array_name.len(){
#( let #param_names = #range_array_names.get(index).unwrap().as_any().downcast_ref::<#unref_param_types>().unwrap().clone(); )*
// check element len
{
let len_first = #first_param_name.len();
#(
if len_first != #param_names.len() {
return Err(DataFusionError::Execution(format!("RangeArray's element {} have different lengths in PromQL function {}: array1={}, array2={}", index, #name::name(), len_first, #param_names.len())));
}
)*
}
// TODO(ruihang): add ensure!() to check length
let result = #fn_name(#( &#param_names, )*);
result_array.push(result);

View File

@@ -13,6 +13,7 @@ workspace = true
[dependencies]
api.workspace = true
async-recursion = "1.0"
async-stream.workspace = true
async-trait.workspace = true
base64.workspace = true
bytes.workspace = true
@@ -25,6 +26,7 @@ common-macro.workspace = true
common-procedure.workspace = true
common-procedure-test.workspace = true
common-recordbatch.workspace = true
common-runtime.workspace = true
common-telemetry.workspace = true
common-time.workspace = true
common-wal.workspace = true
@@ -51,7 +53,6 @@ strum.workspace = true
table.workspace = true
tokio.workspace = true
tonic.workspace = true
typetag = "0.2"
[dev-dependencies]
chrono.workspace = true

View File

@@ -14,14 +14,14 @@
use std::sync::Arc;
use tokio::sync::RwLock;
use table::metadata::TableId;
use crate::error::Result;
use crate::instruction::CacheIdent;
use crate::key::table_info::TableInfoKey;
use crate::key::table_name::TableNameKey;
use crate::key::table_route::TableRouteKey;
use crate::key::TableMetaKey;
use crate::table_name::TableName;
/// KvBackend cache invalidator
#[async_trait::async_trait]
@@ -46,7 +46,10 @@ pub struct Context {
#[async_trait::async_trait]
pub trait CacheInvalidator: Send + Sync {
async fn invalidate(&self, ctx: &Context, caches: Vec<CacheIdent>) -> Result<()>;
// Invalidates table cache
async fn invalidate_table_id(&self, ctx: &Context, table_id: TableId) -> Result<()>;
async fn invalidate_table_name(&self, ctx: &Context, table_name: TableName) -> Result<()>;
}
pub type CacheInvalidatorRef = Arc<dyn CacheInvalidator>;
@@ -55,35 +58,11 @@ pub struct DummyCacheInvalidator;
#[async_trait::async_trait]
impl CacheInvalidator for DummyCacheInvalidator {
async fn invalidate(&self, _ctx: &Context, _caches: Vec<CacheIdent>) -> Result<()> {
async fn invalidate_table_id(&self, _ctx: &Context, _table_id: TableId) -> Result<()> {
Ok(())
}
}
#[derive(Default)]
pub struct MultiCacheInvalidator {
invalidators: RwLock<Vec<CacheInvalidatorRef>>,
}
impl MultiCacheInvalidator {
pub fn with_invalidators(invalidators: Vec<CacheInvalidatorRef>) -> Self {
Self {
invalidators: RwLock::new(invalidators),
}
}
pub async fn add_invalidator(&self, invalidator: CacheInvalidatorRef) {
self.invalidators.write().await.push(invalidator);
}
}
#[async_trait::async_trait]
impl CacheInvalidator for MultiCacheInvalidator {
async fn invalidate(&self, ctx: &Context, caches: Vec<CacheIdent>) -> Result<()> {
let invalidators = self.invalidators.read().await;
for invalidator in invalidators.iter() {
invalidator.invalidate(ctx, caches.clone()).await?;
}
async fn invalidate_table_name(&self, _ctx: &Context, _table_name: TableName) -> Result<()> {
Ok(())
}
}
@@ -93,22 +72,21 @@ impl<T> CacheInvalidator for T
where
T: KvCacheInvalidator,
{
async fn invalidate(&self, _ctx: &Context, caches: Vec<CacheIdent>) -> Result<()> {
for cache in caches {
match cache {
CacheIdent::TableId(table_id) => {
let key = TableInfoKey::new(table_id);
self.invalidate_key(&key.as_raw_key()).await;
async fn invalidate_table_name(&self, _ctx: &Context, table_name: TableName) -> Result<()> {
let key: TableNameKey = (&table_name).into();
self.invalidate_key(&key.as_raw_key()).await;
Ok(())
}
async fn invalidate_table_id(&self, _ctx: &Context, table_id: TableId) -> Result<()> {
let key = TableInfoKey::new(table_id);
self.invalidate_key(&key.as_raw_key()).await;
let key = &TableRouteKey { table_id };
self.invalidate_key(&key.as_raw_key()).await;
let key = &TableRouteKey { table_id };
self.invalidate_key(&key.as_raw_key()).await;
}
CacheIdent::TableName(table_name) => {
let key: TableNameKey = (&table_name).into();
self.invalidate_key(&key.as_raw_key()).await
}
}
}
Ok(())
}
}
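On the `MultiCacheInvalidator` side of this hunk, one invalidation fans out to every registered invalidator; the CLI hunks earlier register the cached meta backend first and the catalog manager's `TableCacheInvalidator` later. A hedged wiring sketch (the `cached_meta_backend`, `table_cache`, and `table_id` bindings are assumed from the surrounding diffs):

```rust
// Sketch only, mirroring the wiring shown in the CLI hunks above.
let multi_cache_invalidator = Arc::new(MultiCacheInvalidator::with_invalidators(vec![
    cached_meta_backend.clone(),
]));
// The catalog manager's table cache joins the fan-out afterwards.
multi_cache_invalidator
    .add_invalidator(Arc::new(TableCacheInvalidator::new(table_cache.clone())))
    .await;
// A single call now reaches both the KV backend cache and the table cache.
multi_cache_invalidator
    .invalidate(&Context::default(), vec![CacheIdent::TableId(table_id)])
    .await?;
```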

View File

@@ -12,10 +12,9 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashMap;
use std::sync::Arc;
use api::v1::region::{QueryRequest, RegionRequest, RegionResponse};
use api::v1::region::{QueryRequest, RegionRequest};
pub use common_base::AffectedRows;
use common_recordbatch::SendableRecordBatchStream;
@@ -26,7 +25,7 @@ use crate::peer::Peer;
#[async_trait::async_trait]
pub trait Datanode: Send + Sync {
/// Handles DML, and DDL requests.
async fn handle(&self, request: RegionRequest) -> Result<HandleResponse>;
async fn handle(&self, request: RegionRequest) -> Result<AffectedRows>;
/// Handles query requests
async fn handle_query(&self, request: QueryRequest) -> Result<SendableRecordBatchStream>;
@@ -42,27 +41,3 @@ pub trait DatanodeManager: Send + Sync {
}
pub type DatanodeManagerRef = Arc<dyn DatanodeManager>;
/// This result struct is derived from [RegionResponse]
#[derive(Debug)]
pub struct HandleResponse {
pub affected_rows: AffectedRows,
pub extension: HashMap<String, Vec<u8>>,
}
impl HandleResponse {
pub fn from_region_response(region_response: RegionResponse) -> Self {
Self {
affected_rows: region_response.affected_rows as _,
extension: region_response.extension,
}
}
/// Creates one response without extension
pub fn new(affected_rows: AffectedRows) -> Self {
Self {
affected_rows,
extension: Default::default(),
}
}
}
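For reference, a minimal no-op implementor of the trait surface shown above, written against the `HandleResponse` form (the `AffectedRows` form would simply return `Ok(0)`); this is a sketch, not code from the repository:

```rust
// Sketch only: a no-op Datanode used to illustrate the trait above.
struct NoopDatanode;

#[async_trait::async_trait]
impl Datanode for NoopDatanode {
    async fn handle(&self, _request: RegionRequest) -> Result<HandleResponse> {
        // No rows touched, no extension payload.
        Ok(HandleResponse::new(0))
    }

    async fn handle_query(&self, _request: QueryRequest) -> Result<SendableRecordBatchStream> {
        unimplemented!("the query path is not exercised in this sketch")
    }
}
```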

View File

@@ -22,20 +22,17 @@ use self::table_meta::TableMetadataAllocatorRef;
use crate::cache_invalidator::CacheInvalidatorRef;
use crate::datanode_manager::DatanodeManagerRef;
use crate::error::Result;
use crate::key::table_route::PhysicalTableRouteValue;
use crate::key::table_route::TableRouteValue;
use crate::key::TableMetadataManagerRef;
use crate::region_keeper::MemoryRegionKeeperRef;
use crate::rpc::ddl::{SubmitDdlTaskRequest, SubmitDdlTaskResponse};
use crate::rpc::procedure::{MigrateRegionRequest, MigrateRegionResponse, ProcedureStateResponse};
pub mod alter_logical_tables;
pub mod alter_table;
pub mod create_logical_tables;
pub mod create_table;
mod create_table_template;
pub mod drop_database;
pub mod drop_table;
mod physical_table_metadata;
pub mod table_meta;
#[cfg(any(test, feature = "testing"))]
pub mod test_util;
@@ -86,7 +83,7 @@ pub struct TableMetadata {
/// Table id.
pub table_id: TableId,
/// Route information for each region of the table.
pub table_route: PhysicalTableRouteValue,
pub table_route: TableRouteValue,
/// The encoded wal options for regions of the table.
// If a region does not have an associated wal options, no key for the region would be found in the map.
pub region_wal_options: HashMap<RegionNumber, String>,

View File

@@ -1,309 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
mod check;
mod metadata;
mod region_request;
mod table_cache_keys;
mod update_metadata;
use async_trait::async_trait;
use common_procedure::error::{FromJsonSnafu, Result as ProcedureResult, ToJsonSnafu};
use common_procedure::{Context, LockKey, Procedure, Status};
use common_telemetry::{info, warn};
use futures_util::future;
use itertools::Itertools;
use serde::{Deserialize, Serialize};
use snafu::{ensure, ResultExt};
use store_api::metadata::ColumnMetadata;
use store_api::metric_engine_consts::ALTER_PHYSICAL_EXTENSION_KEY;
use strum::AsRefStr;
use table::metadata::TableId;
use crate::ddl::utils::add_peer_context_if_needed;
use crate::ddl::{physical_table_metadata, DdlContext};
use crate::error::{DecodeJsonSnafu, Error, MetadataCorruptionSnafu, Result};
use crate::key::table_info::TableInfoValue;
use crate::key::table_route::PhysicalTableRouteValue;
use crate::key::DeserializedValueWithBytes;
use crate::lock_key::{CatalogLock, SchemaLock, TableLock};
use crate::rpc::ddl::AlterTableTask;
use crate::rpc::router::{find_leader_regions, find_leaders};
use crate::{cache_invalidator, metrics, ClusterId};
pub struct AlterLogicalTablesProcedure {
pub context: DdlContext,
pub data: AlterTablesData,
}
impl AlterLogicalTablesProcedure {
pub const TYPE_NAME: &'static str = "metasrv-procedure::AlterLogicalTables";
pub fn new(
cluster_id: ClusterId,
tasks: Vec<AlterTableTask>,
physical_table_id: TableId,
context: DdlContext,
) -> Self {
Self {
context,
data: AlterTablesData {
cluster_id,
state: AlterTablesState::Prepare,
tasks,
table_info_values: vec![],
physical_table_id,
physical_table_info: None,
physical_table_route: None,
physical_columns: vec![],
},
}
}
pub fn from_json(json: &str, context: DdlContext) -> ProcedureResult<Self> {
let data = serde_json::from_str(json).context(FromJsonSnafu)?;
Ok(Self { context, data })
}
pub(crate) async fn on_prepare(&mut self) -> Result<Status> {
// Checks all the tasks
self.check_input_tasks()?;
// Fills the table info values
self.fill_table_info_values().await?;
// Checks the physical table, must after [fill_table_info_values]
self.check_physical_table().await?;
// Fills the physical table info
self.fill_physical_table_info().await?;
// Filter the finished tasks
let finished_tasks = self.check_finished_tasks()?;
let already_finished_count = finished_tasks
.iter()
.map(|x| if *x { 1 } else { 0 })
.sum::<usize>();
let apply_tasks_count = self.data.tasks.len();
if already_finished_count == apply_tasks_count {
info!("All the alter tasks are finished, will skip the procedure.");
// Re-invalidate the table cache
self.data.state = AlterTablesState::InvalidateTableCache;
return Ok(Status::executing(true));
} else if already_finished_count > 0 {
info!(
"There are {} alter tasks, {} of them were already finished.",
apply_tasks_count, already_finished_count
);
}
self.filter_task(&finished_tasks)?;
// Next state
self.data.state = AlterTablesState::SubmitAlterRegionRequests;
Ok(Status::executing(true))
}
pub(crate) async fn on_submit_alter_region_requests(&mut self) -> Result<Status> {
// Safety: we have checked the state in on_prepare
let physical_table_route = &self.data.physical_table_route.as_ref().unwrap();
let leaders = find_leaders(&physical_table_route.region_routes);
let mut alter_region_tasks = Vec::with_capacity(leaders.len());
for peer in leaders {
let requester = self.context.datanode_manager.datanode(&peer).await;
let region_numbers = find_leader_regions(&physical_table_route.region_routes, &peer);
for region_number in region_numbers {
let request = self.make_request(region_number)?;
let peer = peer.clone();
let requester = requester.clone();
alter_region_tasks.push(async move {
requester
.handle(request)
.await
.map_err(add_peer_context_if_needed(peer))
});
}
}
// Collects responses from all the alter region tasks.
let phy_raw_schemas = future::join_all(alter_region_tasks)
.await
.into_iter()
.map(|res| res.map(|mut res| res.extension.remove(ALTER_PHYSICAL_EXTENSION_KEY)))
.collect::<Result<Vec<_>>>()?;
if phy_raw_schemas.is_empty() {
self.data.state = AlterTablesState::UpdateMetadata;
return Ok(Status::executing(true));
}
// Verify all the physical schemas are the same
// Safety: previous check ensures this vec is not empty
let first = phy_raw_schemas.first().unwrap();
ensure!(
phy_raw_schemas.iter().all(|x| x == first),
MetadataCorruptionSnafu {
err_msg: "The physical schemas from datanodes are not the same."
}
);
// Decodes the physical raw schemas
if let Some(phy_raw_schema) = first {
self.data.physical_columns =
ColumnMetadata::decode_list(phy_raw_schema).context(DecodeJsonSnafu)?;
} else {
warn!("altering logical table result doesn't contains extension key `{ALTER_PHYSICAL_EXTENSION_KEY}`,leaving the physical table's schema unchanged");
}
self.data.state = AlterTablesState::UpdateMetadata;
Ok(Status::executing(true))
}
pub(crate) async fn on_update_metadata(&mut self) -> Result<Status> {
if !self.data.physical_columns.is_empty() {
let physical_table_info = self.data.physical_table_info.as_ref().unwrap();
// Generates new table info
let old_raw_table_info = physical_table_info.table_info.clone();
let new_raw_table_info = physical_table_metadata::build_new_physical_table_info(
old_raw_table_info,
&self.data.physical_columns,
);
// Updates physical table's metadata
self.context
.table_metadata_manager
.update_table_info(
DeserializedValueWithBytes::from_inner(physical_table_info.clone()),
new_raw_table_info,
)
.await?;
}
let table_info_values = self.build_update_metadata()?;
let manager = &self.context.table_metadata_manager;
let chunk_size = manager.batch_update_table_info_value_chunk_size();
if table_info_values.len() > chunk_size {
let chunks = table_info_values
.into_iter()
.chunks(chunk_size)
.into_iter()
.map(|check| check.collect::<Vec<_>>())
.collect::<Vec<_>>();
for chunk in chunks {
manager.batch_update_table_info_values(chunk).await?;
}
} else {
manager
.batch_update_table_info_values(table_info_values)
.await?;
}
self.data.state = AlterTablesState::InvalidateTableCache;
Ok(Status::executing(true))
}
pub(crate) async fn on_invalidate_table_cache(&mut self) -> Result<Status> {
let ctx = cache_invalidator::Context::default();
let to_invalidate = self.build_table_cache_keys_to_invalidate();
self.context
.cache_invalidator
.invalidate(&ctx, to_invalidate)
.await?;
Ok(Status::done())
}
}
#[async_trait]
impl Procedure for AlterLogicalTablesProcedure {
fn type_name(&self) -> &str {
Self::TYPE_NAME
}
async fn execute(&mut self, _ctx: &Context) -> ProcedureResult<Status> {
let error_handler = |e: Error| {
if e.is_retry_later() {
common_procedure::Error::retry_later(e)
} else {
common_procedure::Error::external(e)
}
};
let state = &self.data.state;
let step = state.as_ref();
let _timer = metrics::METRIC_META_PROCEDURE_ALTER_TABLE
.with_label_values(&[step])
.start_timer();
match state {
AlterTablesState::Prepare => self.on_prepare().await,
AlterTablesState::SubmitAlterRegionRequests => {
self.on_submit_alter_region_requests().await
}
AlterTablesState::UpdateMetadata => self.on_update_metadata().await,
AlterTablesState::InvalidateTableCache => self.on_invalidate_table_cache().await,
}
.map_err(error_handler)
}
fn dump(&self) -> ProcedureResult<String> {
serde_json::to_string(&self.data).context(ToJsonSnafu)
}
fn lock_key(&self) -> LockKey {
// CatalogLock, SchemaLock,
// TableLock
// TableNameLock(s)
let mut lock_key = Vec::with_capacity(2 + 1 + self.data.tasks.len());
let table_ref = self.data.tasks[0].table_ref();
lock_key.push(CatalogLock::Read(table_ref.catalog).into());
lock_key.push(SchemaLock::read(table_ref.catalog, table_ref.schema).into());
lock_key.push(TableLock::Write(self.data.physical_table_id).into());
lock_key.extend(
self.data
.table_info_values
.iter()
.map(|table| TableLock::Write(table.table_info.ident.table_id).into()),
);
LockKey::new(lock_key)
}
}
#[derive(Debug, Serialize, Deserialize)]
pub struct AlterTablesData {
cluster_id: ClusterId,
state: AlterTablesState,
tasks: Vec<AlterTableTask>,
/// Table info values before the alter operation.
/// Corresponding one-to-one with the AlterTableTask in tasks.
table_info_values: Vec<TableInfoValue>,
/// Physical table info
physical_table_id: TableId,
physical_table_info: Option<TableInfoValue>,
physical_table_route: Option<PhysicalTableRouteValue>,
physical_columns: Vec<ColumnMetadata>,
}
#[derive(Debug, Serialize, Deserialize, AsRefStr)]
enum AlterTablesState {
/// Prepares to alter the table
Prepare,
SubmitAlterRegionRequests,
/// Updates table metadata.
UpdateMetadata,
/// Broadcasts the invalidating table cache instruction.
InvalidateTableCache,
}

View File

@@ -1,136 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashSet;
use api::v1::alter_expr::Kind;
use snafu::{ensure, OptionExt};
use crate::ddl::alter_logical_tables::AlterLogicalTablesProcedure;
use crate::error::{AlterLogicalTablesInvalidArgumentsSnafu, Result};
use crate::key::table_info::TableInfoValue;
use crate::key::table_route::TableRouteValue;
use crate::rpc::ddl::AlterTableTask;
impl AlterLogicalTablesProcedure {
pub(crate) fn check_input_tasks(&self) -> Result<()> {
self.check_schema()?;
self.check_alter_kind()?;
Ok(())
}
pub(crate) async fn check_physical_table(&self) -> Result<()> {
let table_route_manager = self.context.table_metadata_manager.table_route_manager();
let table_ids = self
.data
.table_info_values
.iter()
.map(|v| v.table_info.ident.table_id)
.collect::<Vec<_>>();
let table_routes = table_route_manager
.table_route_storage()
.batch_get(&table_ids)
.await?;
let physical_table_id = self.data.physical_table_id;
let is_same_physical_table = table_routes.iter().all(|r| {
if let Some(TableRouteValue::Logical(r)) = r {
r.physical_table_id() == physical_table_id
} else {
false
}
});
ensure!(
is_same_physical_table,
AlterLogicalTablesInvalidArgumentsSnafu {
err_msg: "All the tasks should have the same physical table id"
}
);
Ok(())
}
pub(crate) fn check_finished_tasks(&self) -> Result<Vec<bool>> {
let task = &self.data.tasks;
let table_info_values = &self.data.table_info_values;
Ok(task
.iter()
.zip(table_info_values.iter())
.map(|(task, table)| Self::check_finished_task(task, table))
.collect())
}
// Checks if the schemas of the tasks are the same
fn check_schema(&self) -> Result<()> {
let is_same_schema = self.data.tasks.windows(2).all(|pair| {
pair[0].alter_table.catalog_name == pair[1].alter_table.catalog_name
&& pair[0].alter_table.schema_name == pair[1].alter_table.schema_name
});
ensure!(
is_same_schema,
AlterLogicalTablesInvalidArgumentsSnafu {
err_msg: "Schemas of the tasks are not the same"
}
);
Ok(())
}
fn check_alter_kind(&self) -> Result<()> {
for task in &self.data.tasks {
let kind = task.alter_table.kind.as_ref().context(
AlterLogicalTablesInvalidArgumentsSnafu {
err_msg: "Alter kind is missing",
},
)?;
let Kind::AddColumns(_) = kind else {
return AlterLogicalTablesInvalidArgumentsSnafu {
err_msg: "Only support add columns operation",
}
.fail();
};
}
Ok(())
}
fn check_finished_task(task: &AlterTableTask, table: &TableInfoValue) -> bool {
let columns = table
.table_info
.meta
.schema
.column_schemas
.iter()
.map(|c| &c.name)
.collect::<HashSet<_>>();
let Some(kind) = task.alter_table.kind.as_ref() else {
return true; // Never get here since we have checked it in `check_alter_kind`
};
let Kind::AddColumns(add_columns) = kind else {
return true; // Never get here since we have checked it in `check_alter_kind`
};
// We only check that all columns have been finished. That is to say,
// if one part is finished but another part is not, it will be considered
// unfinished.
add_columns
.add_columns
.iter()
.map(|add_column| add_column.column_def.as_ref().map(|c| &c.name))
.all(|column| column.map(|c| columns.contains(c)).unwrap_or(false))
}
}

View File

@@ -1,159 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use common_catalog::format_full_table_name;
use snafu::OptionExt;
use table::metadata::TableId;
use crate::ddl::alter_logical_tables::AlterLogicalTablesProcedure;
use crate::error::{
AlterLogicalTablesInvalidArgumentsSnafu, Result, TableInfoNotFoundSnafu, TableNotFoundSnafu,
TableRouteNotFoundSnafu,
};
use crate::key::table_info::TableInfoValue;
use crate::key::table_name::TableNameKey;
use crate::key::table_route::TableRouteValue;
use crate::rpc::ddl::AlterTableTask;
impl AlterLogicalTablesProcedure {
pub(crate) fn filter_task(&mut self, finished_tasks: &[bool]) -> Result<()> {
debug_assert_eq!(finished_tasks.len(), self.data.tasks.len());
debug_assert_eq!(finished_tasks.len(), self.data.table_info_values.len());
self.data.tasks = self
.data
.tasks
.drain(..)
.zip(finished_tasks.iter())
.filter_map(|(task, finished)| if *finished { None } else { Some(task) })
.collect();
self.data.table_info_values = self
.data
.table_info_values
.drain(..)
.zip(finished_tasks.iter())
.filter_map(|(table_info_value, finished)| {
if *finished {
None
} else {
Some(table_info_value)
}
})
.collect();
Ok(())
}
pub(crate) async fn fill_physical_table_info(&mut self) -> Result<()> {
let (physical_table_info, physical_table_route) = self
.context
.table_metadata_manager
.get_full_table_info(self.data.physical_table_id)
.await?;
let physical_table_info = physical_table_info
.with_context(|| TableInfoNotFoundSnafu {
table: format!("table id - {}", self.data.physical_table_id),
})?
.into_inner();
let physical_table_route = physical_table_route
.context(TableRouteNotFoundSnafu {
table_id: self.data.physical_table_id,
})?
.into_inner();
self.data.physical_table_info = Some(physical_table_info);
let TableRouteValue::Physical(physical_table_route) = physical_table_route else {
return AlterLogicalTablesInvalidArgumentsSnafu {
err_msg: format!(
"expected a physical table but got a logical table: {:?}",
self.data.physical_table_id
),
}
.fail();
};
self.data.physical_table_route = Some(physical_table_route);
Ok(())
}
pub(crate) async fn fill_table_info_values(&mut self) -> Result<()> {
let table_ids = self.get_all_table_ids().await?;
let table_info_values = self.get_all_table_info_values(&table_ids).await?;
debug_assert_eq!(table_info_values.len(), self.data.tasks.len());
self.data.table_info_values = table_info_values;
Ok(())
}
async fn get_all_table_info_values(
&self,
table_ids: &[TableId],
) -> Result<Vec<TableInfoValue>> {
let table_info_manager = self.context.table_metadata_manager.table_info_manager();
let mut table_info_map = table_info_manager.batch_get(table_ids).await?;
let mut table_info_values = Vec::with_capacity(table_ids.len());
for (table_id, task) in table_ids.iter().zip(self.data.tasks.iter()) {
let table_info_value =
table_info_map
.remove(table_id)
.with_context(|| TableInfoNotFoundSnafu {
table: extract_table_name(task),
})?;
table_info_values.push(table_info_value);
}
Ok(table_info_values)
}
async fn get_all_table_ids(&self) -> Result<Vec<TableId>> {
let table_name_manager = self.context.table_metadata_manager.table_name_manager();
let table_name_keys = self
.data
.tasks
.iter()
.map(|task| extract_table_name_key(task))
.collect();
let table_name_values = table_name_manager.batch_get(table_name_keys).await?;
let mut table_ids = Vec::with_capacity(table_name_values.len());
for (value, task) in table_name_values.into_iter().zip(self.data.tasks.iter()) {
let table_id = value
.with_context(|| TableNotFoundSnafu {
table_name: extract_table_name(task),
})?
.table_id();
table_ids.push(table_id);
}
Ok(table_ids)
}
}
#[inline]
fn extract_table_name(task: &AlterTableTask) -> String {
format_full_table_name(
&task.alter_table.catalog_name,
&task.alter_table.schema_name,
&task.alter_table.table_name,
)
}
#[inline]
fn extract_table_name_key(task: &AlterTableTask) -> TableNameKey {
TableNameKey::new(
&task.alter_table.catalog_name,
&task.alter_table.schema_name,
&task.alter_table.table_name,
)
}

View File

@@ -1,98 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use api::v1;
use api::v1::alter_expr::Kind;
use api::v1::region::{
alter_request, region_request, AddColumn, AddColumns, AlterRequest, AlterRequests,
RegionColumnDef, RegionRequest, RegionRequestHeader,
};
use common_telemetry::tracing_context::TracingContext;
use store_api::storage::{RegionId, RegionNumber};
use crate::ddl::alter_logical_tables::AlterLogicalTablesProcedure;
use crate::error::Result;
use crate::key::table_info::TableInfoValue;
use crate::rpc::ddl::AlterTableTask;
impl AlterLogicalTablesProcedure {
pub(crate) fn make_request(&self, region_number: RegionNumber) -> Result<RegionRequest> {
let alter_requests = self.make_alter_region_requests(region_number)?;
let request = RegionRequest {
header: Some(RegionRequestHeader {
tracing_context: TracingContext::from_current_span().to_w3c(),
..Default::default()
}),
body: Some(region_request::Body::Alters(alter_requests)),
};
Ok(request)
}
fn make_alter_region_requests(&self, region_number: RegionNumber) -> Result<AlterRequests> {
let mut requests = Vec::with_capacity(self.data.tasks.len());
for (task, table) in self
.data
.tasks
.iter()
.zip(self.data.table_info_values.iter())
{
let region_id = RegionId::new(table.table_info.ident.table_id, region_number);
let request = self.make_alter_region_request(region_id, task, table)?;
requests.push(request);
}
Ok(AlterRequests { requests })
}
fn make_alter_region_request(
&self,
region_id: RegionId,
task: &AlterTableTask,
table: &TableInfoValue,
) -> Result<AlterRequest> {
let region_id = region_id.as_u64();
let schema_version = table.table_info.ident.version;
let kind = match &task.alter_table.kind {
Some(Kind::AddColumns(add_columns)) => Some(alter_request::Kind::AddColumns(
to_region_add_columns(add_columns),
)),
_ => unreachable!(), // Safety: we have checked the kind in check_input_tasks
};
Ok(AlterRequest {
region_id,
schema_version,
kind,
})
}
}
fn to_region_add_columns(add_columns: &v1::AddColumns) -> AddColumns {
let add_columns = add_columns
.add_columns
.iter()
.map(|add_column| {
let region_column_def = RegionColumnDef {
column_def: add_column.column_def.clone(),
..Default::default() // other fields are not used in alter logical table
};
AddColumn {
column_def: Some(region_column_def),
..Default::default() // other fields are not used in alter logical table
}
})
.collect();
AddColumns { add_columns }
}

View File

@@ -1,51 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use table::metadata::RawTableInfo;
use crate::ddl::alter_logical_tables::AlterLogicalTablesProcedure;
use crate::instruction::CacheIdent;
use crate::table_name::TableName;
impl AlterLogicalTablesProcedure {
pub(crate) fn build_table_cache_keys_to_invalidate(&self) -> Vec<CacheIdent> {
let mut cache_keys = self
.data
.table_info_values
.iter()
.flat_map(|table| {
vec![
CacheIdent::TableId(table.table_info.ident.table_id),
CacheIdent::TableName(extract_table_name(&table.table_info)),
]
})
.collect::<Vec<_>>();
cache_keys.push(CacheIdent::TableId(self.data.physical_table_id));
// Safety: physical_table_info already filled in previous steps
let physical_table_info = &self.data.physical_table_info.as_ref().unwrap().table_info;
cache_keys.push(CacheIdent::TableName(extract_table_name(
physical_table_info,
)));
cache_keys
}
}
fn extract_table_name(table_info: &RawTableInfo) -> TableName {
TableName::new(
&table_info.catalog_name,
&table_info.schema_name,
&table_info.name,
)
}
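
The deleted `table_cache_keys.rs` above collects two cache idents (id and name) per logical table and then appends the physical table's id and name as well, since altering logical tables can change the physical table's schema. A rough sketch of the same collection with simplified stand-in types (the real `CacheIdent` and `TableName` are richer):

```rust
// Toy stand-ins for CacheIdent/TableName; names here are illustrative only.
#[derive(Debug, Clone, PartialEq)]
enum CacheIdent {
    TableId(u32),
    TableName(String),
}

struct TableEntry {
    table_id: u32,
    full_name: String,
}

fn build_table_cache_keys_to_invalidate(
    logical_tables: &[TableEntry],
    physical: &TableEntry,
) -> Vec<CacheIdent> {
    let mut keys: Vec<CacheIdent> = logical_tables
        .iter()
        .flat_map(|t| {
            vec![
                CacheIdent::TableId(t.table_id),
                CacheIdent::TableName(t.full_name.clone()),
            ]
        })
        .collect();
    // The physical table's cache entries are invalidated too,
    // because its schema may have changed.
    keys.push(CacheIdent::TableId(physical.table_id));
    keys.push(CacheIdent::TableName(physical.full_name.clone()));
    keys
}

fn main() {
    let logical = vec![TableEntry {
        table_id: 1025,
        full_name: "greptime.public.metric_0".into(),
    }];
    let phy = TableEntry {
        table_id: 1024,
        full_name: "greptime.public.phy".into(),
    };
    let keys = build_table_cache_keys_to_invalidate(&logical, &phy);
    assert_eq!(keys.len(), 3);
    println!("{keys:?}");
}
```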

View File

@@ -1,70 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use common_grpc_expr::alter_expr_to_request;
use snafu::ResultExt;
use table::metadata::{RawTableInfo, TableInfo};
use crate::ddl::alter_logical_tables::AlterLogicalTablesProcedure;
use crate::error;
use crate::error::{ConvertAlterTableRequestSnafu, Result};
use crate::key::table_info::TableInfoValue;
use crate::rpc::ddl::AlterTableTask;
impl AlterLogicalTablesProcedure {
pub(crate) fn build_update_metadata(&self) -> Result<Vec<(TableInfoValue, RawTableInfo)>> {
let mut table_info_values_to_update = Vec::with_capacity(self.data.tasks.len());
for (task, table) in self
.data
.tasks
.iter()
.zip(self.data.table_info_values.iter())
{
table_info_values_to_update.push(self.build_new_table_info(task, table)?);
}
Ok(table_info_values_to_update)
}
fn build_new_table_info(
&self,
task: &AlterTableTask,
table: &TableInfoValue,
) -> Result<(TableInfoValue, RawTableInfo)> {
// Builds new_meta
let table_info = TableInfo::try_from(table.table_info.clone())
.context(error::ConvertRawTableInfoSnafu)?;
let table_ref = task.table_ref();
let request =
alter_expr_to_request(table.table_info.ident.table_id, task.alter_table.clone())
.context(ConvertAlterTableRequestSnafu)?;
let new_meta = table_info
.meta
.builder_with_alter_kind(table_ref.table, &request.alter_kind, true)
.context(error::TableSnafu)?
.build()
.with_context(|_| error::BuildTableMetaSnafu {
table_name: table_ref.table,
})?;
let version = table_info.ident.version + 1;
let mut new_table = table_info;
new_table.meta = new_meta;
new_table.ident.version = version;
let mut raw_table_info = RawTableInfo::from(new_table);
raw_table_info.sort_columns();
Ok((table.clone(), raw_table_info))
}
}
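
The deleted `update_metadata.rs` above follows a copy-then-bump pattern: rebuild the table meta from the alter kind, keep the column order deterministic via `sort_columns()`, and advance `ident.version` by one. A toy sketch of that pattern, assuming a much simpler table-info shape than the real `RawTableInfo`:

```rust
// Toy table-info type; the real RawTableInfo carries far more fields.
#[derive(Debug, Clone)]
struct ToyTableInfo {
    version: u64,
    columns: Vec<String>,
}

fn build_new_table_info(old: &ToyTableInfo, added: &[&str]) -> ToyTableInfo {
    let mut new_info = old.clone();
    new_info.columns.extend(added.iter().map(|c| c.to_string()));
    // Keep the column order deterministic, mirroring `sort_columns()` above.
    new_info.columns.sort();
    // Every successful alter advances the schema version by exactly one.
    new_info.version = old.version + 1;
    new_info
}

fn main() {
    let old = ToyTableInfo {
        version: 3,
        columns: vec!["ts".into(), "val".into()],
    };
    let new_info = build_new_table_info(&old, &["host"]);
    assert_eq!(new_info.version, 4);
    println!("{new_info:?}");
}
```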

View File

@@ -43,7 +43,6 @@ use crate::cache_invalidator::Context;
use crate::ddl::utils::add_peer_context_if_needed;
use crate::ddl::DdlContext;
use crate::error::{self, ConvertAlterTableRequestSnafu, Error, InvalidProtoMsgSnafu, Result};
use crate::instruction::CacheIdent;
use crate::key::table_info::TableInfoValue;
use crate::key::table_name::TableNameKey;
use crate::key::DeserializedValueWithBytes;
@@ -67,6 +66,7 @@ impl AlterTableProcedure {
cluster_id: u64,
task: AlterTableTask,
table_info_value: DeserializedValueWithBytes<TableInfoValue>,
physical_table_info: Option<(TableId, TableName)>,
context: DdlContext,
) -> Result<Self> {
let alter_kind = task
@@ -86,7 +86,13 @@ impl AlterTableProcedure {
Ok(Self {
context,
data: AlterTableData::new(task, table_info_value, cluster_id, next_column_id),
data: AlterTableData::new(
task,
table_info_value,
physical_table_info,
cluster_id,
next_column_id,
),
kind,
})
}
@@ -274,7 +280,7 @@ impl AlterTableProcedure {
let new_meta = table_info
.meta
.builder_with_alter_kind(table_ref.table, &request.alter_kind, false)
.builder_with_alter_kind(table_ref.table, &request.alter_kind)
.context(error::TableSnafu)?
.build()
.with_context(|_| error::BuildTableMetaSnafu {
@@ -324,24 +330,35 @@ impl AlterTableProcedure {
async fn on_broadcast(&mut self) -> Result<Status> {
let alter_kind = self.alter_kind()?;
let cache_invalidator = &self.context.cache_invalidator;
let cache_keys = if matches!(alter_kind, Kind::RenameTable { .. }) {
vec![CacheIdent::TableName(self.data.table_ref().into())]
} else {
vec![
CacheIdent::TableId(self.data.table_id()),
CacheIdent::TableName(self.data.table_ref().into()),
]
};
cache_invalidator
.invalidate(&Context::default(), cache_keys)
.await?;
if matches!(alter_kind, Kind::RenameTable { .. }) {
cache_invalidator
.invalidate_table_name(&Context::default(), self.data.table_ref().into())
.await?;
} else {
cache_invalidator
.invalidate_table_id(&Context::default(), self.data.table_id())
.await?;
};
Ok(Status::done())
}
fn lock_key_inner(&self) -> Vec<StringKey> {
let mut lock_key = vec![];
if let Some((physical_table_id, physical_table_name)) = self.data.physical_table_info() {
lock_key.push(CatalogLock::Read(&physical_table_name.catalog_name).into());
lock_key.push(
SchemaLock::read(
&physical_table_name.catalog_name,
&physical_table_name.schema_name,
)
.into(),
);
lock_key.push(TableLock::Read(*physical_table_id).into())
}
let table_ref = self.data.table_ref();
let table_id = self.data.table_id();
lock_key.push(CatalogLock::Read(table_ref.catalog).into());
@@ -419,6 +436,8 @@ pub struct AlterTableData {
task: AlterTableTask,
/// Table info value before alteration.
table_info_value: DeserializedValueWithBytes<TableInfoValue>,
/// Physical table name, if the table to alter is a logical table.
physical_table_info: Option<(TableId, TableName)>,
/// Next column id of the table if the task adds columns to the table.
next_column_id: Option<ColumnId>,
}
@@ -427,6 +446,7 @@ impl AlterTableData {
pub fn new(
task: AlterTableTask,
table_info_value: DeserializedValueWithBytes<TableInfoValue>,
physical_table_info: Option<(TableId, TableName)>,
cluster_id: u64,
next_column_id: Option<ColumnId>,
) -> Self {
@@ -434,6 +454,7 @@ impl AlterTableData {
state: AlterTableState::Prepare,
task,
table_info_value,
physical_table_info,
cluster_id,
next_column_id,
}
@@ -450,6 +471,10 @@ impl AlterTableData {
fn table_info(&self) -> &RawTableInfo {
&self.table_info_value.table_info
}
fn physical_table_info(&self) -> Option<&(TableId, TableName)> {
self.physical_table_info.as_ref()
}
}
/// Creates region proto alter kind from `table_info` and `alter_kind`.
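
In `alter_table.rs` above, the lock key is built hierarchically: read locks on the catalog and schema, a read lock on the physical table when the target is a logical table, and then the altered table's own locks in the part of the function not shown in this hunk. A string-based sketch of that ordering; the final write lock on the altered table is an assumption here:

```rust
// String-based sketch of the hierarchical lock-key layout; the real code
// uses CatalogLock/SchemaLock/TableLock types from the lock_key module.
fn lock_key_inner(
    catalog: &str,
    schema: &str,
    table_id: u32,
    physical_table_id: Option<u32>,
) -> Vec<String> {
    let mut keys = Vec::new();
    // Coarse-grained read locks first, so concurrent DDLs on sibling tables
    // are not serialized unnecessarily.
    keys.push(format!("catalog/{catalog}:read"));
    keys.push(format!("schema/{catalog}/{schema}:read"));
    if let Some(physical) = physical_table_id {
        // Altering a logical table only needs to read the physical table's route.
        keys.push(format!("table/{physical}:read"));
    }
    // Assumed: the altered table itself is locked for write.
    keys.push(format!("table/{table_id}:write"));
    keys
}

fn main() {
    let keys = lock_key_inner("greptime", "public", 1025, Some(1024));
    assert_eq!(keys.len(), 4);
    println!("{keys:?}");
}
```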

View File

@@ -13,7 +13,6 @@
// limitations under the License.
use std::collections::HashMap;
use std::ops::Deref;
use api::v1::region::region_request::Body as PbRegionRequest;
use api::v1::region::{CreateRequests, RegionRequest, RegionRequestHeader};
@@ -21,41 +20,31 @@ use api::v1::CreateTableExpr;
use async_trait::async_trait;
use common_procedure::error::{FromJsonSnafu, Result as ProcedureResult, ToJsonSnafu};
use common_procedure::{Context as ProcedureContext, LockKey, Procedure, Status};
use common_telemetry::info;
use common_telemetry::tracing_context::TracingContext;
use common_telemetry::{info, warn};
use futures_util::future::join_all;
use itertools::Itertools;
use serde::{Deserialize, Serialize};
use snafu::{ensure, OptionExt, ResultExt};
use store_api::metadata::ColumnMetadata;
use store_api::metric_engine_consts::ALTER_PHYSICAL_EXTENSION_KEY;
use snafu::{ensure, ResultExt};
use store_api::storage::{RegionId, RegionNumber};
use strum::AsRefStr;
use table::metadata::{RawTableInfo, TableId};
use crate::cache_invalidator::Context;
use crate::ddl::create_table_template::{build_template, CreateRequestBuilder};
use crate::ddl::utils::{add_peer_context_if_needed, handle_retry_error, region_storage_path};
use crate::ddl::{physical_table_metadata, DdlContext};
use crate::error::{
DecodeJsonSnafu, MetadataCorruptionSnafu, Result, TableAlreadyExistsSnafu,
TableInfoNotFoundSnafu,
};
use crate::instruction::CacheIdent;
use crate::key::table_info::TableInfoValue;
use crate::ddl::DdlContext;
use crate::error::{Result, TableAlreadyExistsSnafu};
use crate::key::table_name::TableNameKey;
use crate::key::table_route::TableRouteValue;
use crate::key::DeserializedValueWithBytes;
use crate::lock_key::{CatalogLock, SchemaLock, TableLock, TableNameLock};
use crate::lock_key::{TableLock, TableNameLock};
use crate::peer::Peer;
use crate::rpc::ddl::CreateTableTask;
use crate::rpc::router::{find_leader_regions, find_leaders, RegionRoute};
use crate::table_name::TableName;
use crate::{metrics, ClusterId};
pub struct CreateLogicalTablesProcedure {
pub context: DdlContext,
pub data: CreateTablesData,
pub creator: TablesCreator,
}
impl CreateLogicalTablesProcedure {
@@ -67,29 +56,20 @@ impl CreateLogicalTablesProcedure {
physical_table_id: TableId,
context: DdlContext,
) -> Self {
let len = tasks.len();
let data = CreateTablesData {
cluster_id,
state: CreateTablesState::Prepare,
tasks,
table_ids_already_exists: vec![None; len],
physical_table_id,
physical_region_numbers: vec![],
physical_columns: vec![],
};
Self { context, data }
let creator = TablesCreator::new(cluster_id, tasks, physical_table_id);
Self { context, creator }
}
pub fn from_json(json: &str, context: DdlContext) -> ProcedureResult<Self> {
let data = serde_json::from_str(json).context(FromJsonSnafu)?;
Ok(Self { context, data })
let creator = TablesCreator { data };
Ok(Self { context, creator })
}
/// On the prepare step, it performs:
/// - Checks whether the physical table exists.
/// - Checks whether the logical tables exist.
/// - Allocates the table ids.
/// - Modifies tasks to sort logical columns by name.
///
/// Abort(non-retry):
/// - The physical table does not exist.
@@ -99,17 +79,19 @@ impl CreateLogicalTablesProcedure {
let manager = &self.context.table_metadata_manager;
// Sets physical region numbers
let physical_table_id = self.data.physical_table_id();
let physical_table_id = self.creator.data.physical_table_id();
let physical_region_numbers = manager
.table_route_manager()
.get_physical_table_route(physical_table_id)
.await
.map(|(_, route)| TableRouteValue::Physical(route).region_numbers())?;
self.data
self.creator
.data
.set_physical_region_numbers(physical_region_numbers);
// Checks if the tables exist
let table_name_keys = self
.creator
.data
.all_create_table_exprs()
.iter()
@@ -124,7 +106,7 @@ impl CreateLogicalTablesProcedure {
.collect::<Vec<_>>();
// Validates the tasks
let tasks = &mut self.data.tasks;
let tasks = &mut self.creator.data.tasks;
for (task, table_id) in tasks.iter().zip(already_exists_tables_ids.iter()) {
if table_id.is_some() {
// If a table already exists, we just ignore it.
@@ -148,7 +130,7 @@ impl CreateLogicalTablesProcedure {
));
}
// Allocates table ids and sorts columns by name.
// Allocates table ids
for (task, table_id) in tasks.iter_mut().zip(already_exists_tables_ids.iter()) {
let table_id = if let Some(table_id) = table_id {
*table_id
@@ -159,19 +141,17 @@ impl CreateLogicalTablesProcedure {
.await?
};
task.set_table_id(table_id);
// sort columns in task
task.sort_columns();
}
self.data
self.creator
.data
.set_table_ids_already_exists(already_exists_tables_ids);
self.data.state = CreateTablesState::DatanodeCreateRegions;
self.creator.data.state = CreateTablesState::DatanodeCreateRegions;
Ok(Status::executing(true))
}
pub async fn on_datanode_create_regions(&mut self) -> Result<Status> {
let physical_table_id = self.data.physical_table_id();
let physical_table_id = self.creator.data.physical_table_id();
let (_, physical_table_route) = self
.context
.table_metadata_manager
@@ -183,19 +163,18 @@ impl CreateLogicalTablesProcedure {
self.create_regions(region_routes).await
}
/// Creates table metadata for logical tables and updates the corresponding physical
/// table's metadata.
/// Creates table metadata
///
/// Abort(not-retry):
/// - Failed to create table metadata.
pub async fn on_create_metadata(&mut self) -> Result<Status> {
pub async fn on_create_metadata(&self) -> Result<Status> {
let manager = &self.context.table_metadata_manager;
let physical_table_id = self.data.physical_table_id();
let remaining_tasks = self.data.remaining_tasks();
let physical_table_id = self.creator.data.physical_table_id();
let remaining_tasks = self.creator.data.remaining_tasks();
let num_tables = remaining_tasks.len();
if num_tables > 0 {
let chunk_size = manager.create_logical_tables_metadata_chunk_size();
let chunk_size = manager.max_logical_tables_per_batch();
if num_tables > chunk_size {
let chunks = remaining_tasks
.into_iter()
@@ -216,56 +195,13 @@ impl CreateLogicalTablesProcedure {
// The `table_id`s MUST be collected after the [Prepare::Prepare] step,
// which ensures that all `table_id`s have been allocated.
let table_ids = self
.creator
.data
.tasks
.iter()
.map(|task| task.table_info.ident.table_id)
.collect::<Vec<_>>();
if !self.data.physical_columns.is_empty() {
// fetch old physical table's info
let physical_table_info = self
.context
.table_metadata_manager
.table_info_manager()
.get(self.data.physical_table_id)
.await?
.with_context(|| TableInfoNotFoundSnafu {
table: format!("table id - {}", self.data.physical_table_id),
})?;
// generate new table info
let new_table_info = self
.data
.build_new_physical_table_info(&physical_table_info);
let physical_table_name = TableName::new(
&new_table_info.catalog_name,
&new_table_info.schema_name,
&new_table_info.name,
);
// update physical table's metadata
self.context
.table_metadata_manager
.update_table_info(physical_table_info, new_table_info)
.await?;
// invalidate table cache
self.context
.cache_invalidator
.invalidate(
&Context::default(),
vec![
CacheIdent::TableId(self.data.physical_table_id),
CacheIdent::TableName(physical_table_name),
],
)
.await?;
} else {
warn!("No physical columns found, leaving the physical table's schema unchanged");
}
info!("Created {num_tables} tables {table_ids:?} metadata for physical table {physical_table_id}");
Ok(Status::done_with_output(table_ids))
@@ -286,7 +222,7 @@ impl CreateLogicalTablesProcedure {
datanode: &Peer,
region_routes: &[RegionRoute],
) -> Result<CreateRequests> {
let create_tables_data = &self.data;
let create_tables_data = &self.creator.data;
let tasks = &create_tables_data.tasks;
let physical_table_id = create_tables_data.physical_table_id();
let regions = find_leader_regions(region_routes, datanode);
@@ -333,42 +269,15 @@ impl CreateLogicalTablesProcedure {
});
}
// Collect responses from the datanodes
let raw_schemas = join_all(create_region_tasks)
join_all(create_region_tasks)
.await
.into_iter()
.map(|response| {
response.map(|mut response| response.extension.remove(ALTER_PHYSICAL_EXTENSION_KEY))
})
.collect::<Result<Vec<_>>>()?;
if raw_schemas.is_empty() {
self.data.state = CreateTablesState::CreateMetadata;
return Ok(Status::executing(false));
}
self.creator.data.state = CreateTablesState::CreateMetadata;
// Verify that all datanodes return the same raw schemas
// Safety: previous check ensures this vector is not empty.
let first = raw_schemas.first().unwrap();
ensure!(
raw_schemas.iter().all(|x| x == first),
MetadataCorruptionSnafu {
err_msg: "Raw schemas from datanodes are not the same"
}
);
// Decode the raw schema and store it
if let Some(raw_schema) = first {
let physical_columns =
ColumnMetadata::decode_list(raw_schema).context(DecodeJsonSnafu)?;
self.data.physical_columns = physical_columns;
} else {
warn!("creating logical table result doesn't contains extension key `{ALTER_PHYSICAL_EXTENSION_KEY}`,leaving the physical table's schema unchanged");
}
self.data.state = CreateTablesState::CreateMetadata;
Ok(Status::executing(true))
// Ensures that, after a crash, the procedure restarts from the `DatanodeCreateRegions` stage.
Ok(Status::executing(false))
}
}
@@ -379,7 +288,7 @@ impl Procedure for CreateLogicalTablesProcedure {
}
async fn execute(&mut self, _ctx: &ProcedureContext) -> ProcedureResult<Status> {
let state = &self.data.state;
let state = &self.creator.data.state;
let _timer = metrics::METRIC_META_PROCEDURE_CREATE_TABLES
.with_label_values(&[state.as_ref()])
@@ -394,20 +303,13 @@ impl Procedure for CreateLogicalTablesProcedure {
}
fn dump(&self) -> ProcedureResult<String> {
serde_json::to_string(&self.data).context(ToJsonSnafu)
serde_json::to_string(&self.creator.data).context(ToJsonSnafu)
}
fn lock_key(&self) -> LockKey {
// CatalogLock, SchemaLock,
// TableLock
// TableNameLock(s)
let mut lock_key = Vec::with_capacity(2 + 1 + self.data.tasks.len());
let table_ref = self.data.tasks[0].table_ref();
lock_key.push(CatalogLock::Read(table_ref.catalog).into());
lock_key.push(SchemaLock::read(table_ref.catalog, table_ref.schema).into());
lock_key.push(TableLock::Write(self.data.physical_table_id()).into());
for task in &self.data.tasks {
let mut lock_key = Vec::with_capacity(1 + self.creator.data.tasks.len());
lock_key.push(TableLock::Write(self.creator.data.physical_table_id()).into());
for task in &self.creator.data.tasks {
lock_key.push(
TableNameLock::new(
&task.create_table.catalog_name,
@@ -421,6 +323,32 @@ impl Procedure for CreateLogicalTablesProcedure {
}
}
pub struct TablesCreator {
/// The serializable data.
pub data: CreateTablesData,
}
impl TablesCreator {
pub fn new(
cluster_id: ClusterId,
tasks: Vec<CreateTableTask>,
physical_table_id: TableId,
) -> Self {
let len = tasks.len();
Self {
data: CreateTablesData {
cluster_id,
state: CreateTablesState::Prepare,
tasks,
table_ids_already_exists: vec![None; len],
physical_table_id,
physical_region_numbers: vec![],
},
}
}
}
#[derive(Debug, Serialize, Deserialize)]
pub struct CreateTablesData {
cluster_id: ClusterId,
@@ -429,7 +357,6 @@ pub struct CreateTablesData {
table_ids_already_exists: Vec<Option<TableId>>,
physical_table_id: TableId,
physical_region_numbers: Vec<RegionNumber>,
physical_columns: Vec<ColumnMetadata>,
}
impl CreateTablesData {
@@ -480,21 +407,6 @@ impl CreateTablesData {
})
.collect::<Vec<_>>()
}
/// Generates the new physical table info.
///
/// This method consumes the physical columns.
fn build_new_physical_table_info(
&mut self,
old_table_info: &DeserializedValueWithBytes<TableInfoValue>,
) -> RawTableInfo {
let raw_table_info = old_table_info.deref().table_info.clone();
physical_table_metadata::build_new_physical_table_info(
raw_table_info,
&self.physical_columns,
)
}
}
#[derive(Debug, Clone, Serialize, Deserialize, AsRefStr)]
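
One notable piece removed from `create_logical_tables.rs` above is the consistency check on the schemas returned by datanodes: the responses must all carry the same encoded `ColumnMetadata` list before the first one is decoded and stored. A small sketch of that check, using plain byte vectors in place of the encoded column metadata:

```rust
// Sketch of the "all datanodes must report the same schema" rule; byte
// vectors stand in for the encoded ColumnMetadata lists.
fn verify_and_pick_schema(raw_schemas: Vec<Option<Vec<u8>>>) -> Result<Option<Vec<u8>>, String> {
    if raw_schemas.is_empty() {
        return Ok(None);
    }
    // Safety mirrors the original: the emptiness check is right above.
    let first = raw_schemas.first().unwrap();
    if !raw_schemas.iter().all(|x| x == first) {
        return Err("Raw schemas from datanodes are not the same".to_string());
    }
    Ok(first.clone())
}

fn main() {
    let same = vec![Some(vec![1, 2, 3]), Some(vec![1, 2, 3])];
    assert!(verify_and_pick_schema(same).unwrap().is_some());

    let diverged = vec![Some(vec![1]), Some(vec![2])];
    assert!(verify_and_pick_schema(diverged).is_err());
}
```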

View File

@@ -35,10 +35,10 @@ use table::table_reference::TableReference;
use crate::ddl::create_table_template::{build_template, CreateRequestBuilder};
use crate::ddl::utils::{add_peer_context_if_needed, handle_retry_error, region_storage_path};
use crate::ddl::{DdlContext, TableMetadata, TableMetadataAllocatorContext};
use crate::error::{self, Result};
use crate::error::{self, Result, TableRouteNotFoundSnafu};
use crate::key::table_name::TableNameKey;
use crate::key::table_route::{PhysicalTableRouteValue, TableRouteValue};
use crate::lock_key::{CatalogLock, SchemaLock, TableNameLock};
use crate::key::table_route::TableRouteValue;
use crate::lock_key::TableNameLock;
use crate::region_keeper::OperatingRegionGuard;
use crate::rpc::ddl::CreateTableTask;
use crate::rpc::router::{
@@ -69,7 +69,7 @@ impl CreateTableProcedure {
};
// Only registers regions if the table route is allocated.
if let Some(x) = &creator.data.table_route {
if let Some(TableRouteValue::Physical(x)) = &creator.data.table_route {
creator.opening_regions = creator
.register_opening_regions(&context, &x.region_routes)
.map_err(BoxedError::new)
@@ -97,7 +97,7 @@ impl CreateTableProcedure {
})
}
fn table_route(&self) -> Result<&PhysicalTableRouteValue> {
fn table_route(&self) -> Result<&TableRouteValue> {
self.creator
.data
.table_route
@@ -111,7 +111,7 @@ impl CreateTableProcedure {
pub fn set_allocated_metadata(
&mut self,
table_id: TableId,
table_route: PhysicalTableRouteValue,
table_route: TableRouteValue,
region_wal_options: HashMap<RegionNumber, String>,
) {
self.creator
@@ -192,10 +192,32 @@ impl CreateTableProcedure {
/// - [Code::DeadlineExceeded](tonic::status::Code::DeadlineExceeded)
/// - [Code::Unavailable](tonic::status::Code::Unavailable)
pub async fn on_datanode_create_regions(&mut self) -> Result<Status> {
let table_route = self.table_route()?.clone();
let request_builder = self.new_region_request_builder(None)?;
self.create_regions(&table_route.region_routes, request_builder)
.await
// Safety: the table route must be allocated.
match self.table_route()?.clone() {
TableRouteValue::Physical(x) => {
let region_routes = x.region_routes.clone();
let request_builder = self.new_region_request_builder(None)?;
self.create_regions(&region_routes, request_builder).await
}
TableRouteValue::Logical(x) => {
let physical_table_id = x.physical_table_id();
let physical_table_route = self
.context
.table_metadata_manager
.table_route_manager()
.try_get_physical_table_route(physical_table_id)
.await?
.context(TableRouteNotFoundSnafu {
table_id: physical_table_id,
})?;
let region_routes = &physical_table_route.region_routes;
let request_builder = self.new_region_request_builder(Some(physical_table_id))?;
self.create_regions(region_routes, request_builder).await
}
}
}
async fn create_regions(
@@ -203,12 +225,15 @@ impl CreateTableProcedure {
region_routes: &[RegionRoute],
request_builder: CreateRequestBuilder,
) -> Result<Status> {
// Registers opening regions
let guards = self
.creator
.register_opening_regions(&self.context, region_routes)?;
if !guards.is_empty() {
self.creator.opening_regions = guards;
// Safety: the table_route must be allocated.
if self.table_route()?.is_physical() {
// Registers opening regions
let guards = self
.creator
.register_opening_regions(&self.context, region_routes)?;
if !guards.is_empty() {
self.creator.opening_regions = guards;
}
}
let create_table_data = &self.creator.data;
@@ -263,8 +288,9 @@ impl CreateTableProcedure {
self.creator.data.state = CreateTableState::CreateMetadata;
// Ensures that, after a crash, the procedure restarts from the `DatanodeCreateRegions` stage.
// TODO(weny): Add more tests.
Ok(Status::executing(true))
Ok(Status::executing(false))
}
/// Creates table metadata
@@ -279,7 +305,7 @@ impl CreateTableProcedure {
// Safety: the region_wal_options must be allocated.
let region_wal_options = self.region_wal_options()?.clone();
// Safety: the table_route must be allocated.
let table_route = TableRouteValue::Physical(self.table_route()?.clone());
let table_route = self.table_route()?.clone();
manager
.create_table_metadata(raw_table_info, table_route, region_wal_options)
.await?;
@@ -317,11 +343,11 @@ impl Procedure for CreateTableProcedure {
fn lock_key(&self) -> LockKey {
let table_ref = &self.creator.data.table_ref();
LockKey::new(vec![
CatalogLock::Read(table_ref.catalog).into(),
SchemaLock::read(table_ref.catalog, table_ref.schema).into(),
TableNameLock::new(table_ref.catalog, table_ref.schema, table_ref.table).into(),
])
LockKey::single(TableNameLock::new(
table_ref.catalog,
table_ref.schema,
table_ref.table,
))
}
}
@@ -376,7 +402,7 @@ impl TableCreator {
fn set_allocated_metadata(
&mut self,
table_id: TableId,
table_route: PhysicalTableRouteValue,
table_route: TableRouteValue,
region_wal_options: HashMap<RegionNumber, String>,
) {
self.data.task.table_info.ident.table_id = table_id;
@@ -400,7 +426,7 @@ pub struct CreateTableData {
pub state: CreateTableState,
pub task: CreateTableTask,
/// None stands for not allocated yet.
table_route: Option<PhysicalTableRouteValue>,
table_route: Option<TableRouteValue>,
/// None stands for not allocated yet.
pub region_wal_options: Option<HashMap<RegionNumber, String>>,
pub cluster_id: ClusterId,
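
In `create_table.rs` above, `on_datanode_create_regions` now dispatches on the table route: a physical table carries its own region routes, while a logical table resolves and reuses the regions of its physical table. A toy sketch of that dispatch (the real lookup goes through the table-route manager):

```rust
// Toy route type; the real TableRouteValue and lookup are in the meta key module.
enum TableRoute {
    Physical { region_routes: Vec<u32> },
    Logical { physical_table_id: u32 },
}

fn regions_to_create(route: &TableRoute, lookup_physical: impl Fn(u32) -> Vec<u32>) -> Vec<u32> {
    match route {
        // A physical table carries its own region routes.
        TableRoute::Physical { region_routes } => region_routes.clone(),
        // A logical table reuses the regions of its physical table.
        TableRoute::Logical { physical_table_id } => lookup_physical(*physical_table_id),
    }
}

fn main() {
    let logical = TableRoute::Logical {
        physical_table_id: 1024,
    };
    let regions = regions_to_create(&logical, |_| vec![0, 1, 2]);
    assert_eq!(regions, vec![0, 1, 2]);

    let physical = TableRoute::Physical {
        region_routes: vec![0, 1],
    };
    assert_eq!(regions_to_create(&physical, |_| vec![]), vec![0, 1]);
}
```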

View File

@@ -1,175 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub mod cursor;
pub mod end;
pub mod executor;
pub mod metadata;
pub mod start;
use std::any::Any;
use std::fmt::Debug;
use common_procedure::error::{Error as ProcedureError, FromJsonSnafu, ToJsonSnafu};
use common_procedure::{
Context as ProcedureContext, LockKey, Procedure, Result as ProcedureResult, Status,
};
use futures::stream::BoxStream;
use serde::{Deserialize, Serialize};
use snafu::ResultExt;
use tonic::async_trait;
use self::start::DropDatabaseStart;
use crate::ddl::DdlContext;
use crate::error::Result;
use crate::key::table_name::TableNameValue;
use crate::lock_key::{CatalogLock, SchemaLock};
pub struct DropDatabaseProcedure {
/// The context of procedure runtime.
runtime_context: DdlContext,
context: DropDatabaseContext,
state: Box<dyn State>,
}
/// Target of dropping tables.
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]
pub(crate) enum DropTableTarget {
Logical,
Physical,
}
/// Context of [DropDatabaseProcedure] execution.
pub(crate) struct DropDatabaseContext {
catalog: String,
schema: String,
drop_if_exists: bool,
tables: Option<BoxStream<'static, Result<(String, TableNameValue)>>>,
}
#[async_trait::async_trait]
#[typetag::serde(tag = "drop_database_state")]
pub(crate) trait State: Send + Debug {
/// Yields the next [State] and [Status].
async fn next(
&mut self,
ddl_ctx: &DdlContext,
ctx: &mut DropDatabaseContext,
) -> Result<(Box<dyn State>, Status)>;
/// Returns as [Any](std::any::Any).
fn as_any(&self) -> &dyn Any;
}
impl DropDatabaseProcedure {
pub const TYPE_NAME: &'static str = "metasrv-procedure::DropDatabase";
pub fn new(catalog: String, schema: String, drop_if_exists: bool, context: DdlContext) -> Self {
Self {
runtime_context: context,
context: DropDatabaseContext {
catalog,
schema,
drop_if_exists,
tables: None,
},
state: Box::new(DropDatabaseStart),
}
}
pub fn from_json(json: &str, runtime_context: DdlContext) -> ProcedureResult<Self> {
let DropDatabaseOwnedData {
catalog,
schema,
drop_if_exists,
state,
} = serde_json::from_str(json).context(FromJsonSnafu)?;
Ok(Self {
runtime_context,
context: DropDatabaseContext {
catalog,
schema,
drop_if_exists,
tables: None,
},
state,
})
}
}
#[async_trait]
impl Procedure for DropDatabaseProcedure {
fn type_name(&self) -> &str {
Self::TYPE_NAME
}
async fn execute(&mut self, _ctx: &ProcedureContext) -> ProcedureResult<Status> {
let state = &mut self.state;
let (next, status) = state
.next(&self.runtime_context, &mut self.context)
.await
.map_err(|e| {
if e.is_retry_later() {
ProcedureError::retry_later(e)
} else {
ProcedureError::external(e)
}
})?;
*state = next;
Ok(status)
}
fn dump(&self) -> ProcedureResult<String> {
let data = DropDatabaseData {
catalog: &self.context.catalog,
schema: &self.context.schema,
drop_if_exists: self.context.drop_if_exists,
state: self.state.as_ref(),
};
serde_json::to_string(&data).context(ToJsonSnafu)
}
fn lock_key(&self) -> LockKey {
let lock_key = vec![
CatalogLock::Read(&self.context.catalog).into(),
SchemaLock::write(&self.context.catalog, &self.context.schema).into(),
];
LockKey::new(lock_key)
}
}
#[derive(Debug, Serialize)]
struct DropDatabaseData<'a> {
// The catalog name
catalog: &'a str,
// The schema name
schema: &'a str,
drop_if_exists: bool,
state: &'a dyn State,
}
#[derive(Debug, Deserialize)]
struct DropDatabaseOwnedData {
// The catalog name
catalog: String,
// The schema name
schema: String,
drop_if_exists: bool,
state: Box<dyn State>,
}
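
The deleted `drop_database` module above is organised as a boxed state machine: each `State::next` returns the successor state together with a `Status`, and the procedure loop swaps the states until one reports done. A minimal, synchronous sketch of that pattern; the real trait is async, serde-tagged, and persisted between steps:

```rust
// Synchronous toy version of the state-machine loop; only the state-swapping
// mechanics are shown.
#[derive(Debug, PartialEq)]
enum Status {
    Executing,
    Done,
}

trait State {
    fn next(&mut self) -> (Box<dyn State>, Status);
    fn name(&self) -> &'static str;
}

struct Start;
struct End;

impl State for Start {
    fn next(&mut self) -> (Box<dyn State>, Status) {
        (Box::new(End), Status::Executing)
    }
    fn name(&self) -> &'static str {
        "Start"
    }
}

impl State for End {
    fn next(&mut self) -> (Box<dyn State>, Status) {
        (Box::new(End), Status::Done)
    }
    fn name(&self) -> &'static str {
        "End"
    }
}

fn main() {
    let mut state: Box<dyn State> = Box::new(Start);
    loop {
        let (next, status) = state.next();
        state = next;
        println!("-> {} ({status:?})", state.name());
        if status == Status::Done {
            break;
        }
    }
}
```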

View File

@@ -1,247 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::any::Any;
use common_procedure::Status;
use futures::TryStreamExt;
use serde::{Deserialize, Serialize};
use table::metadata::TableId;
use super::executor::DropDatabaseExecutor;
use super::metadata::DropDatabaseRemoveMetadata;
use super::DropTableTarget;
use crate::ddl::drop_database::{DropDatabaseContext, State};
use crate::ddl::DdlContext;
use crate::error::Result;
use crate::key::table_route::TableRouteValue;
use crate::table_name::TableName;
#[derive(Debug, Serialize, Deserialize)]
pub(crate) struct DropDatabaseCursor {
pub(crate) target: DropTableTarget,
}
impl DropDatabaseCursor {
/// Returns a new [DropDatabaseCursor].
pub fn new(target: DropTableTarget) -> Self {
Self { target }
}
fn handle_reach_end(
&mut self,
ctx: &mut DropDatabaseContext,
) -> Result<(Box<dyn State>, Status)> {
// Consumes the tables stream.
ctx.tables.take();
match self.target {
DropTableTarget::Logical => Ok((
Box::new(DropDatabaseCursor::new(DropTableTarget::Physical)),
Status::executing(true),
)),
DropTableTarget::Physical => Ok((
Box::new(DropDatabaseRemoveMetadata),
Status::executing(true),
)),
}
}
async fn handle_table(
&mut self,
ddl_ctx: &DdlContext,
ctx: &mut DropDatabaseContext,
table_name: String,
table_id: TableId,
table_route_value: TableRouteValue,
) -> Result<(Box<dyn State>, Status)> {
match (self.target, table_route_value) {
(DropTableTarget::Logical, TableRouteValue::Logical(route)) => {
let physical_table_id = route.physical_table_id();
let (_, table_route) = ddl_ctx
.table_metadata_manager
.table_route_manager()
.get_physical_table_route(physical_table_id)
.await?;
Ok((
Box::new(DropDatabaseExecutor::new(
table_id,
TableName::new(&ctx.catalog, &ctx.schema, &table_name),
table_route.region_routes,
self.target,
)),
Status::executing(true),
))
}
(DropTableTarget::Physical, TableRouteValue::Physical(table_route)) => Ok((
Box::new(DropDatabaseExecutor::new(
table_id,
TableName::new(&ctx.catalog, &ctx.schema, &table_name),
table_route.region_routes,
self.target,
)),
Status::executing(true),
)),
_ => Ok((
Box::new(DropDatabaseCursor::new(self.target)),
Status::executing(false),
)),
}
}
}
#[async_trait::async_trait]
#[typetag::serde]
impl State for DropDatabaseCursor {
async fn next(
&mut self,
ddl_ctx: &DdlContext,
ctx: &mut DropDatabaseContext,
) -> Result<(Box<dyn State>, Status)> {
if ctx.tables.as_deref().is_none() {
let tables = ddl_ctx
.table_metadata_manager
.table_name_manager()
.tables(&ctx.catalog, &ctx.schema);
ctx.tables = Some(tables);
}
// Safety: must exist
match ctx.tables.as_mut().unwrap().try_next().await? {
Some((table_name, table_name_value)) => {
let table_id = table_name_value.table_id();
match ddl_ctx
.table_metadata_manager
.table_route_manager()
.table_route_storage()
.get(table_id)
.await?
{
Some(table_route_value) => {
self.handle_table(ddl_ctx, ctx, table_name, table_id, table_route_value)
.await
}
None => Ok((
Box::new(DropDatabaseCursor::new(self.target)),
Status::executing(false),
)),
}
}
None => self.handle_reach_end(ctx),
}
}
fn as_any(&self) -> &dyn Any {
self
}
}
#[cfg(test)]
mod tests {
use std::sync::Arc;
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use crate::ddl::drop_database::cursor::DropDatabaseCursor;
use crate::ddl::drop_database::executor::DropDatabaseExecutor;
use crate::ddl::drop_database::metadata::DropDatabaseRemoveMetadata;
use crate::ddl::drop_database::{DropDatabaseContext, DropTableTarget, State};
use crate::ddl::test_util::{create_logical_table, create_physical_table};
use crate::test_util::{new_ddl_context, MockDatanodeManager};
#[tokio::test]
async fn test_next_without_logical_tables() {
let datanode_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(datanode_manager);
create_physical_table(ddl_context.clone(), 0, "phy").await;
// It always starts from Logical
let mut state = DropDatabaseCursor::new(DropTableTarget::Logical);
let mut ctx = DropDatabaseContext {
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
drop_if_exists: false,
tables: None,
};
// Ticks
let (mut state, status) = state.next(&ddl_context, &mut ctx).await.unwrap();
assert!(!status.need_persist());
let cursor = state.as_any().downcast_ref::<DropDatabaseCursor>().unwrap();
assert_eq!(cursor.target, DropTableTarget::Logical);
// Ticks
let (mut state, status) = state.next(&ddl_context, &mut ctx).await.unwrap();
assert!(status.need_persist());
assert!(ctx.tables.is_none());
let cursor = state.as_any().downcast_ref::<DropDatabaseCursor>().unwrap();
assert_eq!(cursor.target, DropTableTarget::Physical);
// Ticks
let (state, status) = state.next(&ddl_context, &mut ctx).await.unwrap();
assert!(status.need_persist());
let executor = state
.as_any()
.downcast_ref::<DropDatabaseExecutor>()
.unwrap();
assert_eq!(executor.target, DropTableTarget::Physical);
}
#[tokio::test]
async fn test_next_with_logical_tables() {
let datanode_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(datanode_manager);
let physical_table_id = create_physical_table(ddl_context.clone(), 0, "phy").await;
create_logical_table(ddl_context.clone(), 0, physical_table_id, "metric_0").await;
// It always starts from Logical
let mut state = DropDatabaseCursor::new(DropTableTarget::Logical);
let mut ctx = DropDatabaseContext {
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
drop_if_exists: false,
tables: None,
};
// Ticks
let (state, status) = state.next(&ddl_context, &mut ctx).await.unwrap();
assert!(status.need_persist());
let executor = state
.as_any()
.downcast_ref::<DropDatabaseExecutor>()
.unwrap();
let (_, table_route) = ddl_context
.table_metadata_manager
.table_route_manager()
.get_physical_table_route(physical_table_id)
.await
.unwrap();
assert_eq!(table_route.region_routes, executor.region_routes);
assert_eq!(executor.target, DropTableTarget::Logical);
}
#[tokio::test]
async fn test_reach_the_end() {
let datanode_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(datanode_manager);
let mut state = DropDatabaseCursor::new(DropTableTarget::Physical);
let mut ctx = DropDatabaseContext {
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
drop_if_exists: false,
tables: None,
};
// Ticks
let (state, status) = state.next(&ddl_context, &mut ctx).await.unwrap();
assert!(status.need_persist());
state
.as_any()
.downcast_ref::<DropDatabaseRemoveMetadata>()
.unwrap();
assert!(ctx.tables.is_none());
}
}
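
The deleted `cursor.rs` above drops tables in two passes: logical tables first, physical tables second, and any table whose route does not match the current pass is skipped and revisited later. A toy sketch of that selection:

```rust
// Toy types; Logical/Physical route variants only carry what the example needs.
#[derive(Debug, Clone, Copy, PartialEq)]
enum Target {
    Logical,
    Physical,
}

enum Route {
    Logical { physical_table_id: u32 },
    Physical,
}

/// Returns `Some(..)` when the table matches the current pass and should be
/// handed to the drop executor, `None` when the cursor should just advance.
fn select_for_drop(target: Target, route: &Route) -> Option<&'static str> {
    match (target, route) {
        (Target::Logical, Route::Logical { .. }) => Some("drop via physical route"),
        (Target::Physical, Route::Physical) => Some("drop directly"),
        // Mismatched combinations are skipped in this pass and revisited later.
        _ => None,
    }
}

fn main() {
    assert!(select_for_drop(Target::Logical, &Route::Physical).is_none());
    assert!(select_for_drop(Target::Physical, &Route::Physical).is_some());
    assert!(select_for_drop(Target::Logical, &Route::Logical { physical_table_id: 1024 }).is_some());
}
```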

View File

@@ -1,41 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::any::Any;
use common_procedure::Status;
use serde::{Deserialize, Serialize};
use crate::ddl::drop_database::{DropDatabaseContext, State};
use crate::ddl::DdlContext;
use crate::error::Result;
#[derive(Debug, Serialize, Deserialize)]
pub(crate) struct DropDatabaseEnd;
#[async_trait::async_trait]
#[typetag::serde]
impl State for DropDatabaseEnd {
async fn next(
&mut self,
_: &DdlContext,
_: &mut DropDatabaseContext,
) -> Result<(Box<dyn State>, Status)> {
Ok((Box::new(DropDatabaseEnd), Status::done()))
}
fn as_any(&self) -> &dyn Any {
self
}
}

View File

@@ -1,296 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::any::Any;
use common_procedure::Status;
use common_telemetry::info;
use serde::{Deserialize, Serialize};
use snafu::OptionExt;
use table::metadata::TableId;
use super::cursor::DropDatabaseCursor;
use super::{DropDatabaseContext, DropTableTarget};
use crate::ddl::drop_database::State;
use crate::ddl::drop_table::executor::DropTableExecutor;
use crate::ddl::DdlContext;
use crate::error::{self, Result};
use crate::region_keeper::OperatingRegionGuard;
use crate::rpc::router::{operating_leader_regions, RegionRoute};
use crate::table_name::TableName;
#[derive(Debug, Serialize, Deserialize)]
pub(crate) struct DropDatabaseExecutor {
table_id: TableId,
table_name: TableName,
pub(crate) region_routes: Vec<RegionRoute>,
pub(crate) target: DropTableTarget,
#[serde(skip)]
dropping_regions: Vec<OperatingRegionGuard>,
}
impl DropDatabaseExecutor {
/// Returns a new [DropDatabaseExecutor].
pub fn new(
table_id: TableId,
table_name: TableName,
region_routes: Vec<RegionRoute>,
target: DropTableTarget,
) -> Self {
Self {
table_name,
table_id,
region_routes,
target,
dropping_regions: vec![],
}
}
}
impl DropDatabaseExecutor {
fn register_dropping_regions(&mut self, ddl_ctx: &DdlContext) -> Result<()> {
let dropping_regions = operating_leader_regions(&self.region_routes);
let mut dropping_region_guards = Vec::with_capacity(dropping_regions.len());
for (region_id, datanode_id) in dropping_regions {
let guard = ddl_ctx
.memory_region_keeper
.register(datanode_id, region_id)
.context(error::RegionOperatingRaceSnafu {
region_id,
peer_id: datanode_id,
})?;
dropping_region_guards.push(guard);
}
self.dropping_regions = dropping_region_guards;
Ok(())
}
}
#[async_trait::async_trait]
#[typetag::serde]
impl State for DropDatabaseExecutor {
async fn next(
&mut self,
ddl_ctx: &DdlContext,
_ctx: &mut DropDatabaseContext,
) -> Result<(Box<dyn State>, Status)> {
self.register_dropping_regions(ddl_ctx)?;
let executor = DropTableExecutor::new(self.table_name.clone(), self.table_id, true);
executor
.on_remove_metadata(ddl_ctx, &self.region_routes)
.await?;
executor.invalidate_table_cache(ddl_ctx).await?;
executor
.on_drop_regions(ddl_ctx, &self.region_routes)
.await?;
info!("Table: {}({}) is dropped", self.table_name, self.table_id);
Ok((
Box::new(DropDatabaseCursor::new(self.target)),
Status::executing(false),
))
}
fn as_any(&self) -> &dyn Any {
self
}
}
#[cfg(test)]
mod tests {
use std::sync::Arc;
use api::v1::region::{QueryRequest, RegionRequest};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_error::ext::BoxedError;
use common_recordbatch::SendableRecordBatchStream;
use crate::datanode_manager::HandleResponse;
use crate::ddl::drop_database::cursor::DropDatabaseCursor;
use crate::ddl::drop_database::executor::DropDatabaseExecutor;
use crate::ddl::drop_database::{DropDatabaseContext, DropTableTarget, State};
use crate::ddl::test_util::{create_logical_table, create_physical_table};
use crate::error::{self, Error, Result};
use crate::peer::Peer;
use crate::table_name::TableName;
use crate::test_util::{new_ddl_context, MockDatanodeHandler, MockDatanodeManager};
#[derive(Clone)]
pub struct NaiveDatanodeHandler;
#[async_trait::async_trait]
impl MockDatanodeHandler for NaiveDatanodeHandler {
async fn handle(&self, _peer: &Peer, _request: RegionRequest) -> Result<HandleResponse> {
Ok(HandleResponse::new(0))
}
async fn handle_query(
&self,
_peer: &Peer,
_request: QueryRequest,
) -> Result<SendableRecordBatchStream> {
unreachable!()
}
}
#[tokio::test]
async fn test_next_with_physical_table() {
let datanode_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
let ddl_context = new_ddl_context(datanode_manager);
let physical_table_id = create_physical_table(ddl_context.clone(), 0, "phy").await;
let (_, table_route) = ddl_context
.table_metadata_manager
.table_route_manager()
.get_physical_table_route(physical_table_id)
.await
.unwrap();
{
let mut state = DropDatabaseExecutor::new(
physical_table_id,
TableName::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, "phy"),
table_route.region_routes.clone(),
DropTableTarget::Physical,
);
let mut ctx = DropDatabaseContext {
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
drop_if_exists: false,
tables: None,
};
let (state, status) = state.next(&ddl_context, &mut ctx).await.unwrap();
assert!(!status.need_persist());
let cursor = state.as_any().downcast_ref::<DropDatabaseCursor>().unwrap();
assert_eq!(cursor.target, DropTableTarget::Physical);
}
// Execute again
let mut ctx = DropDatabaseContext {
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
drop_if_exists: false,
tables: None,
};
let mut state = DropDatabaseExecutor::new(
physical_table_id,
TableName::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, "phy"),
table_route.region_routes,
DropTableTarget::Physical,
);
let (state, status) = state.next(&ddl_context, &mut ctx).await.unwrap();
assert!(!status.need_persist());
let cursor = state.as_any().downcast_ref::<DropDatabaseCursor>().unwrap();
assert_eq!(cursor.target, DropTableTarget::Physical);
}
#[tokio::test]
async fn test_next_logical_table() {
let datanode_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
let ddl_context = new_ddl_context(datanode_manager);
let physical_table_id = create_physical_table(ddl_context.clone(), 0, "phy").await;
create_logical_table(ddl_context.clone(), 0, physical_table_id, "metric").await;
let logical_table_id = physical_table_id + 1;
let (_, table_route) = ddl_context
.table_metadata_manager
.table_route_manager()
.get_physical_table_route(logical_table_id)
.await
.unwrap();
{
let mut state = DropDatabaseExecutor::new(
physical_table_id,
TableName::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, "metric"),
table_route.region_routes.clone(),
DropTableTarget::Logical,
);
let mut ctx = DropDatabaseContext {
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
drop_if_exists: false,
tables: None,
};
let (state, status) = state.next(&ddl_context, &mut ctx).await.unwrap();
assert!(!status.need_persist());
let cursor = state.as_any().downcast_ref::<DropDatabaseCursor>().unwrap();
assert_eq!(cursor.target, DropTableTarget::Logical);
}
// Execute again
let mut ctx = DropDatabaseContext {
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
drop_if_exists: false,
tables: None,
};
let mut state = DropDatabaseExecutor::new(
physical_table_id,
TableName::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, "phy"),
table_route.region_routes,
DropTableTarget::Logical,
);
let (state, status) = state.next(&ddl_context, &mut ctx).await.unwrap();
assert!(!status.need_persist());
let cursor = state.as_any().downcast_ref::<DropDatabaseCursor>().unwrap();
assert_eq!(cursor.target, DropTableTarget::Logical);
}
#[derive(Clone)]
pub struct RetryErrorDatanodeHandler;
#[async_trait::async_trait]
impl MockDatanodeHandler for RetryErrorDatanodeHandler {
async fn handle(&self, _peer: &Peer, _request: RegionRequest) -> Result<HandleResponse> {
Err(Error::RetryLater {
source: BoxedError::new(
error::UnexpectedSnafu {
err_msg: "retry later",
}
.build(),
),
})
}
async fn handle_query(
&self,
_peer: &Peer,
_request: QueryRequest,
) -> Result<SendableRecordBatchStream> {
unreachable!()
}
}
#[tokio::test]
async fn test_next_retryable_err() {
let datanode_manager = Arc::new(MockDatanodeManager::new(RetryErrorDatanodeHandler));
let ddl_context = new_ddl_context(datanode_manager);
let physical_table_id = create_physical_table(ddl_context.clone(), 0, "phy").await;
let (_, table_route) = ddl_context
.table_metadata_manager
.table_route_manager()
.get_physical_table_route(physical_table_id)
.await
.unwrap();
let mut state = DropDatabaseExecutor::new(
physical_table_id,
TableName::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, "phy"),
table_route.region_routes,
DropTableTarget::Physical,
);
let mut ctx = DropDatabaseContext {
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
drop_if_exists: false,
tables: None,
};
let err = state.next(&ddl_context, &mut ctx).await.unwrap_err();
assert!(err.is_retry_later());
}
}
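
The deleted `executor.rs` above registers every leader region it is about to drop with the memory region keeper, so that concurrent procedures touching the same region fail fast instead of racing. A simplified sketch of that registration; the real guard is RAII-based and releases the entry when dropped:

```rust
// Toy region keeper; only the registration/conflict check is modelled.
use std::collections::HashSet;

#[derive(Default)]
struct RegionKeeper {
    operating: HashSet<(u64, u64)>, // (datanode_id, region_id)
}

impl RegionKeeper {
    /// Returns an error if the region is already being operated on.
    fn register(&mut self, datanode_id: u64, region_id: u64) -> Result<(), String> {
        if !self.operating.insert((datanode_id, region_id)) {
            return Err(format!(
                "region {region_id} is already operating on datanode {datanode_id}"
            ));
        }
        Ok(())
    }
}

fn main() {
    let mut keeper = RegionKeeper::default();
    assert!(keeper.register(1, 42).is_ok());
    // A second registration for the same (datanode, region) pair is rejected.
    assert!(keeper.register(1, 42).is_err());
}
```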

View File

@@ -1,99 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::any::Any;
use common_procedure::Status;
use serde::{Deserialize, Serialize};
use super::end::DropDatabaseEnd;
use crate::ddl::drop_database::{DropDatabaseContext, State};
use crate::ddl::DdlContext;
use crate::error::Result;
use crate::key::schema_name::SchemaNameKey;
#[derive(Debug, Serialize, Deserialize)]
pub(crate) struct DropDatabaseRemoveMetadata;
#[async_trait::async_trait]
#[typetag::serde]
impl State for DropDatabaseRemoveMetadata {
async fn next(
&mut self,
ddl_ctx: &DdlContext,
ctx: &mut DropDatabaseContext,
) -> Result<(Box<dyn State>, Status)> {
ddl_ctx
.table_metadata_manager
.schema_manager()
.delete(SchemaNameKey::new(&ctx.catalog, &ctx.schema))
.await?;
return Ok((Box::new(DropDatabaseEnd), Status::done()));
}
fn as_any(&self) -> &dyn Any {
self
}
}
#[cfg(test)]
mod tests {
use std::sync::Arc;
use crate::ddl::drop_database::end::DropDatabaseEnd;
use crate::ddl::drop_database::metadata::DropDatabaseRemoveMetadata;
use crate::ddl::drop_database::{DropDatabaseContext, State};
use crate::key::schema_name::SchemaNameKey;
use crate::test_util::{new_ddl_context, MockDatanodeManager};
#[tokio::test]
async fn test_next() {
let datanode_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(datanode_manager);
ddl_context
.table_metadata_manager
.schema_manager()
.create(SchemaNameKey::new("foo", "bar"), None, true)
.await
.unwrap();
let mut state = DropDatabaseRemoveMetadata;
let mut ctx = DropDatabaseContext {
catalog: "foo".to_string(),
schema: "bar".to_string(),
drop_if_exists: true,
tables: None,
};
let (state, status) = state.next(&ddl_context, &mut ctx).await.unwrap();
state.as_any().downcast_ref::<DropDatabaseEnd>().unwrap();
assert!(status.is_done());
assert!(!ddl_context
.table_metadata_manager
.schema_manager()
.exists(SchemaNameKey::new("foo", "bar"))
.await
.unwrap());
// Schema does not exist
let mut state = DropDatabaseRemoveMetadata;
let mut ctx = DropDatabaseContext {
catalog: "foo".to_string(),
schema: "bar".to_string(),
drop_if_exists: true,
tables: None,
};
let (state, status) = state.next(&ddl_context, &mut ctx).await.unwrap();
state.as_any().downcast_ref::<DropDatabaseEnd>().unwrap();
assert!(status.is_done());
}
}

View File

@@ -1,138 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::any::Any;
use common_procedure::Status;
use serde::{Deserialize, Serialize};
use snafu::ensure;
use crate::ddl::drop_database::cursor::DropDatabaseCursor;
use crate::ddl::drop_database::end::DropDatabaseEnd;
use crate::ddl::drop_database::{DropDatabaseContext, DropTableTarget, State};
use crate::ddl::DdlContext;
use crate::error::{self, Result};
use crate::key::schema_name::SchemaNameKey;
#[derive(Debug, Serialize, Deserialize)]
pub(crate) struct DropDatabaseStart;
#[async_trait::async_trait]
#[typetag::serde]
impl State for DropDatabaseStart {
/// Checks whether the schema exists.
/// - Returns early if the schema does not exist and `drop_if_exists` is `true`.
/// - Returns an error if the schema does not exist and `drop_if_exists` is `false`.
async fn next(
&mut self,
ddl_ctx: &DdlContext,
ctx: &mut DropDatabaseContext,
) -> Result<(Box<dyn State>, Status)> {
let exists = ddl_ctx
.table_metadata_manager
.schema_manager()
.exists(SchemaNameKey {
catalog: &ctx.catalog,
schema: &ctx.schema,
})
.await?;
if !exists && ctx.drop_if_exists {
return Ok((Box::new(DropDatabaseEnd), Status::done()));
}
ensure!(
exists,
error::SchemaNotFoundSnafu {
table_schema: &ctx.schema,
}
);
Ok((
Box::new(DropDatabaseCursor::new(DropTableTarget::Logical)),
Status::executing(true),
))
}
fn as_any(&self) -> &dyn Any {
self
}
}
#[cfg(test)]
mod tests {
use std::assert_matches::assert_matches;
use std::sync::Arc;
use crate::ddl::drop_database::cursor::DropDatabaseCursor;
use crate::ddl::drop_database::end::DropDatabaseEnd;
use crate::ddl::drop_database::start::DropDatabaseStart;
use crate::ddl::drop_database::{DropDatabaseContext, State};
use crate::error;
use crate::key::schema_name::SchemaNameKey;
use crate::test_util::{new_ddl_context, MockDatanodeManager};
#[tokio::test]
async fn test_schema_not_exists_err() {
let datanode_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(datanode_manager);
let mut step = DropDatabaseStart;
let mut ctx = DropDatabaseContext {
catalog: "foo".to_string(),
schema: "bar".to_string(),
drop_if_exists: false,
tables: None,
};
let err = step.next(&ddl_context, &mut ctx).await.unwrap_err();
assert_matches!(err, error::Error::SchemaNotFound { .. });
}
#[tokio::test]
async fn test_schema_not_exists() {
let datanode_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(datanode_manager);
let mut state = DropDatabaseStart;
let mut ctx = DropDatabaseContext {
catalog: "foo".to_string(),
schema: "bar".to_string(),
drop_if_exists: true,
tables: None,
};
let (state, status) = state.next(&ddl_context, &mut ctx).await.unwrap();
state.as_any().downcast_ref::<DropDatabaseEnd>().unwrap();
assert!(status.is_done());
}
#[tokio::test]
async fn test_next() {
let datanode_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(datanode_manager);
ddl_context
.table_metadata_manager
.schema_manager()
.create(SchemaNameKey::new("foo", "bar"), None, true)
.await
.unwrap();
let mut state = DropDatabaseStart;
let mut ctx = DropDatabaseContext {
catalog: "foo".to_string(),
schema: "bar".to_string(),
drop_if_exists: false,
tables: None,
};
let (state, status) = state.next(&ddl_context, &mut ctx).await.unwrap();
state.as_any().downcast_ref::<DropDatabaseCursor>().unwrap();
assert!(status.need_persist());
}
}

View File

@@ -12,32 +12,42 @@
// See the License for the specific language governing permissions and
// limitations under the License.
pub mod executor;
use api::v1::region::{
region_request, DropRequest as PbDropRegionRequest, RegionRequest, RegionRequestHeader,
};
use async_trait::async_trait;
use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
use common_procedure::error::{FromJsonSnafu, ToJsonSnafu};
use common_procedure::{
Context as ProcedureContext, LockKey, Procedure, Result as ProcedureResult, Status,
};
use common_telemetry::info;
use common_telemetry::tracing_context::TracingContext;
use common_telemetry::{debug, info};
use futures::future::join_all;
use serde::{Deserialize, Serialize};
use snafu::{OptionExt, ResultExt};
use snafu::{ensure, OptionExt, ResultExt};
use store_api::storage::RegionId;
use strum::AsRefStr;
use table::metadata::{RawTableInfo, TableId};
use table::table_reference::TableReference;
use self::executor::DropTableExecutor;
use crate::ddl::utils::handle_retry_error;
use super::utils::handle_retry_error;
use crate::cache_invalidator::Context;
use crate::ddl::utils::add_peer_context_if_needed;
use crate::ddl::DdlContext;
use crate::error::{self, Result};
use crate::key::table_info::TableInfoValue;
use crate::key::table_name::TableNameKey;
use crate::key::table_route::TableRouteValue;
use crate::key::DeserializedValueWithBytes;
use crate::lock_key::{CatalogLock, SchemaLock, TableLock};
use crate::metrics;
use crate::region_keeper::OperatingRegionGuard;
use crate::rpc::ddl::DropTableTask;
use crate::rpc::router::{operating_leader_regions, RegionRoute};
use crate::rpc::router::{
find_leader_regions, find_leaders, operating_leader_regions, RegionRoute,
};
pub struct DropTableProcedure {
/// The context of procedure runtime.
@@ -48,6 +58,7 @@ pub struct DropTableProcedure {
pub dropping_regions: Vec<OperatingRegionGuard>,
}
#[allow(dead_code)]
impl DropTableProcedure {
pub const TYPE_NAME: &'static str = "metasrv-procedure::DropTable";
@@ -74,10 +85,31 @@ impl DropTableProcedure {
})
}
async fn on_prepare<'a>(&mut self, executor: &DropTableExecutor) -> Result<Status> {
if executor.on_prepare(&self.context).await?.stop() {
async fn on_prepare(&mut self) -> Result<Status> {
let table_ref = &self.data.table_ref();
let exist = self
.context
.table_metadata_manager
.table_name_manager()
.exists(TableNameKey::new(
table_ref.catalog,
table_ref.schema,
table_ref.table,
))
.await?;
if !exist && self.data.task.drop_if_exists {
return Ok(Status::done());
}
ensure!(
exist,
error::TableNotFoundSnafu {
table_name: table_ref.to_string()
}
);
self.data.state = DropTableState::RemoveMetadata;
Ok(Status::executing(true))
@@ -112,34 +144,98 @@ impl DropTableProcedure {
}
/// Removes the table metadata.
async fn on_remove_metadata(&mut self, executor: &DropTableExecutor) -> Result<Status> {
self.register_dropping_regions()?;
async fn on_remove_metadata(&mut self) -> Result<Status> {
// NOTES: If the meta server crashes after `RemoveMetadata`,
// the corresponding regions of this table on the Datanode will be closed automatically,
// and any future drop operation will fail.
// TODO(weny): Consider introducing a RegionStatus to indicate that the region is being dropped.
let table_metadata_manager = &self.context.table_metadata_manager;
let table_info_value = &self.data.table_info_value;
let table_route_value = &self.data.table_route_value;
let table_id = self.data.table_id();
executor
.on_remove_metadata(&self.context, self.data.region_routes()?)
table_metadata_manager
.delete_table_metadata(table_info_value, table_route_value)
.await?;
info!("Deleted table metadata for table {table_id}");
self.data.state = DropTableState::InvalidateTableCache;
Ok(Status::executing(true))
}
/// Broadcasts invalidate table cache instruction.
async fn on_broadcast(&mut self, executor: &DropTableExecutor) -> Result<Status> {
executor.invalidate_table_cache(&self.context).await?;
async fn on_broadcast(&mut self) -> Result<Status> {
let ctx = Context {
subject: Some("Invalidate table cache by dropping table".to_string()),
};
let cache_invalidator = &self.context.cache_invalidator;
cache_invalidator
.invalidate_table_name(&ctx, self.data.table_ref().into())
.await?;
cache_invalidator
.invalidate_table_id(&ctx, self.data.table_id())
.await?;
self.data.state = DropTableState::DatanodeDropRegions;
Ok(Status::executing(true))
}
pub async fn on_datanode_drop_regions(&self, executor: &DropTableExecutor) -> Result<Status> {
executor
.on_drop_regions(&self.context, self.data.region_routes()?)
.await?;
pub async fn on_datanode_drop_regions(&self) -> Result<Status> {
let table_id = self.data.table_id();
let region_routes = &self.data.region_routes()?;
let leaders = find_leaders(region_routes);
let mut drop_region_tasks = Vec::with_capacity(leaders.len());
for datanode in leaders {
let requester = self.context.datanode_manager.datanode(&datanode).await;
let regions = find_leader_regions(region_routes, &datanode);
let region_ids = regions
.iter()
.map(|region_number| RegionId::new(table_id, *region_number))
.collect::<Vec<_>>();
for region_id in region_ids {
debug!("Dropping region {region_id} on Datanode {datanode:?}");
let request = RegionRequest {
header: Some(RegionRequestHeader {
tracing_context: TracingContext::from_current_span().to_w3c(),
..Default::default()
}),
body: Some(region_request::Body::Drop(PbDropRegionRequest {
region_id: region_id.as_u64(),
})),
};
let datanode = datanode.clone();
let requester = requester.clone();
drop_region_tasks.push(async move {
if let Err(err) = requester.handle(request).await {
if err.status_code() != StatusCode::RegionNotFound {
return Err(add_peer_context_if_needed(datanode)(err));
}
}
Ok(())
});
}
}
join_all(drop_region_tasks)
.await
.into_iter()
.collect::<Result<Vec<_>>>()?;
Ok(Status::done())
}
}
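
The fan-out above treats a missing region as success, which keeps region dropping idempotent across retries. A minimal sketch of that error-tolerance choice, using mock types instead of the real StatusCode (illustrative only, not part of the codebase):

// Illustrative only: a retried drop of an already-dropped region should still
// succeed, so "region not found" is swallowed while any other error propagates.
#[derive(Debug, PartialEq)]
enum MockStatusCode {
    RegionNotFound,
    Internal,
}

fn tolerate_missing_region(result: Result<(), MockStatusCode>) -> Result<(), MockStatusCode> {
    match result {
        Err(code) if code != MockStatusCode::RegionNotFound => Err(code),
        _ => Ok(()),
    }
}
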
@@ -151,21 +247,17 @@ impl Procedure for DropTableProcedure {
}
async fn execute(&mut self, _ctx: &ProcedureContext) -> ProcedureResult<Status> {
let executor = DropTableExecutor::new(
self.data.task.table_name(),
self.data.table_id(),
self.data.task.drop_if_exists,
);
let state = &self.data.state;
let _timer = metrics::METRIC_META_PROCEDURE_DROP_TABLE
.with_label_values(&[state.as_ref()])
.start_timer();
match self.data.state {
DropTableState::Prepare => self.on_prepare(&executor).await,
DropTableState::RemoveMetadata => self.on_remove_metadata(&executor).await,
DropTableState::InvalidateTableCache => self.on_broadcast(&executor).await,
DropTableState::DatanodeDropRegions => self.on_datanode_drop_regions(&executor).await,
DropTableState::Prepare => self.on_prepare().await,
DropTableState::RemoveMetadata => self.on_remove_metadata().await,
DropTableState::InvalidateTableCache => self.on_broadcast().await,
DropTableState::DatanodeDropRegions => self.on_datanode_drop_regions().await,
}
.map_err(handle_retry_error)
}
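
Each arm above runs one step and advances self.data.state before the procedure persists. A hedged sketch of the same linear progression, with a renamed enum so it does not clash with the real DropTableState (illustrative only):

// Hypothetical sketch of the state progression driven by `execute` above;
// the real procedure persists its state between steps.
#[derive(Debug, Clone, Copy, PartialEq)]
enum SketchState {
    Prepare,
    RemoveMetadata,
    InvalidateTableCache,
    DatanodeDropRegions,
}

fn next_state(state: SketchState) -> Option<SketchState> {
    match state {
        SketchState::Prepare => Some(SketchState::RemoveMetadata),
        SketchState::RemoveMetadata => Some(SketchState::InvalidateTableCache),
        SketchState::InvalidateTableCache => Some(SketchState::DatanodeDropRegions),
        // The last step returns Status::done() in the real procedure.
        SketchState::DatanodeDropRegions => None,
    }
}
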
@@ -188,7 +280,6 @@ impl Procedure for DropTableProcedure {
}
#[derive(Debug, Serialize, Deserialize)]
/// TODO(weny): simplify the table data.
pub struct DropTableData {
pub state: DropTableState,
pub cluster_id: u64,

View File

@@ -1,277 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use api::v1::region::{
region_request, DropRequest as PbDropRegionRequest, RegionRequest, RegionRequestHeader,
};
use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
use common_telemetry::debug;
use common_telemetry::tracing_context::TracingContext;
use futures::future::join_all;
use snafu::ensure;
use store_api::storage::RegionId;
use table::metadata::TableId;
use crate::cache_invalidator::Context;
use crate::ddl::utils::add_peer_context_if_needed;
use crate::ddl::DdlContext;
use crate::error::{self, Result};
use crate::instruction::CacheIdent;
use crate::key::table_name::TableNameKey;
use crate::rpc::router::{find_leader_regions, find_leaders, RegionRoute};
use crate::table_name::TableName;
/// [Control] indicated to the caller whether to go to the next step.
#[derive(Debug)]
pub enum Control<T> {
Continue(T),
Stop,
}
impl<T> Control<T> {
/// Returns true if it's [Control::Stop].
pub fn stop(&self) -> bool {
matches!(self, Control::Stop)
}
}
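
A small, illustrative sketch of how a caller might branch on [Control]; the function name is hypothetical and not part of the executor:

// Illustrative only: stop the procedure early when the executor says so.
fn describe(ctrl: Control<()>) -> &'static str {
    if ctrl.stop() {
        // e.g. `drop_if_exists` on a missing table: nothing left to do.
        "finished early"
    } else {
        "continue to the next step"
    }
}
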
impl DropTableExecutor {
/// Returns the [DropTableExecutor].
pub fn new(table: TableName, table_id: TableId, drop_if_exists: bool) -> Self {
Self {
table,
table_id,
drop_if_exists,
}
}
}
/// [DropTableExecutor] performs:
/// - Drops the metadata of the table.
/// - Invalidates the cache on the Frontend nodes.
/// - Drops the regions on the Datanode nodes.
pub struct DropTableExecutor {
table: TableName,
table_id: TableId,
drop_if_exists: bool,
}
impl DropTableExecutor {
/// Checks whether the table exists.
/// - Returns early if the table does not exist and `drop_if_exists` is `true`.
/// - Returns an error if the table does not exist and `drop_if_exists` is `false`.
pub async fn on_prepare(&self, ctx: &DdlContext) -> Result<Control<()>> {
let table_ref = self.table.table_ref();
let exist = ctx
.table_metadata_manager
.table_name_manager()
.exists(TableNameKey::new(
table_ref.catalog,
table_ref.schema,
table_ref.table,
))
.await?;
if !exist && self.drop_if_exists {
return Ok(Control::Stop);
}
ensure!(
exist,
error::TableNotFoundSnafu {
table_name: table_ref.to_string()
}
);
Ok(Control::Continue(()))
}
/// Removes the table metadata.
pub async fn on_remove_metadata(
&self,
ctx: &DdlContext,
region_routes: &[RegionRoute],
) -> Result<()> {
ctx.table_metadata_manager
.delete_table_metadata(self.table_id, &self.table, region_routes)
.await
}
/// Invalidates frontend caches
pub async fn invalidate_table_cache(&self, ctx: &DdlContext) -> Result<()> {
let cache_invalidator = &ctx.cache_invalidator;
let ctx = Context {
subject: Some("Invalidate table cache by dropping table".to_string()),
};
cache_invalidator
.invalidate(
&ctx,
vec![
CacheIdent::TableName(self.table.table_ref().into()),
CacheIdent::TableId(self.table_id),
],
)
.await?;
Ok(())
}
/// Drops region on datanode.
pub async fn on_drop_regions(
&self,
ctx: &DdlContext,
region_routes: &[RegionRoute],
) -> Result<()> {
let leaders = find_leaders(region_routes);
let mut drop_region_tasks = Vec::with_capacity(leaders.len());
let table_id = self.table_id;
for datanode in leaders {
let requester = ctx.datanode_manager.datanode(&datanode).await;
let regions = find_leader_regions(region_routes, &datanode);
let region_ids = regions
.iter()
.map(|region_number| RegionId::new(table_id, *region_number))
.collect::<Vec<_>>();
for region_id in region_ids {
debug!("Dropping region {region_id} on Datanode {datanode:?}");
let request = RegionRequest {
header: Some(RegionRequestHeader {
tracing_context: TracingContext::from_current_span().to_w3c(),
..Default::default()
}),
body: Some(region_request::Body::Drop(PbDropRegionRequest {
region_id: region_id.as_u64(),
})),
};
let datanode = datanode.clone();
let requester = requester.clone();
drop_region_tasks.push(async move {
if let Err(err) = requester.handle(request).await {
if err.status_code() != StatusCode::RegionNotFound {
return Err(add_peer_context_if_needed(datanode)(err));
}
}
Ok(())
});
}
}
join_all(drop_region_tasks)
.await
.into_iter()
.collect::<Result<Vec<_>>>()?;
Ok(())
}
}
#[cfg(test)]
mod tests {
use std::assert_matches::assert_matches;
use std::collections::HashMap;
use std::sync::Arc;
use api::v1::{ColumnDataType, SemanticType};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use table::metadata::RawTableInfo;
use super::*;
use crate::ddl::test_util::columns::TestColumnDefBuilder;
use crate::ddl::test_util::create_table::{
build_raw_table_info_from_expr, TestCreateTableExprBuilder,
};
use crate::key::table_route::TableRouteValue;
use crate::table_name::TableName;
use crate::test_util::{new_ddl_context, MockDatanodeManager};
fn test_create_raw_table_info(name: &str) -> RawTableInfo {
let create_table = TestCreateTableExprBuilder::default()
.column_defs([
TestColumnDefBuilder::default()
.name("ts")
.data_type(ColumnDataType::TimestampMillisecond)
.semantic_type(SemanticType::Timestamp)
.build()
.unwrap()
.into(),
TestColumnDefBuilder::default()
.name("host")
.data_type(ColumnDataType::String)
.semantic_type(SemanticType::Tag)
.build()
.unwrap()
.into(),
TestColumnDefBuilder::default()
.name("cpu")
.data_type(ColumnDataType::Float64)
.semantic_type(SemanticType::Field)
.build()
.unwrap()
.into(),
])
.time_index("ts")
.primary_keys(["host".into()])
.table_name(name)
.build()
.unwrap()
.into();
build_raw_table_info_from_expr(&create_table)
}
#[tokio::test]
async fn test_on_prepare() {
// Drops if exists
let datanode_manager = Arc::new(MockDatanodeManager::new(()));
let ctx = new_ddl_context(datanode_manager);
let executor = DropTableExecutor::new(
TableName::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, "my_table"),
1024,
true,
);
let ctrl = executor.on_prepare(&ctx).await.unwrap();
assert!(ctrl.stop());
// Drops a non-existent table
let executor = DropTableExecutor::new(
TableName::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, "my_table"),
1024,
false,
);
let err = executor.on_prepare(&ctx).await.unwrap_err();
assert_matches!(err, error::Error::TableNotFound { .. });
// Drops an existing table
let executor = DropTableExecutor::new(
TableName::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, "my_table"),
1024,
false,
);
let raw_table_info = test_create_raw_table_info("my_table");
ctx.table_metadata_manager
.create_table_metadata(
raw_table_info,
TableRouteValue::physical(vec![]),
HashMap::new(),
)
.await
.unwrap();
let ctrl = executor.on_prepare(&ctx).await.unwrap();
assert!(!ctrl.stop());
}
}

View File

@@ -1,56 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashSet;
use api::v1::SemanticType;
use store_api::metadata::ColumnMetadata;
use table::metadata::RawTableInfo;
/// Generates the new physical table info.
pub(crate) fn build_new_physical_table_info(
mut raw_table_info: RawTableInfo,
physical_columns: &[ColumnMetadata],
) -> RawTableInfo {
let existing_columns = raw_table_info
.meta
.schema
.column_schemas
.iter()
.map(|col| col.name.clone())
.collect::<HashSet<_>>();
let primary_key_indices = &mut raw_table_info.meta.primary_key_indices;
let value_indices = &mut raw_table_info.meta.value_indices;
value_indices.clear();
let time_index = &mut raw_table_info.meta.schema.timestamp_index;
let columns = &mut raw_table_info.meta.schema.column_schemas;
columns.clear();
for (idx, col) in physical_columns.iter().enumerate() {
match col.semantic_type {
SemanticType::Tag => {
// push new primary key to the end.
if !existing_columns.contains(&col.column_schema.name) {
primary_key_indices.push(idx);
}
}
SemanticType::Field => value_indices.push(idx),
SemanticType::Timestamp => *time_index = Some(idx),
}
columns.push(col.column_schema.clone());
}
raw_table_info
}
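
The loop above rebuilds the schema's index bookkeeping from the physical columns. A hedged, simplified sketch of the same bookkeeping, using plain strings in place of ColumnMetadata (purely illustrative):

// Hypothetical mini-version of the index bookkeeping above; it only shows how
// the three collections are filled for a list of (name, semantic type) pairs.
fn rebuild_indices(
    existing: &std::collections::HashSet<String>,
    physical: &[(String, &'static str)],
) -> (Vec<usize>, Vec<usize>, Option<usize>) {
    let (mut pk, mut values, mut ts) = (Vec::new(), Vec::new(), None);
    for (idx, (name, semantic)) in physical.iter().enumerate() {
        match *semantic {
            // Only brand-new tag columns are appended to the primary key indices.
            "tag" if !existing.contains(name) => pk.push(idx),
            "field" => values.push(idx),
            "timestamp" => ts = Some(idx),
            _ => {} // a tag that already existed keeps its previous position
        }
    }
    (pk, values, ts)
}
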

View File

@@ -16,13 +16,16 @@ use std::collections::HashMap;
use std::sync::Arc;
use async_trait::async_trait;
use common_catalog::consts::METRIC_ENGINE;
use common_telemetry::{debug, info};
use snafu::ensure;
use snafu::{ensure, OptionExt};
use store_api::metric_engine_consts::LOGICAL_TABLE_METADATA_KEY;
use store_api::storage::{RegionId, RegionNumber, TableId};
use crate::ddl::{TableMetadata, TableMetadataAllocatorContext};
use crate::error::{self, Result, UnsupportedSnafu};
use crate::key::table_route::PhysicalTableRouteValue;
use crate::error::{self, Result, TableNotFoundSnafu, UnsupportedSnafu};
use crate::key::table_name::{TableNameKey, TableNameManager};
use crate::key::table_route::{LogicalTableRouteValue, PhysicalTableRouteValue, TableRouteValue};
use crate::peer::Peer;
use crate::rpc::ddl::CreateTableTask;
use crate::rpc::router::{Region, RegionRoute};
@@ -35,6 +38,7 @@ pub type TableMetadataAllocatorRef = Arc<TableMetadataAllocator>;
pub struct TableMetadataAllocator {
table_id_sequence: SequenceRef,
wal_options_allocator: WalOptionsAllocatorRef,
table_name_manager: TableNameManager,
peer_allocator: PeerAllocatorRef,
}
@@ -42,10 +46,12 @@ impl TableMetadataAllocator {
pub fn new(
table_id_sequence: SequenceRef,
wal_options_allocator: WalOptionsAllocatorRef,
table_name_manager: TableNameManager,
) -> Self {
Self::with_peer_allocator(
table_id_sequence,
wal_options_allocator,
table_name_manager,
Arc::new(NoopPeerAllocator),
)
}
@@ -53,11 +59,13 @@ impl TableMetadataAllocator {
pub fn with_peer_allocator(
table_id_sequence: SequenceRef,
wal_options_allocator: WalOptionsAllocatorRef,
table_name_manager: TableNameManager,
peer_allocator: PeerAllocatorRef,
) -> Self {
Self {
table_id_sequence,
wal_options_allocator,
table_name_manager,
peer_allocator,
}
}
@@ -94,14 +102,19 @@ impl TableMetadataAllocator {
fn create_wal_options(
&self,
table_route: &PhysicalTableRouteValue,
table_route: &TableRouteValue,
) -> Result<HashMap<RegionNumber, String>> {
let region_numbers = table_route
.region_routes
.iter()
.map(|route| route.region.id.region_number())
.collect();
allocate_region_wal_options(region_numbers, &self.wal_options_allocator)
match table_route {
TableRouteValue::Physical(x) => {
let region_numbers = x
.region_routes
.iter()
.map(|route| route.region.id.region_number())
.collect();
allocate_region_wal_options(region_numbers, &self.wal_options_allocator)
}
TableRouteValue::Logical(_) => Ok(HashMap::new()),
}
}
async fn create_table_route(
@@ -109,7 +122,7 @@ impl TableMetadataAllocator {
ctx: &TableMetadataAllocatorContext,
table_id: TableId,
task: &CreateTableTask,
) -> Result<PhysicalTableRouteValue> {
) -> Result<TableRouteValue> {
let regions = task.partitions.len();
ensure!(
regions > 0,
@@ -118,29 +131,56 @@ impl TableMetadataAllocator {
}
);
let peers = self.peer_allocator.alloc(ctx, regions).await?;
let region_routes = task
.partitions
.iter()
.enumerate()
.map(|(i, partition)| {
let region = Region {
id: RegionId::new(table_id, i as u32),
partition: Some(partition.clone().into()),
..Default::default()
};
let table_route = if task.create_table.engine == METRIC_ENGINE
&& let Some(physical_table_name) = task
.create_table
.table_options
.get(LOGICAL_TABLE_METADATA_KEY)
{
let physical_table_id = self
.table_name_manager
.get(TableNameKey::new(
&task.create_table.catalog_name,
&task.create_table.schema_name,
physical_table_name,
))
.await?
.context(TableNotFoundSnafu {
table_name: physical_table_name,
})?
.table_id();
let peer = peers[i % peers.len()].clone();
let region_ids = (0..regions)
.map(|i| RegionId::new(table_id, i as RegionNumber))
.collect();
RegionRoute {
region,
leader_peer: Some(peer),
..Default::default()
}
})
.collect::<Vec<_>>();
TableRouteValue::Logical(LogicalTableRouteValue::new(physical_table_id, region_ids))
} else {
let peers = self.peer_allocator.alloc(ctx, regions).await?;
Ok(PhysicalTableRouteValue::new(region_routes))
let region_routes = task
.partitions
.iter()
.enumerate()
.map(|(i, partition)| {
let region = Region {
id: RegionId::new(table_id, i as u32),
partition: Some(partition.clone().into()),
..Default::default()
};
let peer = peers[i % peers.len()].clone();
RegionRoute {
region,
leader_peer: Some(peer),
..Default::default()
}
})
.collect::<Vec<_>>();
TableRouteValue::Physical(PhysicalTableRouteValue::new(region_routes))
};
Ok(table_route)
}
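
In short, the allocator produces a logical route when the task targets the metric engine and names its physical table in the table options; otherwise it allocates peers and builds a physical route. A condensed, hypothetical sketch of that decision, where the literal strings merely stand in for the METRIC_ENGINE and LOGICAL_TABLE_METADATA_KEY constants (assumed values, not verified here):

// Hypothetical helper mirroring the branch above; not part of the codebase.
fn wants_logical_route(
    engine: &str,
    table_options: &std::collections::HashMap<String, String>,
) -> bool {
    // "metric" and "on_physical_table" are assumed stand-ins for the real constants.
    engine == "metric" && table_options.contains_key("on_physical_table")
}
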
pub async fn create(
@@ -163,6 +203,15 @@ impl TableMetadataAllocator {
region_wal_options,
})
}
/// Sets table ids for all tasks.
pub async fn set_table_ids_on_logic_create(&self, tasks: &mut [CreateTableTask]) -> Result<()> {
for task in tasks {
let table_id = self.allocate_table_id(task).await?;
task.table_info.ident.table_id = table_id;
}
Ok(())
}
}
pub type PeerAllocatorRef = Arc<dyn PeerAllocator>;

View File

@@ -12,161 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
pub mod alter_table;
pub mod columns;
pub mod create_table;
use std::collections::HashMap;
use api::v1::meta::Partition;
use api::v1::{ColumnDataType, SemanticType};
use common_procedure::Status;
use table::metadata::{RawTableInfo, TableId};
use crate::ddl::create_logical_tables::CreateLogicalTablesProcedure;
use crate::ddl::test_util::columns::TestColumnDefBuilder;
use crate::ddl::test_util::create_table::{
build_raw_table_info_from_expr, TestCreateTableExprBuilder,
pub use create_table::{
TestColumnDef, TestColumnDefBuilder, TestCreateTableExpr, TestCreateTableExprBuilder,
};
use crate::ddl::{DdlContext, TableMetadata, TableMetadataAllocatorContext};
use crate::key::table_route::TableRouteValue;
use crate::rpc::ddl::CreateTableTask;
use crate::ClusterId;
pub async fn create_physical_table_metadata(
ddl_context: &DdlContext,
table_info: RawTableInfo,
table_route: TableRouteValue,
) {
ddl_context
.table_metadata_manager
.create_table_metadata(table_info, table_route, HashMap::default())
.await
.unwrap();
}
pub async fn create_physical_table(
ddl_context: DdlContext,
cluster_id: ClusterId,
name: &str,
) -> TableId {
// Prepares physical table metadata.
let mut create_physical_table_task = test_create_physical_table_task(name);
let TableMetadata {
table_id,
table_route,
..
} = ddl_context
.table_metadata_allocator
.create(
&TableMetadataAllocatorContext { cluster_id },
&create_physical_table_task,
)
.await
.unwrap();
create_physical_table_task.set_table_id(table_id);
create_physical_table_metadata(
&ddl_context,
create_physical_table_task.table_info.clone(),
TableRouteValue::Physical(table_route),
)
.await;
table_id
}
pub async fn create_logical_table(
ddl_context: DdlContext,
cluster_id: ClusterId,
physical_table_id: TableId,
table_name: &str,
) {
use std::assert_matches::assert_matches;
let tasks = vec![test_create_logical_table_task(table_name)];
let mut procedure =
CreateLogicalTablesProcedure::new(cluster_id, tasks, physical_table_id, ddl_context);
let status = procedure.on_prepare().await.unwrap();
assert_matches!(status, Status::Executing { persist: true });
let status = procedure.on_create_metadata().await.unwrap();
assert_matches!(status, Status::Done { .. });
}
pub fn test_create_logical_table_task(name: &str) -> CreateTableTask {
let create_table = TestCreateTableExprBuilder::default()
.column_defs([
TestColumnDefBuilder::default()
.name("ts")
.data_type(ColumnDataType::TimestampMillisecond)
.semantic_type(SemanticType::Timestamp)
.build()
.unwrap()
.into(),
TestColumnDefBuilder::default()
.name("host")
.data_type(ColumnDataType::String)
.semantic_type(SemanticType::Tag)
.build()
.unwrap()
.into(),
TestColumnDefBuilder::default()
.name("cpu")
.data_type(ColumnDataType::Float64)
.semantic_type(SemanticType::Field)
.build()
.unwrap()
.into(),
])
.time_index("ts")
.primary_keys(["host".into()])
.table_name(name)
.build()
.unwrap()
.into();
let table_info = build_raw_table_info_from_expr(&create_table);
CreateTableTask {
create_table,
// Single region
partitions: vec![Partition {
column_list: vec![],
value_list: vec![],
}],
table_info,
}
}
pub fn test_create_physical_table_task(name: &str) -> CreateTableTask {
let create_table = TestCreateTableExprBuilder::default()
.column_defs([
TestColumnDefBuilder::default()
.name("ts")
.data_type(ColumnDataType::TimestampMillisecond)
.semantic_type(SemanticType::Timestamp)
.build()
.unwrap()
.into(),
TestColumnDefBuilder::default()
.name("value")
.data_type(ColumnDataType::Float64)
.semantic_type(SemanticType::Field)
.build()
.unwrap()
.into(),
])
.time_index("ts")
.primary_keys(["value".into()])
.table_name(name)
.build()
.unwrap()
.into();
let table_info = build_raw_table_info_from_expr(&create_table);
CreateTableTask {
create_table,
// Single region
partitions: vec![Partition {
column_list: vec![],
value_list: vec![],
}],
table_info,
}
}

View File

@@ -1,62 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use api::v1::alter_expr::Kind;
use api::v1::{AddColumn, AddColumns, AlterExpr, ColumnDef, RenameTable};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use derive_builder::Builder;
#[derive(Default, Builder)]
#[builder(default)]
pub struct TestAlterTableExpr {
#[builder(setter(into), default = "DEFAULT_CATALOG_NAME.to_string()")]
catalog_name: String,
#[builder(setter(into), default = "DEFAULT_SCHEMA_NAME.to_string()")]
schema_name: String,
#[builder(setter(into))]
table_name: String,
#[builder(setter(into))]
add_columns: Vec<ColumnDef>,
#[builder(setter(into))]
new_table_name: Option<String>,
}
impl From<TestAlterTableExpr> for AlterExpr {
fn from(value: TestAlterTableExpr) -> Self {
if let Some(new_table_name) = value.new_table_name {
Self {
catalog_name: value.catalog_name,
schema_name: value.schema_name,
table_name: value.table_name,
kind: Some(Kind::RenameTable(RenameTable { new_table_name })),
}
} else {
Self {
catalog_name: value.catalog_name,
schema_name: value.schema_name,
table_name: value.table_name,
kind: Some(Kind::AddColumns(AddColumns {
add_columns: value
.add_columns
.into_iter()
.map(|col| AddColumn {
column_def: Some(col),
location: None,
})
.collect(),
})),
}
}
}
}

View File

@@ -1,50 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use api::v1::{ColumnDataType, ColumnDef, SemanticType};
use derive_builder::Builder;
#[derive(Default, Builder)]
pub struct TestColumnDef {
#[builder(setter(into), default)]
name: String,
data_type: ColumnDataType,
#[builder(default)]
is_nullable: bool,
semantic_type: SemanticType,
#[builder(setter(into), default)]
comment: String,
}
impl From<TestColumnDef> for ColumnDef {
fn from(
TestColumnDef {
name,
data_type,
is_nullable,
semantic_type,
comment,
}: TestColumnDef,
) -> Self {
Self {
name,
data_type: data_type as i32,
is_nullable,
default_constraint: vec![],
semantic_type: semantic_type as i32,
comment,
datatype_extension: None,
}
}
}

View File

@@ -15,7 +15,7 @@
use std::collections::HashMap;
use api::v1::column_def::try_as_column_schema;
use api::v1::{ColumnDef, CreateTableExpr, SemanticType};
use api::v1::{ColumnDataType, ColumnDef, CreateTableExpr, SemanticType};
use chrono::DateTime;
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, MITO2_ENGINE};
use datatypes::schema::RawSchema;
@@ -24,6 +24,40 @@ use store_api::storage::TableId;
use table::metadata::{RawTableInfo, RawTableMeta, TableIdent, TableType};
use table::requests::TableOptions;
#[derive(Default, Builder)]
pub struct TestColumnDef {
#[builder(setter(into), default)]
name: String,
data_type: ColumnDataType,
#[builder(default)]
is_nullable: bool,
semantic_type: SemanticType,
#[builder(setter(into), default)]
comment: String,
}
impl From<TestColumnDef> for ColumnDef {
fn from(
TestColumnDef {
name,
data_type,
is_nullable,
semantic_type,
comment,
}: TestColumnDef,
) -> Self {
Self {
name,
data_type: data_type as i32,
is_nullable,
default_constraint: vec![],
semantic_type: semantic_type as i32,
comment,
datatype_extension: None,
}
}
}
#[derive(Default, Builder)]
#[builder(default)]
pub struct TestCreateTableExpr {

View File

@@ -12,7 +12,5 @@
// See the License for the specific language governing permissions and
// limitations under the License.
mod alter_logical_tables;
mod create_logical_tables;
mod create_table;
mod drop_database;

View File

@@ -1,359 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::assert_matches::assert_matches;
use std::sync::Arc;
use api::v1::{ColumnDataType, SemanticType};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_procedure::{Procedure, ProcedureId, Status};
use common_procedure_test::MockContextProvider;
use crate::ddl::alter_logical_tables::AlterLogicalTablesProcedure;
use crate::ddl::test_util::alter_table::TestAlterTableExprBuilder;
use crate::ddl::test_util::columns::TestColumnDefBuilder;
use crate::ddl::test_util::{create_logical_table, create_physical_table};
use crate::ddl::tests::create_logical_tables::NaiveDatanodeHandler;
use crate::error::Error::{AlterLogicalTablesInvalidArguments, TableNotFound};
use crate::key::table_name::TableNameKey;
use crate::rpc::ddl::AlterTableTask;
use crate::test_util::{new_ddl_context, MockDatanodeManager};
fn make_alter_logical_table_add_column_task(
schema: Option<&str>,
table: &str,
add_columns: Vec<String>,
) -> AlterTableTask {
let add_columns = add_columns
.into_iter()
.map(|name| {
TestColumnDefBuilder::default()
.name(name)
.data_type(ColumnDataType::String)
.is_nullable(true)
.semantic_type(SemanticType::Tag)
.comment("new column".to_string())
.build()
.unwrap()
.into()
})
.collect::<Vec<_>>();
let mut alter_table = TestAlterTableExprBuilder::default();
if let Some(schema) = schema {
alter_table.schema_name(schema.to_string());
}
let alter_table = alter_table
.table_name(table.to_string())
.add_columns(add_columns)
.build()
.unwrap();
AlterTableTask {
alter_table: alter_table.into(),
}
}
fn make_alter_logical_table_rename_task(
schema: &str,
table: &str,
new_table_name: &str,
) -> AlterTableTask {
let alter_table = TestAlterTableExprBuilder::default()
.schema_name(schema.to_string())
.table_name(table.to_string())
.new_table_name(new_table_name.to_string())
.build()
.unwrap();
AlterTableTask {
alter_table: alter_table.into(),
}
}
#[tokio::test]
async fn test_on_prepare_check_schema() {
let datanode_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(datanode_manager);
let cluster_id = 1;
let tasks = vec![
make_alter_logical_table_add_column_task(
Some("schema1"),
"table1",
vec!["column1".to_string()],
),
make_alter_logical_table_add_column_task(
Some("schema2"),
"table2",
vec!["column2".to_string()],
),
];
let physical_table_id = 1024u32;
let mut procedure =
AlterLogicalTablesProcedure::new(cluster_id, tasks, physical_table_id, ddl_context);
let err = procedure.on_prepare().await.unwrap_err();
assert_matches!(err, AlterLogicalTablesInvalidArguments { .. });
}
#[tokio::test]
async fn test_on_prepare_check_alter_kind() {
let datanode_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(datanode_manager);
let cluster_id = 1;
let tasks = vec![make_alter_logical_table_rename_task(
"schema1",
"table1",
"new_table1",
)];
let physical_table_id = 1024u32;
let mut procedure =
AlterLogicalTablesProcedure::new(cluster_id, tasks, physical_table_id, ddl_context);
let err = procedure.on_prepare().await.unwrap_err();
assert_matches!(err, AlterLogicalTablesInvalidArguments { .. });
}
#[tokio::test]
async fn test_on_prepare_different_physical_table() {
let cluster_id = 1;
let datanode_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(datanode_manager);
let phy1_id = create_physical_table(ddl_context.clone(), cluster_id, "phy1").await;
create_logical_table(ddl_context.clone(), cluster_id, phy1_id, "table1").await;
let phy2_id = create_physical_table(ddl_context.clone(), cluster_id, "phy2").await;
create_logical_table(ddl_context.clone(), cluster_id, phy2_id, "table2").await;
let tasks = vec![
make_alter_logical_table_add_column_task(None, "table1", vec!["column1".to_string()]),
make_alter_logical_table_add_column_task(None, "table2", vec!["column2".to_string()]),
];
let mut procedure = AlterLogicalTablesProcedure::new(cluster_id, tasks, phy1_id, ddl_context);
let err = procedure.on_prepare().await.unwrap_err();
assert_matches!(err, AlterLogicalTablesInvalidArguments { .. });
}
#[tokio::test]
async fn test_on_prepare_logical_table_not_exists() {
let cluster_id = 1;
let datanode_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(datanode_manager);
// Creates physical table
let phy_id = create_physical_table(ddl_context.clone(), cluster_id, "phy").await;
// Creates a logical table
create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table1").await;
let tasks = vec![
make_alter_logical_table_add_column_task(None, "table1", vec!["column1".to_string()]),
// table2 not exists
make_alter_logical_table_add_column_task(None, "table2", vec!["column2".to_string()]),
];
let mut procedure = AlterLogicalTablesProcedure::new(cluster_id, tasks, phy_id, ddl_context);
let err = procedure.on_prepare().await.unwrap_err();
assert_matches!(err, TableNotFound { .. });
}
#[tokio::test]
async fn test_on_prepare() {
let cluster_id = 1;
let datanode_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(datanode_manager);
// Creates physical table
let phy_id = create_physical_table(ddl_context.clone(), cluster_id, "phy").await;
// Creates 3 logical tables
create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table1").await;
create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table2").await;
create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table3").await;
let tasks = vec![
make_alter_logical_table_add_column_task(None, "table1", vec!["column1".to_string()]),
make_alter_logical_table_add_column_task(None, "table2", vec!["column2".to_string()]),
make_alter_logical_table_add_column_task(None, "table3", vec!["column3".to_string()]),
];
let mut procedure = AlterLogicalTablesProcedure::new(cluster_id, tasks, phy_id, ddl_context);
let result = procedure.on_prepare().await;
assert_matches!(result, Ok(Status::Executing { persist: true }));
}
#[tokio::test]
async fn test_on_update_metadata() {
let cluster_id = 1;
let datanode_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
let ddl_context = new_ddl_context(datanode_manager);
// Creates physical table
let phy_id = create_physical_table(ddl_context.clone(), cluster_id, "phy").await;
// Creates 5 logical tables
create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table1").await;
create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table2").await;
create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table3").await;
create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table4").await;
create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table5").await;
let tasks = vec![
make_alter_logical_table_add_column_task(None, "table1", vec!["new_col".to_string()]),
make_alter_logical_table_add_column_task(None, "table2", vec!["mew_col".to_string()]),
make_alter_logical_table_add_column_task(None, "table3", vec!["new_col".to_string()]),
];
let mut procedure = AlterLogicalTablesProcedure::new(cluster_id, tasks, phy_id, ddl_context);
let mut status = procedure.on_prepare().await.unwrap();
assert_matches!(status, Status::Executing { persist: true });
let ctx = common_procedure::Context {
procedure_id: ProcedureId::random(),
provider: Arc::new(MockContextProvider::default()),
};
// on_submit_alter_region_requests
status = procedure.execute(&ctx).await.unwrap();
assert_matches!(status, Status::Executing { persist: true });
// on_update_metadata
status = procedure.execute(&ctx).await.unwrap();
assert_matches!(status, Status::Executing { persist: true });
}
#[tokio::test]
async fn test_on_part_duplicate_alter_request() {
let cluster_id = 1;
let datanode_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
let ddl_context = new_ddl_context(datanode_manager);
// Creates physical table
let phy_id = create_physical_table(ddl_context.clone(), cluster_id, "phy").await;
// Creates 2 logical tables
create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table1").await;
create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table2").await;
let tasks = vec![
make_alter_logical_table_add_column_task(None, "table1", vec!["col_0".to_string()]),
make_alter_logical_table_add_column_task(None, "table2", vec!["col_0".to_string()]),
];
let mut procedure =
AlterLogicalTablesProcedure::new(cluster_id, tasks, phy_id, ddl_context.clone());
let mut status = procedure.on_prepare().await.unwrap();
assert_matches!(status, Status::Executing { persist: true });
let ctx = common_procedure::Context {
procedure_id: ProcedureId::random(),
provider: Arc::new(MockContextProvider::default()),
};
// on_submit_alter_region_requests
status = procedure.execute(&ctx).await.unwrap();
assert_matches!(status, Status::Executing { persist: true });
// on_update_metadata
status = procedure.execute(&ctx).await.unwrap();
assert_matches!(status, Status::Executing { persist: true });
// re-alter
let tasks = vec![
make_alter_logical_table_add_column_task(
None,
"table1",
vec!["col_0".to_string(), "new_col_1".to_string()],
),
make_alter_logical_table_add_column_task(
None,
"table2",
vec![
"col_0".to_string(),
"new_col_2".to_string(),
"new_col_1".to_string(),
],
),
];
let mut procedure =
AlterLogicalTablesProcedure::new(cluster_id, tasks, phy_id, ddl_context.clone());
let mut status = procedure.on_prepare().await.unwrap();
assert_matches!(status, Status::Executing { persist: true });
let ctx = common_procedure::Context {
procedure_id: ProcedureId::random(),
provider: Arc::new(MockContextProvider::default()),
};
// on_submit_alter_region_requests
status = procedure.execute(&ctx).await.unwrap();
assert_matches!(status, Status::Executing { persist: true });
// on_update_metadata
status = procedure.execute(&ctx).await.unwrap();
assert_matches!(status, Status::Executing { persist: true });
let table_name_keys = vec![
TableNameKey::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, "table1"),
TableNameKey::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, "table2"),
];
let table_ids = ddl_context
.table_metadata_manager
.table_name_manager()
.batch_get(table_name_keys)
.await
.unwrap()
.into_iter()
.map(|x| x.unwrap().table_id())
.collect::<Vec<_>>();
let tables = ddl_context
.table_metadata_manager
.table_info_manager()
.batch_get(&table_ids)
.await
.unwrap();
let table1 = tables.get(&table_ids[0]).unwrap();
let table2 = tables.get(&table_ids[1]).unwrap();
assert_eq!(table1.table_info.name, "table1");
assert_eq!(table2.table_info.name, "table2");
let table1_cols = table1
.table_info
.meta
.schema
.column_schemas
.iter()
.map(|x| x.name.clone())
.collect::<Vec<_>>();
assert_eq!(
table1_cols,
vec![
"col_0".to_string(),
"cpu".to_string(),
"host".to_string(),
"new_col_1".to_string(),
"ts".to_string()
]
);
let table2_cols = table2
.table_info
.meta
.schema
.column_schemas
.iter()
.map(|x| x.name.clone())
.collect::<Vec<_>>();
assert_eq!(
table2_cols,
vec![
"col_0".to_string(),
"cpu".to_string(),
"host".to_string(),
"new_col_1".to_string(),
"new_col_2".to_string(),
"ts".to_string()
]
);
}

View File

@@ -13,9 +13,12 @@
// limitations under the License.
use std::assert_matches::assert_matches;
use std::collections::HashMap;
use std::sync::Arc;
use api::v1::meta::Partition;
use api::v1::region::{QueryRequest, RegionRequest};
use api::v1::{ColumnDataType, SemanticType};
use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
use common_procedure::{Context as ProcedureContext, Procedure, ProcedureId, Status};
@@ -23,17 +26,100 @@ use common_procedure_test::MockContextProvider;
use common_recordbatch::SendableRecordBatchStream;
use common_telemetry::debug;
use store_api::storage::RegionId;
use table::metadata::RawTableInfo;
use crate::datanode_manager::HandleResponse;
use crate::ddl::create_logical_tables::CreateLogicalTablesProcedure;
use crate::ddl::test_util::{
create_physical_table_metadata, test_create_logical_table_task, test_create_physical_table_task,
};
use crate::ddl::{TableMetadata, TableMetadataAllocatorContext};
use crate::ddl::test_util::create_table::build_raw_table_info_from_expr;
use crate::ddl::test_util::{TestColumnDefBuilder, TestCreateTableExprBuilder};
use crate::ddl::{DdlContext, TableMetadata, TableMetadataAllocatorContext};
use crate::error::{Error, Result};
use crate::key::table_route::TableRouteValue;
use crate::peer::Peer;
use crate::test_util::{new_ddl_context, MockDatanodeHandler, MockDatanodeManager};
use crate::rpc::ddl::CreateTableTask;
use crate::test_util::{new_ddl_context, AffectedRows, MockDatanodeHandler, MockDatanodeManager};
// Note: this code may be duplicated elsewhere.
// However, that is by design; it keeps the tests easy to modify or extend.
fn test_create_logical_table_task(name: &str) -> CreateTableTask {
let create_table = TestCreateTableExprBuilder::default()
.column_defs([
TestColumnDefBuilder::default()
.name("ts")
.data_type(ColumnDataType::TimestampMillisecond)
.semantic_type(SemanticType::Timestamp)
.build()
.unwrap()
.into(),
TestColumnDefBuilder::default()
.name("host")
.data_type(ColumnDataType::String)
.semantic_type(SemanticType::Tag)
.build()
.unwrap()
.into(),
TestColumnDefBuilder::default()
.name("cpu")
.data_type(ColumnDataType::Float64)
.semantic_type(SemanticType::Field)
.build()
.unwrap()
.into(),
])
.time_index("ts")
.primary_keys(["host".into()])
.table_name(name)
.build()
.unwrap()
.into();
let table_info = build_raw_table_info_from_expr(&create_table);
CreateTableTask {
create_table,
// Single region
partitions: vec![Partition {
column_list: vec![],
value_list: vec![],
}],
table_info,
}
}
// Note: this code may be duplicated elsewhere.
// However, that is by design; it keeps the tests easy to modify or extend.
fn test_create_physical_table_task(name: &str) -> CreateTableTask {
let create_table = TestCreateTableExprBuilder::default()
.column_defs([
TestColumnDefBuilder::default()
.name("ts")
.data_type(ColumnDataType::TimestampMillisecond)
.semantic_type(SemanticType::Timestamp)
.build()
.unwrap()
.into(),
TestColumnDefBuilder::default()
.name("value")
.data_type(ColumnDataType::Float64)
.semantic_type(SemanticType::Field)
.build()
.unwrap()
.into(),
])
.time_index("ts")
.primary_keys(["value".into()])
.table_name(name)
.build()
.unwrap()
.into();
let table_info = build_raw_table_info_from_expr(&create_table);
CreateTableTask {
create_table,
// Single region
partitions: vec![Partition {
column_list: vec![],
value_list: vec![],
}],
table_info,
}
}
#[tokio::test]
async fn test_on_prepare_physical_table_not_found() {
@@ -48,6 +134,18 @@ async fn test_on_prepare_physical_table_not_found() {
assert_matches!(err, Error::TableRouteNotFound { .. });
}
async fn create_physical_table_metadata(
ddl_context: &DdlContext,
table_info: RawTableInfo,
table_route: TableRouteValue,
) {
ddl_context
.table_metadata_manager
.create_table_metadata(table_info, table_route, HashMap::default())
.await
.unwrap();
}
#[tokio::test]
async fn test_on_prepare() {
let datanode_manager = Arc::new(MockDatanodeManager::new(()));
@@ -71,7 +169,7 @@ async fn test_on_prepare() {
create_physical_table_metadata(
&ddl_context,
create_physical_table_task.table_info.clone(),
TableRouteValue::Physical(table_route),
table_route,
)
.await;
// The create logical table procedure.
@@ -106,7 +204,7 @@ async fn test_on_prepare_logical_table_exists_err() {
create_physical_table_metadata(
&ddl_context,
create_physical_table_task.table_info.clone(),
TableRouteValue::Physical(table_route),
table_route,
)
.await;
// Creates the logical table metadata.
@@ -152,7 +250,7 @@ async fn test_on_prepare_with_create_if_table_exists() {
create_physical_table_metadata(
&ddl_context,
create_physical_table_task.table_info.clone(),
TableRouteValue::Physical(table_route),
table_route,
)
.await;
// Creates the logical table metadata.
@@ -200,7 +298,7 @@ async fn test_on_prepare_part_logical_tables_exist() {
create_physical_table_metadata(
&ddl_context,
create_physical_table_task.table_info.clone(),
TableRouteValue::Physical(table_route),
table_route,
)
.await;
// Creates the logical table metadata.
@@ -234,9 +332,9 @@ pub struct NaiveDatanodeHandler;
#[async_trait::async_trait]
impl MockDatanodeHandler for NaiveDatanodeHandler {
async fn handle(&self, peer: &Peer, request: RegionRequest) -> Result<HandleResponse> {
async fn handle(&self, peer: &Peer, request: RegionRequest) -> Result<AffectedRows> {
debug!("Returning Ok(0) for request: {request:?}, peer: {peer:?}");
Ok(HandleResponse::new(0))
Ok(0)
}
async fn handle_query(
@@ -271,7 +369,7 @@ async fn test_on_create_metadata() {
create_physical_table_metadata(
&ddl_context,
create_physical_table_task.table_info.clone(),
TableRouteValue::Physical(table_route),
table_route,
)
.await;
// The create logical table procedure.
@@ -321,7 +419,7 @@ async fn test_on_create_metadata_part_logical_tables_exist() {
create_physical_table_metadata(
&ddl_context,
create_physical_table_task.table_info.clone(),
TableRouteValue::Physical(table_route),
table_route,
)
.await;
// Creates the logical table metadata.
@@ -382,7 +480,7 @@ async fn test_on_create_metadata_err() {
create_physical_table_metadata(
&ddl_context,
create_physical_table_task.table_info.clone(),
TableRouteValue::Physical(table_route),
table_route,
)
.await;
// The create logical table procedure.

View File

@@ -26,22 +26,19 @@ use common_procedure_test::MockContextProvider;
use common_recordbatch::SendableRecordBatchStream;
use common_telemetry::debug;
use crate::datanode_manager::HandleResponse;
use crate::ddl::create_table::CreateTableProcedure;
use crate::ddl::test_util::columns::TestColumnDefBuilder;
use crate::ddl::test_util::create_table::{
build_raw_table_info_from_expr, TestCreateTableExprBuilder,
};
use crate::ddl::test_util::create_table::build_raw_table_info_from_expr;
use crate::ddl::test_util::{TestColumnDefBuilder, TestCreateTableExprBuilder};
use crate::error;
use crate::error::{Error, Result};
use crate::key::table_route::TableRouteValue;
use crate::peer::Peer;
use crate::rpc::ddl::CreateTableTask;
use crate::test_util::{new_ddl_context, MockDatanodeHandler, MockDatanodeManager};
use crate::test_util::{new_ddl_context, AffectedRows, MockDatanodeHandler, MockDatanodeManager};
#[async_trait::async_trait]
impl MockDatanodeHandler for () {
async fn handle(&self, _peer: &Peer, _request: RegionRequest) -> Result<HandleResponse> {
async fn handle(&self, _peer: &Peer, _request: RegionRequest) -> Result<AffectedRows> {
unreachable!()
}
@@ -179,7 +176,7 @@ pub struct RetryErrorDatanodeHandler;
#[async_trait::async_trait]
impl MockDatanodeHandler for RetryErrorDatanodeHandler {
async fn handle(&self, peer: &Peer, request: RegionRequest) -> Result<HandleResponse> {
async fn handle(&self, peer: &Peer, request: RegionRequest) -> Result<AffectedRows> {
debug!("Returning retry later for request: {request:?}, peer: {peer:?}");
Err(Error::RetryLater {
source: BoxedError::new(
@@ -223,7 +220,7 @@ pub struct UnexpectedErrorDatanodeHandler;
#[async_trait::async_trait]
impl MockDatanodeHandler for UnexpectedErrorDatanodeHandler {
async fn handle(&self, peer: &Peer, request: RegionRequest) -> Result<HandleResponse> {
async fn handle(&self, peer: &Peer, request: RegionRequest) -> Result<AffectedRows> {
debug!("Returning mock error for request: {request:?}, peer: {peer:?}");
error::UnexpectedSnafu {
err_msg: "mock error",
@@ -263,9 +260,9 @@ pub struct NaiveDatanodeHandler;
#[async_trait::async_trait]
impl MockDatanodeHandler for NaiveDatanodeHandler {
async fn handle(&self, peer: &Peer, request: RegionRequest) -> Result<HandleResponse> {
async fn handle(&self, peer: &Peer, request: RegionRequest) -> Result<AffectedRows> {
debug!("Returning Ok(0) for request: {request:?}, peer: {peer:?}");
Ok(HandleResponse::new(0))
Ok(0)
}
async fn handle_query(

View File

@@ -1,123 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_procedure::{Context as ProcedureContext, Procedure, ProcedureId};
use common_procedure_test::MockContextProvider;
use futures::TryStreamExt;
use crate::ddl::drop_database::DropDatabaseProcedure;
use crate::ddl::test_util::{create_logical_table, create_physical_table};
use crate::ddl::tests::create_table::{NaiveDatanodeHandler, RetryErrorDatanodeHandler};
use crate::key::schema_name::SchemaNameKey;
use crate::test_util::{new_ddl_context, MockDatanodeManager};
#[tokio::test]
async fn test_drop_database_with_logical_tables() {
common_telemetry::init_default_ut_logging();
let cluster_id = 1;
let datanode_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
let ddl_context = new_ddl_context(datanode_manager);
ddl_context
.table_metadata_manager
.schema_manager()
.create(
SchemaNameKey::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME),
None,
false,
)
.await
.unwrap();
// Creates physical table
let phy_id = create_physical_table(ddl_context.clone(), cluster_id, "phy").await;
// Creates 3 logical tables
create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table1").await;
create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table2").await;
create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table3").await;
let mut procedure = DropDatabaseProcedure::new(
DEFAULT_CATALOG_NAME.to_string(),
DEFAULT_SCHEMA_NAME.to_string(),
false,
ddl_context.clone(),
);
let ctx = ProcedureContext {
procedure_id: ProcedureId::random(),
provider: Arc::new(MockContextProvider::default()),
};
while !procedure.execute(&ctx).await.unwrap().is_done() {
procedure.execute(&ctx).await.unwrap();
}
let tables = ddl_context
.table_metadata_manager
.table_name_manager()
.tables(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME)
.try_collect::<Vec<_>>()
.await
.unwrap();
assert!(tables.is_empty());
}
#[tokio::test]
async fn test_drop_database_retryable_error() {
common_telemetry::init_default_ut_logging();
let cluster_id = 1;
let datanode_manager = Arc::new(MockDatanodeManager::new(RetryErrorDatanodeHandler));
let ddl_context = new_ddl_context(datanode_manager);
ddl_context
.table_metadata_manager
.schema_manager()
.create(
SchemaNameKey::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME),
None,
false,
)
.await
.unwrap();
// Creates physical table
let phy_id = create_physical_table(ddl_context.clone(), cluster_id, "phy").await;
// Creates 3 logical tables
create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table1").await;
create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table2").await;
create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table3").await;
let mut procedure = DropDatabaseProcedure::new(
DEFAULT_CATALOG_NAME.to_string(),
DEFAULT_SCHEMA_NAME.to_string(),
false,
ddl_context.clone(),
);
let ctx = ProcedureContext {
procedure_id: ProcedureId::random(),
provider: Arc::new(MockContextProvider::default()),
};
loop {
match procedure.execute(&ctx).await {
Ok(_) => {
// go next
}
Err(err) => {
assert!(err.is_retry_later());
break;
}
}
}
}

View File

@@ -19,7 +19,9 @@ use snafu::{ensure, location, Location, OptionExt};
use store_api::metric_engine_consts::LOGICAL_TABLE_METADATA_KEY;
use table::metadata::TableId;
use crate::error::{Error, Result, TableNotFoundSnafu, UnsupportedSnafu};
use crate::error::{
EmptyCreateTableTasksSnafu, Error, Result, TableNotFoundSnafu, UnsupportedSnafu,
};
use crate::key::table_name::TableNameKey;
use crate::key::TableMetadataManagerRef;
use crate::peer::Peer;
@@ -96,8 +98,7 @@ pub async fn check_and_get_physical_table_id(
None => Some(current_physical_table_name),
};
}
// Safety: `physical_table_name` is `Some` here
let physical_table_name = physical_table_name.unwrap();
let physical_table_name = physical_table_name.context(EmptyCreateTableTasksSnafu)?;
table_metadata_manager
.table_name_manager()
.get(physical_table_name)
@@ -107,22 +108,3 @@ pub async fn check_and_get_physical_table_id(
})
.map(|table| table.table_id())
}
pub async fn get_physical_table_id(
table_metadata_manager: &TableMetadataManagerRef,
logical_table_name: TableNameKey<'_>,
) -> Result<TableId> {
let logical_table_id = table_metadata_manager
.table_name_manager()
.get(logical_table_name)
.await?
.context(TableNotFoundSnafu {
table_name: logical_table_name.to_string(),
})
.map(|table| table.table_id())?;
table_metadata_manager
.table_route_manager()
.get_physical_table_id(logical_table_id)
.await
}

View File

@@ -14,9 +14,7 @@
use std::sync::Arc;
use common_procedure::{
watcher, BoxedProcedureLoader, Output, ProcedureId, ProcedureManagerRef, ProcedureWithId,
};
use common_procedure::{watcher, Output, ProcedureId, ProcedureManagerRef, ProcedureWithId};
use common_telemetry::tracing_context::{FutureExt, TracingContext};
use common_telemetry::{debug, info, tracing};
use snafu::{ensure, OptionExt, ResultExt};
@@ -24,20 +22,16 @@ use store_api::storage::TableId;
use crate::cache_invalidator::CacheInvalidatorRef;
use crate::datanode_manager::DatanodeManagerRef;
use crate::ddl::alter_logical_tables::AlterLogicalTablesProcedure;
use crate::ddl::alter_table::AlterTableProcedure;
use crate::ddl::create_logical_tables::CreateLogicalTablesProcedure;
use crate::ddl::create_table::CreateTableProcedure;
use crate::ddl::drop_database::DropDatabaseProcedure;
use crate::ddl::drop_table::DropTableProcedure;
use crate::ddl::table_meta::TableMetadataAllocatorRef;
use crate::ddl::truncate_table::TruncateTableProcedure;
use crate::ddl::{utils, DdlContext, ExecutorContext, ProcedureExecutor};
use crate::error::{
EmptyDdlTasksSnafu, ParseProcedureIdSnafu, ProcedureNotFoundSnafu, ProcedureOutputSnafu,
QueryProcedureSnafu, RegisterProcedureLoaderSnafu, Result, SubmitProcedureSnafu,
TableInfoNotFoundSnafu, TableNotFoundSnafu, TableRouteNotFoundSnafu,
UnexpectedLogicalRouteTableSnafu, UnsupportedSnafu, WaitProcedureSnafu,
self, EmptyCreateTableTasksSnafu, ProcedureOutputSnafu, RegisterProcedureLoaderSnafu, Result,
SubmitProcedureSnafu, TableNotFoundSnafu, UnsupportedSnafu, WaitProcedureSnafu,
};
use crate::key::table_info::TableInfoValue;
use crate::key::table_name::TableNameKey;
@@ -45,22 +39,21 @@ use crate::key::table_route::TableRouteValue;
use crate::key::{DeserializedValueWithBytes, TableMetadataManagerRef};
use crate::region_keeper::MemoryRegionKeeperRef;
use crate::rpc::ddl::DdlTask::{
AlterLogicalTables, AlterTable, CreateLogicalTables, CreateTable, DropDatabase,
DropLogicalTables, DropTable, TruncateTable,
AlterLogicalTables, AlterTable, CreateLogicalTables, CreateTable, DropLogicalTables, DropTable,
TruncateTable,
};
use crate::rpc::ddl::{
AlterTableTask, CreateTableTask, DropDatabaseTask, DropTableTask, SubmitDdlTaskRequest,
SubmitDdlTaskResponse, TruncateTableTask,
AlterTableTask, CreateTableTask, DropTableTask, SubmitDdlTaskRequest, SubmitDdlTaskResponse,
TruncateTableTask,
};
use crate::rpc::procedure;
use crate::rpc::procedure::{MigrateRegionRequest, MigrateRegionResponse, ProcedureStateResponse};
use crate::rpc::router::RegionRoute;
use crate::table_name::TableName;
use crate::ClusterId;
pub type DdlManagerRef = Arc<DdlManager>;
pub type BoxedProcedureLoaderFactory = dyn Fn(DdlContext) -> BoxedProcedureLoader;
/// The [DdlManager] provides the ability to execute Ddl.
pub struct DdlManager {
procedure_manager: ProcedureManagerRef,
@@ -71,8 +64,8 @@ pub struct DdlManager {
memory_region_keeper: MemoryRegionKeeperRef,
}
/// Returns a new [DdlManager] with all Ddl [BoxedProcedureLoader](common_procedure::procedure::BoxedProcedureLoader)s registered.
impl DdlManager {
/// Returns a new [DdlManager] with all Ddl [BoxedProcedureLoader](common_procedure::procedure::BoxedProcedureLoader)s registered.
pub fn try_new(
procedure_manager: ProcedureManagerRef,
datanode_clients: DatanodeManagerRef,
@@ -80,7 +73,6 @@ impl DdlManager {
table_metadata_manager: TableMetadataManagerRef,
table_metadata_allocator: TableMetadataAllocatorRef,
memory_region_keeper: MemoryRegionKeeperRef,
register_loaders: bool,
) -> Result<Self> {
let manager = Self {
procedure_manager,
@@ -90,9 +82,7 @@ impl DdlManager {
table_metadata_allocator,
memory_region_keeper,
};
if register_loaders {
manager.register_loaders()?;
}
manager.register_loaders()?;
Ok(manager)
}
@@ -113,82 +103,75 @@ impl DdlManager {
}
fn register_loaders(&self) -> Result<()> {
let loaders: Vec<(&str, &BoxedProcedureLoaderFactory)> = vec![
(
let context = self.create_context();
self.procedure_manager
.register_loader(
CreateTableProcedure::TYPE_NAME,
&|context: DdlContext| -> BoxedProcedureLoader {
Box::new(move |json: &str| {
let context = context.clone();
CreateTableProcedure::from_json(json, context).map(|p| Box::new(p) as _)
})
},
),
(
Box::new(move |json| {
let context = context.clone();
CreateTableProcedure::from_json(json, context).map(|p| Box::new(p) as _)
}),
)
.context(RegisterProcedureLoaderSnafu {
type_name: CreateTableProcedure::TYPE_NAME,
})?;
let context = self.create_context();
self.procedure_manager
.register_loader(
CreateLogicalTablesProcedure::TYPE_NAME,
&|context: DdlContext| -> BoxedProcedureLoader {
Box::new(move |json: &str| {
let context = context.clone();
CreateLogicalTablesProcedure::from_json(json, context)
.map(|p| Box::new(p) as _)
})
},
),
(
AlterTableProcedure::TYPE_NAME,
&|context: DdlContext| -> BoxedProcedureLoader {
Box::new(move |json: &str| {
let context = context.clone();
AlterTableProcedure::from_json(json, context).map(|p| Box::new(p) as _)
})
},
),
(
AlterLogicalTablesProcedure::TYPE_NAME,
&|context: DdlContext| -> BoxedProcedureLoader {
Box::new(move |json: &str| {
let context = context.clone();
AlterLogicalTablesProcedure::from_json(json, context)
.map(|p| Box::new(p) as _)
})
},
),
(
Box::new(move |json| {
let context = context.clone();
CreateLogicalTablesProcedure::from_json(json, context).map(|p| Box::new(p) as _)
}),
)
.context(RegisterProcedureLoaderSnafu {
type_name: CreateLogicalTablesProcedure::TYPE_NAME,
})?;
let context = self.create_context();
self.procedure_manager
.register_loader(
DropTableProcedure::TYPE_NAME,
&|context: DdlContext| -> BoxedProcedureLoader {
Box::new(move |json: &str| {
let context = context.clone();
DropTableProcedure::from_json(json, context).map(|p| Box::new(p) as _)
})
},
),
(
Box::new(move |json| {
let context = context.clone();
DropTableProcedure::from_json(json, context).map(|p| Box::new(p) as _)
}),
)
.context(RegisterProcedureLoaderSnafu {
type_name: DropTableProcedure::TYPE_NAME,
})?;
let context = self.create_context();
self.procedure_manager
.register_loader(
AlterTableProcedure::TYPE_NAME,
Box::new(move |json| {
let context = context.clone();
AlterTableProcedure::from_json(json, context).map(|p| Box::new(p) as _)
}),
)
.context(RegisterProcedureLoaderSnafu {
type_name: AlterTableProcedure::TYPE_NAME,
})?;
let context = self.create_context();
self.procedure_manager
.register_loader(
TruncateTableProcedure::TYPE_NAME,
&|context: DdlContext| -> BoxedProcedureLoader {
Box::new(move |json: &str| {
let context = context.clone();
TruncateTableProcedure::from_json(json, context).map(|p| Box::new(p) as _)
})
},
),
(
DropDatabaseProcedure::TYPE_NAME,
&|context: DdlContext| -> BoxedProcedureLoader {
Box::new(move |json: &str| {
let context = context.clone();
DropDatabaseProcedure::from_json(json, context).map(|p| Box::new(p) as _)
})
},
),
];
for (type_name, loader_factory) in loaders {
let context = self.create_context();
self.procedure_manager
.register_loader(type_name, loader_factory(context))
.context(RegisterProcedureLoaderSnafu { type_name })?;
}
Ok(())
Box::new(move |json| {
let context = context.clone();
TruncateTableProcedure::from_json(json, context).map(|p| Box::new(p) as _)
}),
)
.context(RegisterProcedureLoaderSnafu {
type_name: TruncateTableProcedure::TYPE_NAME,
})
}
#[tracing::instrument(skip_all)]
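For readers tracing this hunk: both versions register one loader per procedure type, and each loader is a closure that captures a cloned DdlContext and rebuilds the procedure from its persisted JSON. Below is a minimal standalone sketch of that shape, using simplified stand-in types rather than the real GreptimeDB definitions.
use std::collections::HashMap;
// Hypothetical, simplified stand-ins for the types in the hunk above;
// they are not the real GreptimeDB definitions.
#[derive(Clone)]
struct DdlContext;
trait Procedure {
    fn type_name(&self) -> &'static str;
}
struct CreateTableProcedure;
impl CreateTableProcedure {
    const TYPE_NAME: &'static str = "metasrv-procedure::CreateTable";
    fn from_json(_json: &str, _ctx: DdlContext) -> Result<Self, String> {
        Ok(Self)
    }
}
impl Procedure for CreateTableProcedure {
    fn type_name(&self) -> &'static str {
        Self::TYPE_NAME
    }
}
// A loader rebuilds a boxed procedure from its persisted JSON.
type BoxedProcedureLoader = Box<dyn Fn(&str) -> Result<Box<dyn Procedure>, String>>;
#[derive(Default)]
struct ProcedureManager {
    loaders: HashMap<&'static str, BoxedProcedureLoader>,
}
impl ProcedureManager {
    fn register_loader(&mut self, type_name: &'static str, loader: BoxedProcedureLoader) {
        self.loaders.insert(type_name, loader);
    }
    fn replay(&self, type_name: &str, json: &str) -> Result<Box<dyn Procedure>, String> {
        let loader = self
            .loaders
            .get(type_name)
            .ok_or_else(|| format!("no loader for {type_name}"))?;
        loader(json)
    }
}
fn main() {
    let context = DdlContext;
    let mut manager = ProcedureManager::default();
    // The closure clones the captured context for every replayed procedure,
    // mirroring the `move |json| { let context = context.clone(); ... }` blocks above.
    manager.register_loader(
        CreateTableProcedure::TYPE_NAME,
        Box::new(move |json: &str| -> Result<Box<dyn Procedure>, String> {
            let context = context.clone();
            CreateTableProcedure::from_json(json, context)
                .map(|p| Box::new(p) as Box<dyn Procedure>)
        }),
    );
    let replayed = manager.replay(CreateTableProcedure::TYPE_NAME, "{}").unwrap();
    assert_eq!(replayed.type_name(), CreateTableProcedure::TYPE_NAME);
}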
@@ -198,11 +181,17 @@ impl DdlManager {
cluster_id: ClusterId,
alter_table_task: AlterTableTask,
table_info_value: DeserializedValueWithBytes<TableInfoValue>,
physical_table_info: Option<(TableId, TableName)>,
) -> Result<(ProcedureId, Option<Output>)> {
let context = self.create_context();
let procedure =
AlterTableProcedure::new(cluster_id, alter_table_task, table_info_value, context)?;
let procedure = AlterTableProcedure::new(
cluster_id,
alter_table_task,
table_info_value,
physical_table_info,
context,
)?;
let procedure_with_id = ProcedureWithId::with_random_id(Box::new(procedure));
@@ -226,7 +215,7 @@ impl DdlManager {
}
#[tracing::instrument(skip_all)]
/// Submits and executes a create multiple logical table tasks.
/// Submits and executes a create table task.
pub async fn submit_create_logical_table_tasks(
&self,
cluster_id: ClusterId,
@@ -247,28 +236,6 @@ impl DdlManager {
self.submit_procedure(procedure_with_id).await
}
#[tracing::instrument(skip_all)]
/// Submits and executes alter multiple table tasks.
pub async fn submit_alter_logical_table_tasks(
&self,
cluster_id: ClusterId,
alter_table_tasks: Vec<AlterTableTask>,
physical_table_id: TableId,
) -> Result<(ProcedureId, Option<Output>)> {
let context = self.create_context();
let procedure = AlterLogicalTablesProcedure::new(
cluster_id,
alter_table_tasks,
physical_table_id,
context,
);
let procedure_with_id = ProcedureWithId::with_random_id(Box::new(procedure));
self.submit_procedure(procedure_with_id).await
}
#[tracing::instrument(skip_all)]
/// Submits and executes a drop table task.
pub async fn submit_drop_table_task(
@@ -293,24 +260,6 @@ impl DdlManager {
self.submit_procedure(procedure_with_id).await
}
#[tracing::instrument(skip_all)]
/// Submits and executes a drop table task.
pub async fn submit_drop_database(
&self,
_cluster_id: ClusterId,
DropDatabaseTask {
catalog,
schema,
drop_if_exists,
}: DropDatabaseTask,
) -> Result<(ProcedureId, Option<Output>)> {
let context = self.create_context();
let procedure = DropDatabaseProcedure::new(catalog, schema, drop_if_exists, context);
let procedure_with_id = ProcedureWithId::with_random_id(Box::new(procedure));
self.submit_procedure(procedure_with_id).await
}
#[tracing::instrument(skip_all)]
/// Submits and executes a truncate table task.
pub async fn submit_truncate_table_task(
@@ -366,11 +315,12 @@ async fn handle_truncate_table_task(
let (table_info_value, table_route_value) =
table_metadata_manager.get_full_table_info(table_id).await?;
let table_info_value = table_info_value.with_context(|| TableInfoNotFoundSnafu {
table: table_ref.to_string(),
let table_info_value = table_info_value.with_context(|| error::TableInfoNotFoundSnafu {
table_name: table_ref.to_string(),
})?;
let table_route_value = table_route_value.context(TableRouteNotFoundSnafu { table_id })?;
let table_route_value =
table_route_value.context(error::TableRouteNotFoundSnafu { table_id })?;
let table_route = table_route_value.into_inner().region_routes()?.clone();
@@ -412,28 +362,50 @@ async fn handle_alter_table_task(
})?
.table_id();
let (table_info_value, table_route_value) = ddl_manager
let table_info_value = ddl_manager
.table_metadata_manager()
.get_full_table_info(table_id)
.table_info_manager()
.get(table_id)
.await?
.with_context(|| error::TableInfoNotFoundSnafu {
table_name: table_ref.to_string(),
})?;
let physical_table_id = ddl_manager
.table_metadata_manager()
.table_route_manager()
.get_physical_table_id(table_id)
.await?;
let table_route_value = table_route_value
.context(TableRouteNotFoundSnafu { table_id })?
.into_inner();
ensure!(
table_route_value.is_physical(),
UnexpectedLogicalRouteTableSnafu {
err_msg: format!("{:?} is a non-physical TableRouteValue.", table_ref),
}
);
let table_info_value = table_info_value.with_context(|| TableInfoNotFoundSnafu {
table: table_ref.to_string(),
})?;
let physical_table_info = if physical_table_id == table_id {
None
} else {
let physical_table_info = &ddl_manager
.table_metadata_manager()
.table_info_manager()
.get(physical_table_id)
.await?
.with_context(|| error::TableInfoNotFoundSnafu {
table_name: table_ref.to_string(),
})?
.table_info;
Some((
physical_table_id,
TableName {
catalog_name: physical_table_info.catalog_name.clone(),
schema_name: physical_table_info.schema_name.clone(),
table_name: physical_table_info.name.clone(),
},
))
};
let (id, _) = ddl_manager
.submit_alter_table_task(cluster_id, alter_table_task, table_info_value)
.submit_alter_table_task(
cluster_id,
alter_table_task,
table_info_value,
physical_table_info,
)
.await?;
info!("Table: {table_id} is altered via procedure_id {id:?}");
@@ -462,8 +434,8 @@ async fn handle_drop_table_task(
.get_physical_table_route(table_id)
.await?;
let table_info_value = table_info_value.with_context(|| TableInfoNotFoundSnafu {
table: table_ref.to_string(),
let table_info_value = table_info_value.with_context(|| error::TableInfoNotFoundSnafu {
table_name: table_ref.to_string(),
})?;
let table_route_value =
@@ -516,19 +488,19 @@ async fn handle_create_table_task(
async fn handle_create_logical_table_tasks(
ddl_manager: &DdlManager,
cluster_id: ClusterId,
create_table_tasks: Vec<CreateTableTask>,
mut create_table_tasks: Vec<CreateTableTask>,
) -> Result<SubmitDdlTaskResponse> {
ensure!(
!create_table_tasks.is_empty(),
EmptyDdlTasksSnafu {
name: "create logical tables"
}
);
ensure!(!create_table_tasks.is_empty(), EmptyCreateTableTasksSnafu);
let physical_table_id = utils::check_and_get_physical_table_id(
&ddl_manager.table_metadata_manager,
&create_table_tasks,
)
.await?;
// Sets table_ids on create_table_tasks
ddl_manager
.table_metadata_allocator
.set_table_ids_on_logic_create(&mut create_table_tasks)
.await?;
let num_logical_tables = create_table_tasks.len();
let (id, output) = ddl_manager
@@ -557,63 +529,6 @@ async fn handle_create_logical_table_tasks(
})
}
async fn handle_drop_database_task(
ddl_manager: &DdlManager,
cluster_id: ClusterId,
drop_database_task: DropDatabaseTask,
) -> Result<SubmitDdlTaskResponse> {
let (id, _) = ddl_manager
.submit_drop_database(cluster_id, drop_database_task.clone())
.await?;
let procedure_id = id.to_string();
info!(
"Database {}.{} is dropped via procedure_id {id:?}",
drop_database_task.catalog, drop_database_task.schema
);
Ok(SubmitDdlTaskResponse {
key: procedure_id.into(),
..Default::default()
})
}
async fn handle_alter_logical_table_tasks(
ddl_manager: &DdlManager,
cluster_id: ClusterId,
alter_table_tasks: Vec<AlterTableTask>,
) -> Result<SubmitDdlTaskResponse> {
ensure!(
!alter_table_tasks.is_empty(),
EmptyDdlTasksSnafu {
name: "alter logical tables"
}
);
// Use the physical table id in the first logical table, then it will be checked in the procedure.
let first_table = TableNameKey {
catalog: &alter_table_tasks[0].alter_table.catalog_name,
schema: &alter_table_tasks[0].alter_table.schema_name,
table: &alter_table_tasks[0].alter_table.table_name,
};
let physical_table_id =
utils::get_physical_table_id(&ddl_manager.table_metadata_manager, first_table).await?;
let num_logical_tables = alter_table_tasks.len();
let (id, _) = ddl_manager
.submit_alter_logical_table_tasks(cluster_id, alter_table_tasks, physical_table_id)
.await?;
info!("{num_logical_tables} logical tables on physical table: {physical_table_id:?} is altered via procedure_id {id:?}");
let procedure_id = id.to_string();
Ok(SubmitDdlTaskResponse {
key: procedure_id.into(),
..Default::default()
})
}
/// TODO(dennis): let [`DdlManager`] implement [`ProcedureExecutor`] looks weird, find some way to refactor it.
#[async_trait::async_trait]
impl ProcedureExecutor for DdlManager {
@@ -647,13 +562,8 @@ impl ProcedureExecutor for DdlManager {
CreateLogicalTables(create_table_tasks) => {
handle_create_logical_table_tasks(self, cluster_id, create_table_tasks).await
}
AlterLogicalTables(alter_table_tasks) => {
handle_alter_logical_table_tasks(self, cluster_id, alter_table_tasks).await
}
DropLogicalTables(_) => todo!(),
DropDatabase(drop_database_task) => {
handle_drop_database_task(self, cluster_id, drop_database_task).await
}
AlterLogicalTables(_) => todo!(),
}
}
.trace(span)
@@ -676,15 +586,15 @@ impl ProcedureExecutor for DdlManager {
_ctx: &ExecutorContext,
pid: &str,
) -> Result<ProcedureStateResponse> {
let pid =
ProcedureId::parse_str(pid).with_context(|_| ParseProcedureIdSnafu { key: pid })?;
let pid = ProcedureId::parse_str(pid)
.with_context(|_| error::ParseProcedureIdSnafu { key: pid })?;
let state = self
.procedure_manager
.procedure_state(pid)
.await
.context(QueryProcedureSnafu)?
.context(ProcedureNotFoundSnafu {
.context(error::QueryProcedureSnafu)?
.context(error::ProcedureNotFoundSnafu {
pid: pid.to_string(),
})?;
@@ -740,9 +650,9 @@ mod tests {
Arc::new(TableMetadataAllocator::new(
Arc::new(SequenceBuilder::new("test", kv_backend.clone()).build()),
Arc::new(WalOptionsAllocator::default()),
table_metadata_manager.table_name_manager().clone(),
)),
Arc::new(MemoryRegionKeeper::default()),
true,
);
let expected_loaders = vec![

View File

@@ -89,8 +89,11 @@ pub enum Error {
#[snafu(display("Unexpected sequence value: {}", err_msg))]
UnexpectedSequenceValue { err_msg: String, location: Location },
#[snafu(display("Table info not found: {}", table))]
TableInfoNotFound { table: String, location: Location },
#[snafu(display("Table info not found: {}", table_name))]
TableInfoNotFound {
table_name: String,
location: Location,
},
#[snafu(display("Failed to register procedure loader, type name: {}", type_name))]
RegisterProcedureLoader {
@@ -264,12 +267,6 @@ pub enum Error {
location: Location,
},
#[snafu(display("Schema nod found, schema: {}", table_schema))]
SchemaNotFound {
table_schema: String,
location: Location,
},
#[snafu(display("Failed to rename table, reason: {}", reason))]
RenameTable { reason: String, location: Location },
@@ -395,14 +392,11 @@ pub enum Error {
#[snafu(display("Unexpected table route type: {}", err_msg))]
UnexpectedLogicalRouteTable { location: Location, err_msg: String },
#[snafu(display("The tasks of {} cannot be empty", name))]
EmptyDdlTasks { name: String, location: Location },
#[snafu(display("The tasks of create tables cannot be empty"))]
EmptyCreateTableTasks { location: Location },
#[snafu(display("Metadata corruption: {}", err_msg))]
MetadataCorruption { err_msg: String, location: Location },
#[snafu(display("Alter logical tables invalid arguments: {}", err_msg))]
AlterLogicalTablesInvalidArguments { err_msg: String, location: Location },
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -462,8 +456,7 @@ impl ErrorExt for Error {
ProcedureNotFound { .. }
| PrimaryKeyNotFound { .. }
| EmptyKey { .. }
| InvalidEngineType { .. }
| AlterLogicalTablesInvalidArguments { .. } => StatusCode::InvalidArguments,
| InvalidEngineType { .. } => StatusCode::InvalidArguments,
TableNotFound { .. } => StatusCode::TableNotFound,
TableAlreadyExists { .. } => StatusCode::TableAlreadyExists,
@@ -479,10 +472,9 @@ impl ErrorExt for Error {
InvalidCatalogValue { source, .. } => source.status_code(),
ConvertAlterTableRequest { source, .. } => source.status_code(),
ParseProcedureId { .. }
| InvalidNumTopics { .. }
| SchemaNotFound { .. }
| EmptyDdlTasks { .. } => StatusCode::InvalidArguments,
ParseProcedureId { .. } | InvalidNumTopics { .. } | EmptyCreateTableTasks { .. } => {
StatusCode::InvalidArguments
}
}
}

View File

@@ -124,7 +124,7 @@ impl OpenRegion {
}
/// The instruction of downgrading leader region.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DowngradeRegion {
/// The [RegionId].
pub region_id: RegionId,
@@ -137,7 +137,7 @@ impl Display for DowngradeRegion {
}
/// Upgrades a follower region to leader region.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct UpgradeRegion {
/// The [RegionId].
pub region_id: RegionId,
@@ -151,14 +151,7 @@ pub struct UpgradeRegion {
pub wait_for_replay_timeout: Option<Duration>,
}
#[derive(Debug, Clone, Serialize, Deserialize, Display, PartialEq, Eq)]
/// The identifier of cache.
pub enum CacheIdent {
TableId(TableId),
TableName(TableName),
}
#[derive(Debug, Clone, Serialize, Deserialize, Display, PartialEq)]
#[derive(Debug, Clone, Serialize, Deserialize, Display)]
pub enum Instruction {
/// Opens a region.
///
@@ -172,8 +165,10 @@ pub enum Instruction {
UpgradeRegion(UpgradeRegion),
/// Downgrades a region.
DowngradeRegion(DowngradeRegion),
/// Invalidates batch cache.
InvalidateCaches(Vec<CacheIdent>),
/// Invalidates a specified table cache.
InvalidateTableIdCache(TableId),
/// Invalidates a specified table name index cache.
InvalidateTableNameCache(TableName),
}
/// The reply of [UpgradeRegion].
@@ -203,6 +198,7 @@ pub enum InstructionReply {
OpenRegion(SimpleReply),
CloseRegion(SimpleReply),
UpgradeRegion(UpgradeRegionReply),
InvalidateTableCache(SimpleReply),
DowngradeRegion(DowngradeRegionReply),
}
@@ -212,6 +208,9 @@ impl Display for InstructionReply {
Self::OpenRegion(reply) => write!(f, "InstructionReply::OpenRegion({})", reply),
Self::CloseRegion(reply) => write!(f, "InstructionReply::CloseRegion({})", reply),
Self::UpgradeRegion(reply) => write!(f, "InstructionReply::UpgradeRegion({})", reply),
Self::InvalidateTableCache(reply) => {
write!(f, "InstructionReply::Invalidate({})", reply)
}
Self::DowngradeRegion(reply) => {
write!(f, "InstructionReply::DowngradeRegion({})", reply)
}

View File

@@ -88,16 +88,15 @@ use crate::error::{self, Result, SerdeJsonSnafu};
use crate::kv_backend::txn::{Txn, TxnOpResponse};
use crate::kv_backend::KvBackendRef;
use crate::rpc::router::{region_distribution, RegionRoute, RegionStatus};
use crate::table_name::TableName;
use crate::DatanodeId;
pub const REMOVED_PREFIX: &str = "__removed";
pub const NAME_PATTERN: &str = r"[a-zA-Z_:-][a-zA-Z0-9_:\-\.]*";
pub const MAINTENANCE_KEY: &str = "maintenance";
const DATANODE_TABLE_KEY_PREFIX: &str = "__dn_table";
const TABLE_REGION_KEY_PREFIX: &str = "__table_region";
pub const REMOVED_PREFIX: &str = "__removed";
pub const TABLE_INFO_KEY_PREFIX: &str = "__table_info";
pub const TABLE_NAME_KEY_PREFIX: &str = "__table_name";
pub const CATALOG_NAME_KEY_PREFIX: &str = "__catalog_name";
@@ -141,6 +140,10 @@ lazy_static! {
.unwrap();
}
pub fn to_removed_key(key: &str) -> String {
format!("{REMOVED_PREFIX}-{key}")
}
pub trait TableMetaKey {
fn as_raw_key(&self) -> Vec<u8>;
}
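The to_removed_key helper added above underpins the tombstone pattern used by the build_delete_txn changes later in this diff: deleting a metadata key also archives its old value under a `__removed-`-prefixed key. A small standalone sketch of that idea against an in-memory map follows; the kv type here is a simplified assumption, not the real MemoryKvBackend.
use std::collections::HashMap;
const REMOVED_PREFIX: &str = "__removed";
fn to_removed_key(key: &str) -> String {
    format!("{REMOVED_PREFIX}-{key}")
}
// A toy, hypothetical stand-in for the kv backend used by the real transactions.
#[derive(Default)]
struct MemoryKv {
    data: HashMap<String, Vec<u8>>,
}
impl MemoryKv {
    /// Deletes `key` and archives its old value under the tombstone key,
    /// mirroring the `TxnOp::Delete` + `TxnOp::Put(removed_key, ...)` pairs in the diff.
    fn delete_with_tombstone(&mut self, key: &str) {
        if let Some(value) = self.data.remove(key) {
            self.data.insert(to_removed_key(key), value);
        }
    }
}
fn main() {
    let mut kv = MemoryKv::default();
    kv.data.insert("__table_info/1024".to_string(), b"{...}".to_vec());
    kv.delete_with_tombstone("__table_info/1024");
    assert!(!kv.data.contains_key("__table_info/1024"));
    assert_eq!(
        kv.data.get("__removed-__table_info/1024").map(Vec::as_slice),
        Some(b"{...}".as_slice()),
    );
}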
@@ -274,10 +277,6 @@ impl<T: Serialize + DeserializeOwned + TableMetaValue> DeserializedValueWithByte
self.inner
}
pub fn get_inner_ref(&self) -> &T {
&self.inner
}
/// Returns original `bytes`
pub fn get_raw_bytes(&self) -> Vec<u8> {
self.bytes.to_vec()
@@ -356,6 +355,7 @@ impl TableMetadataManager {
&self.kv_backend
}
// TODO(ruihang): deprecate this
pub async fn get_full_table_info(
&self,
table_id: TableId,
@@ -367,14 +367,17 @@ impl TableMetadataManager {
.table_route_manager
.table_route_storage()
.build_get_txn(table_id);
let (get_table_info_txn, table_info_decoder) =
self.table_info_manager.build_get_txn(table_id);
let txn = Txn::merge_all(vec![get_table_route_txn, get_table_info_txn]);
let res = self.kv_backend.txn(txn).await?;
let table_info_value = table_info_decoder(&res.responses)?;
let table_route_value = table_route_decoder(&res.responses)?;
let r = self.kv_backend.txn(txn).await?;
let table_info_value = table_info_decoder(&r.responses)?;
let table_route_value = table_route_decoder(&r.responses)?;
Ok((table_info_value, table_route_value))
}
@@ -458,7 +461,7 @@ impl TableMetadataManager {
Ok(())
}
pub fn create_logical_tables_metadata_chunk_size(&self) -> usize {
pub fn max_logical_tables_per_batch(&self) -> usize {
// The batch size is max_txn_ops() / 3 because creating one logical table's
// metadata takes roughly three txn operations.
self.kv_backend.max_txn_ops() / 3
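As a usage note for the batch-size helper above, callers are expected to split the logical-table metadata writes into chunks of this size so each transaction stays within the backend's max_txn_ops() limit. A hedged sketch with placeholder task and writer names:
// Hypothetical sketch: the task list and the writer are placeholders; only the
// chunking-by-batch-size idea corresponds to the helper above.
fn write_in_batches(tasks: &[String], max_logical_tables_per_batch: usize) {
    for chunk in tasks.chunks(max_logical_tables_per_batch) {
        // Each chunk stays within the kv backend's max_txn_ops() limit.
        write_metadata_chunk(chunk);
    }
}
fn write_metadata_chunk(chunk: &[String]) {
    println!("writing metadata for {} logical tables", chunk.len());
}
fn main() {
    let tasks: Vec<String> = (0..10).map(|i| format!("logical_table_{i}")).collect();
    write_in_batches(&tasks, 4);
}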
@@ -549,24 +552,30 @@ impl TableMetadataManager {
/// The caller MUST ensure it has the exclusive access to `TableNameKey`.
pub async fn delete_table_metadata(
&self,
table_id: TableId,
table_name: &TableName,
region_routes: &[RegionRoute],
table_info_value: &DeserializedValueWithBytes<TableInfoValue>,
table_route_value: &DeserializedValueWithBytes<TableRouteValue>,
) -> Result<()> {
let table_info = &table_info_value.table_info;
let table_id = table_info.ident.table_id;
// Deletes table name.
let table_name = TableNameKey::new(
&table_name.catalog_name,
&table_name.schema_name,
&table_name.table_name,
&table_info.catalog_name,
&table_info.schema_name,
&table_info.name,
);
let delete_table_name_txn = self.table_name_manager().build_delete_txn(&table_name)?;
let delete_table_name_txn = self
.table_name_manager()
.build_delete_txn(&table_name, table_id)?;
// Deletes table info.
let delete_table_info_txn = self.table_info_manager().build_delete_txn(table_id)?;
let delete_table_info_txn = self
.table_info_manager()
.build_delete_txn(table_id, table_info_value)?;
// Deletes datanode table key value pairs.
let distribution = region_distribution(region_routes);
let distribution = region_distribution(table_route_value.region_routes()?);
let delete_datanode_txn = self
.datanode_table_manager()
.build_delete_txn(table_id, distribution)?;
@@ -575,7 +584,7 @@ impl TableMetadataManager {
let delete_table_route_txn = self
.table_route_manager()
.table_route_storage()
.build_delete_txn(table_id)?;
.build_delete_txn(table_id, table_route_value)?;
let txn = Txn::merge_all(vec![
delete_table_name_txn,
@@ -681,64 +690,6 @@ impl TableMetadataManager {
Ok(())
}
pub fn batch_update_table_info_value_chunk_size(&self) -> usize {
self.kv_backend.max_txn_ops()
}
pub async fn batch_update_table_info_values(
&self,
table_info_value_pairs: Vec<(TableInfoValue, RawTableInfo)>,
) -> Result<()> {
let len = table_info_value_pairs.len();
let mut txns = Vec::with_capacity(len);
struct OnFailure<F, R>
where
F: FnOnce(&Vec<TxnOpResponse>) -> R,
{
table_info_value: TableInfoValue,
on_update_table_info_failure: F,
}
let mut on_failures = Vec::with_capacity(len);
for (table_info_value, new_table_info) in table_info_value_pairs {
let table_id = table_info_value.table_info.ident.table_id;
let new_table_info_value = table_info_value.update(new_table_info);
let (update_table_info_txn, on_update_table_info_failure) =
self.table_info_manager().build_update_txn(
table_id,
&DeserializedValueWithBytes::from_inner(table_info_value),
&new_table_info_value,
)?;
txns.push(update_table_info_txn);
on_failures.push(OnFailure {
table_info_value: new_table_info_value,
on_update_table_info_failure,
});
}
let txn = Txn::merge_all(txns);
let r = self.kv_backend.txn(txn).await?;
if !r.succeeded {
for on_failure in on_failures {
let remote_table_info = (on_failure.on_update_table_info_failure)(&r.responses)?
.context(error::UnexpectedSnafu {
err_msg: "Reads the empty table info during the updating table info",
})?
.into_inner();
let op_name = "the batch updating table info";
ensure_values!(remote_table_info, on_failure.table_info_value, op_name);
}
}
Ok(())
}
pub async fn update_table_route(
&self,
table_id: TableId,
@@ -920,11 +871,10 @@ mod tests {
use crate::key::table_info::TableInfoValue;
use crate::key::table_name::TableNameKey;
use crate::key::table_route::TableRouteValue;
use crate::key::{DeserializedValueWithBytes, TableMetadataManager};
use crate::key::{to_removed_key, DeserializedValueWithBytes, TableMetadataManager};
use crate::kv_backend::memory::MemoryKvBackend;
use crate::peer::Peer;
use crate::rpc::router::{region_distribution, Region, RegionRoute, RegionStatus};
use crate::table_name::TableName;
#[test]
fn test_deserialized_value_with_bytes() {
@@ -954,6 +904,13 @@ mod tests {
assert_eq!(decoded.bytes, expected);
}
#[test]
fn test_to_removed_key() {
let key = "test_key";
let removed = "__removed-test_key";
assert_eq!(removed, to_removed_key(key));
}
fn new_test_region_route() -> RegionRoute {
new_region_route(1, 2)
}
@@ -1140,6 +1097,9 @@ mod tests {
new_test_table_info(region_routes.iter().map(|r| r.region.id.region_number())).into();
let table_id = table_info.ident.table_id;
let datanode_id = 2;
let table_route_value = DeserializedValueWithBytes::from_inner(TableRouteValue::physical(
region_routes.clone(),
));
// creates metadata.
create_physical_table_metadata(
@@ -1150,20 +1110,18 @@ mod tests {
.await
.unwrap();
let table_name = TableName::new(
table_info.catalog_name,
table_info.schema_name,
table_info.name,
);
let table_info_value =
DeserializedValueWithBytes::from_inner(TableInfoValue::new(table_info.clone()));
// deletes metadata.
table_metadata_manager
.delete_table_metadata(table_id, &table_name, region_routes)
.delete_table_metadata(&table_info_value, &table_route_value)
.await
.unwrap();
// if metadata was already deleted, it should be ok.
table_metadata_manager
.delete_table_metadata(table_id, &table_name, region_routes)
.delete_table_metadata(&table_info_value, &table_route_value)
.await
.unwrap();
@@ -1190,20 +1148,24 @@ mod tests {
.unwrap()
.is_empty());
// Checks removed values
let table_info = table_metadata_manager
let removed_table_info = table_metadata_manager
.table_info_manager()
.get(table_id)
.get_removed(table_id)
.await
.unwrap();
assert!(table_info.is_none());
.unwrap()
.unwrap()
.into_inner();
assert_eq!(removed_table_info.table_info, table_info);
let table_route = table_metadata_manager
let removed_table_route = table_metadata_manager
.table_route_manager()
.table_route_storage()
.get(table_id)
.get_raw_removed(table_id)
.await
.unwrap();
assert!(table_route.is_none());
.unwrap()
.unwrap()
.into_inner();
assert_eq!(removed_table_route.region_routes().unwrap(), region_routes);
}
#[tokio::test]

View File

@@ -123,7 +123,7 @@ impl CatalogManager {
self.kv_backend.exists(&raw_key).await
}
pub fn catalog_names(&self) -> BoxStream<'static, Result<String>> {
pub async fn catalog_names(&self) -> BoxStream<'static, Result<String>> {
let start_key = CatalogNameKey::range_start_key();
let req = RangeRequest::new().with_prefix(start_key.as_bytes());

View File

@@ -173,16 +173,8 @@ impl SchemaManager {
.transpose()
}
/// Deletes a [SchemaNameKey].
pub async fn delete(&self, schema: SchemaNameKey<'_>) -> Result<()> {
let raw_key = schema.as_raw_key();
self.kv_backend.delete(&raw_key, false).await?;
Ok(())
}
/// Returns a schema stream that lists all schemas belonging to the target `catalog`.
pub fn schema_names(&self, catalog: &str) -> BoxStream<'static, Result<String>> {
pub async fn schema_names(&self, catalog: &str) -> BoxStream<'static, Result<String>> {
let start_key = SchemaNameKey::range_start_key(catalog);
let req = RangeRequest::new().with_prefix(start_key.as_bytes());

View File

@@ -20,7 +20,7 @@ use table::table_reference::TableReference;
use super::{txn_helper, DeserializedValueWithBytes, TableMetaValue, TABLE_INFO_KEY_PREFIX};
use crate::error::Result;
use crate::key::TableMetaKey;
use crate::key::{to_removed_key, TableMetaKey};
use crate::kv_backend::txn::{Txn, TxnOp, TxnOpResponse};
use crate::kv_backend::KvBackendRef;
use crate::rpc::store::BatchGetRequest;
@@ -157,15 +157,38 @@ impl TableInfoManager {
}
/// Builds a delete table info transaction.
pub(crate) fn build_delete_txn(&self, table_id: TableId) -> Result<Txn> {
pub(crate) fn build_delete_txn(
&self,
table_id: TableId,
table_info_value: &DeserializedValueWithBytes<TableInfoValue>,
) -> Result<Txn> {
let key = TableInfoKey::new(table_id);
let raw_key = key.as_raw_key();
let raw_value = table_info_value.get_raw_bytes();
let removed_key = to_removed_key(&String::from_utf8_lossy(&raw_key));
let txn = Txn::new().and_then(vec![TxnOp::Delete(raw_key)]);
let txn = Txn::new().and_then(vec![
TxnOp::Delete(raw_key),
TxnOp::Put(removed_key.into_bytes(), raw_value),
]);
Ok(txn)
}
#[cfg(test)]
pub async fn get_removed(
&self,
table_id: TableId,
) -> Result<Option<DeserializedValueWithBytes<TableInfoValue>>> {
let key = TableInfoKey::new(table_id).to_string();
let removed_key = to_removed_key(&key).into_bytes();
self.kv_backend
.get(&removed_key)
.await?
.map(|x| DeserializedValueWithBytes::from_inner_slice(&x.value))
.transpose()
}
pub async fn get(
&self,
table_id: TableId,

View File

@@ -22,7 +22,7 @@ use table::metadata::TableId;
use super::{TableMetaValue, TABLE_NAME_KEY_PATTERN, TABLE_NAME_KEY_PREFIX};
use crate::error::{Error, InvalidTableMetadataSnafu, Result};
use crate::key::TableMetaKey;
use crate::key::{to_removed_key, TableMetaKey};
use crate::kv_backend::memory::MemoryKvBackend;
use crate::kv_backend::txn::{Txn, TxnOp};
use crate::kv_backend::KvBackendRef;
@@ -195,9 +195,20 @@ impl TableNameManager {
}
/// Builds a delete table name transaction. It only executes when the primary key comparison succeeds.
pub(crate) fn build_delete_txn(&self, key: &TableNameKey<'_>) -> Result<Txn> {
pub(crate) fn build_delete_txn(
&self,
key: &TableNameKey<'_>,
table_id: TableId,
) -> Result<Txn> {
let raw_key = key.as_raw_key();
let txn = Txn::new().and_then(vec![TxnOp::Delete(raw_key)]);
let value = TableNameValue::new(table_id);
let raw_value = value.try_as_raw_value()?;
let removed_key = to_removed_key(&String::from_utf8_lossy(&raw_key));
let txn = Txn::new().and_then(vec![
TxnOp::Delete(raw_key),
TxnOp::Put(removed_key.into_bytes(), raw_value),
]);
Ok(txn)
}
@@ -241,7 +252,7 @@ impl TableNameManager {
self.kv_backend.exists(&raw_key).await
}
pub fn tables(
pub async fn tables(
&self,
catalog: &str,
schema: &str,

View File

@@ -25,7 +25,7 @@ use crate::error::{
self, MetadataCorruptionSnafu, Result, SerdeJsonSnafu, TableRouteNotFoundSnafu,
UnexpectedLogicalRouteTableSnafu,
};
use crate::key::{RegionDistribution, TableMetaKey, TABLE_ROUTE_PREFIX};
use crate::key::{to_removed_key, RegionDistribution, TableMetaKey, TABLE_ROUTE_PREFIX};
use crate::kv_backend::txn::{Txn, TxnOp, TxnOpResponse};
use crate::kv_backend::KvBackendRef;
use crate::rpc::router::{region_distribution, RegionRoute};
@@ -147,7 +147,7 @@ impl TableRouteValue {
///
/// # Panic
/// If it is not the [`PhysicalTableRouteValue`].
pub fn into_physical_table_route(self) -> PhysicalTableRouteValue {
fn into_physical_table_route(self) -> PhysicalTableRouteValue {
match self {
TableRouteValue::Physical(x) => x,
_ => unreachable!("Mistakenly been treated as a Physical TableRoute: {self:?}"),
@@ -485,15 +485,38 @@ impl TableRouteStorage {
/// Builds a delete table route transaction,
/// it expects the remote value to equal the `table_route_value`.
pub(crate) fn build_delete_txn(&self, table_id: TableId) -> Result<Txn> {
pub(crate) fn build_delete_txn(
&self,
table_id: TableId,
table_route_value: &DeserializedValueWithBytes<TableRouteValue>,
) -> Result<Txn> {
let key = TableRouteKey::new(table_id);
let raw_key = key.as_raw_key();
let raw_value = table_route_value.get_raw_bytes();
let removed_key = to_removed_key(&String::from_utf8_lossy(&raw_key));
let txn = Txn::new().and_then(vec![TxnOp::Delete(raw_key)]);
let txn = Txn::new().and_then(vec![
TxnOp::Delete(raw_key),
TxnOp::Put(removed_key.into_bytes(), raw_value),
]);
Ok(txn)
}
#[cfg(test)]
pub async fn get_raw_removed(
&self,
table_id: TableId,
) -> Result<Option<DeserializedValueWithBytes<TableRouteValue>>> {
let key = TableRouteKey::new(table_id).to_string();
let removed_key = to_removed_key(&key).into_bytes();
self.kv_backend
.get(&removed_key)
.await?
.map(|x| DeserializedValueWithBytes::from_inner_slice(&x.value))
.transpose()
}
/// Returns the [`TableRouteValue`].
pub async fn get(&self, table_id: TableId) -> Result<Option<TableRouteValue>> {
let key = TableRouteKey::new(table_id);

View File

@@ -236,8 +236,6 @@ impl<K, V> Stream for PaginationStream<K, V> {
PaginationStreamState::Init => {
let factory = self.factory.take().expect("lost factory");
if !factory.more {
// Ensures the factory always exists.
self.factory = Some(factory);
return Poll::Ready(None);
}
let fut = factory.read_next().boxed();
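The two lines removed above are half of a take-and-put-back idiom: poll_next moves the factory out of an Option and must store it back before returning, otherwise a later poll would hit the expect("lost factory"). A self-contained sketch of the same idiom with a toy stream follows; PagedStream and PageFactory are assumptions, not the real PaginationStream, and the futures crate is assumed as a dependency.
use std::pin::Pin;
use std::task::{Context, Poll};
use futures::stream::{Stream, StreamExt};
// Toy factory yielding pages 0, 1, 2 and then stopping.
struct PageFactory {
    next_page: u32,
    more: bool,
}
impl PageFactory {
    fn read_next(&mut self) -> u32 {
        let page = self.next_page;
        self.next_page += 1;
        self.more = self.next_page < 3;
        page
    }
}
struct PagedStream {
    factory: Option<PageFactory>,
}
impl Stream for PagedStream {
    type Item = u32;
    fn poll_next(mut self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Option<u32>> {
        let mut factory = self.factory.take().expect("lost factory");
        if !factory.more {
            // Ensures the factory always exists: put it back before finishing,
            // otherwise a later poll would hit the `expect` above.
            self.factory = Some(factory);
            return Poll::Ready(None);
        }
        let page = factory.read_next();
        self.factory = Some(factory);
        Poll::Ready(Some(page))
    }
}
fn main() {
    futures::executor::block_on(async {
        let mut stream = PagedStream {
            factory: Some(PageFactory { next_page: 0, more: true }),
        };
        while let Some(page) = stream.next().await {
            println!("page {page}");
        }
    });
}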

View File

@@ -19,11 +19,10 @@ use api::v1::meta::{
AlterTableTask as PbAlterTableTask, AlterTableTasks as PbAlterTableTasks,
CreateTableTask as PbCreateTableTask, CreateTableTasks as PbCreateTableTasks,
DdlTaskRequest as PbDdlTaskRequest, DdlTaskResponse as PbDdlTaskResponse,
DropDatabaseTask as PbDropDatabaseTask, DropTableTask as PbDropTableTask,
DropTableTasks as PbDropTableTasks, Partition, ProcedureId,
DropTableTask as PbDropTableTask, DropTableTasks as PbDropTableTasks, Partition, ProcedureId,
TruncateTableTask as PbTruncateTableTask,
};
use api::v1::{AlterExpr, CreateTableExpr, DropDatabaseExpr, DropTableExpr, TruncateTableExpr};
use api::v1::{AlterExpr, CreateTableExpr, DropTableExpr, TruncateTableExpr};
use base64::engine::general_purpose;
use base64::Engine as _;
use prost::Message;
@@ -44,7 +43,6 @@ pub enum DdlTask {
CreateLogicalTables(Vec<CreateTableTask>),
DropLogicalTables(Vec<DropTableTask>),
AlterLogicalTables(Vec<AlterTableTask>),
DropDatabase(DropDatabaseTask),
}
impl DdlTask {
@@ -65,15 +63,6 @@ impl DdlTask {
)
}
pub fn new_alter_logical_tables(table_data: Vec<AlterExpr>) -> Self {
DdlTask::AlterLogicalTables(
table_data
.into_iter()
.map(|alter_table| AlterTableTask { alter_table })
.collect(),
)
}
pub fn new_drop_table(
catalog: String,
schema: String,
@@ -90,14 +79,6 @@ impl DdlTask {
})
}
pub fn new_drop_database(catalog: String, schema: String, drop_if_exists: bool) -> Self {
DdlTask::DropDatabase(DropDatabaseTask {
catalog,
schema,
drop_if_exists,
})
}
pub fn new_alter_table(alter_table: AlterExpr) -> Self {
DdlTask::AlterTable(AlterTableTask { alter_table })
}
@@ -156,9 +137,6 @@ impl TryFrom<Task> for DdlTask {
Ok(DdlTask::AlterLogicalTables(tasks))
}
Task::DropDatabaseTask(drop_database) => {
Ok(DdlTask::DropDatabase(drop_database.try_into()?))
}
}
}
}
@@ -201,7 +179,6 @@ impl TryFrom<SubmitDdlTaskRequest> for PbDdlTaskRequest {
Task::AlterTableTasks(PbAlterTableTasks { tasks })
}
DdlTask::DropDatabase(task) => Task::DropDatabaseTask(task.try_into()?),
};
Ok(Self {
@@ -391,20 +368,6 @@ impl CreateTableTask {
pub fn set_table_id(&mut self, table_id: TableId) {
self.table_info.ident.table_id = table_id;
}
/// Sort the columns in [CreateTableExpr] and [RawTableInfo].
///
/// This function won't do any check or verification. Caller should
/// ensure this task is valid.
pub fn sort_columns(&mut self) {
// sort create table expr
// sort column_defs by name
self.create_table
.column_defs
.sort_unstable_by(|a, b| a.name.cmp(&b.name));
self.table_info.sort_columns();
}
}
impl Serialize for CreateTableTask {
@@ -556,7 +519,7 @@ impl TryFrom<PbTruncateTableTask> for TruncateTableTask {
fn try_from(pb: PbTruncateTableTask) -> Result<Self> {
let truncate_table = pb.truncate_table.context(error::InvalidProtoMsgSnafu {
err_msg: "expected truncate table",
err_msg: "expected drop table",
})?;
Ok(Self {
@@ -588,62 +551,13 @@ impl TryFrom<TruncateTableTask> for PbTruncateTableTask {
}
}
#[derive(Debug, PartialEq, Serialize, Deserialize, Clone)]
pub struct DropDatabaseTask {
pub catalog: String,
pub schema: String,
pub drop_if_exists: bool,
}
impl TryFrom<PbDropDatabaseTask> for DropDatabaseTask {
type Error = error::Error;
fn try_from(pb: PbDropDatabaseTask) -> Result<Self> {
let DropDatabaseExpr {
catalog_name,
schema_name,
drop_if_exists,
} = pb.drop_database.context(error::InvalidProtoMsgSnafu {
err_msg: "expected drop database",
})?;
Ok(DropDatabaseTask {
catalog: catalog_name,
schema: schema_name,
drop_if_exists,
})
}
}
impl TryFrom<DropDatabaseTask> for PbDropDatabaseTask {
type Error = error::Error;
fn try_from(
DropDatabaseTask {
catalog,
schema,
drop_if_exists,
}: DropDatabaseTask,
) -> Result<Self> {
Ok(PbDropDatabaseTask {
drop_database: Some(DropDatabaseExpr {
catalog_name: catalog,
schema_name: schema,
drop_if_exists,
}),
})
}
}
#[cfg(test)]
mod tests {
use std::sync::Arc;
use api::v1::{AlterExpr, ColumnDef, CreateTableExpr, SemanticType};
use datatypes::schema::{ColumnSchema, RawSchema, SchemaBuilder};
use store_api::metric_engine_consts::METRIC_ENGINE_NAME;
use store_api::storage::ConcreteDataType;
use table::metadata::{RawTableInfo, RawTableMeta, TableType};
use api::v1::{AlterExpr, CreateTableExpr};
use datatypes::schema::SchemaBuilder;
use table::metadata::RawTableInfo;
use table::test_util::table_info::test_table_info;
use super::{AlterTableTask, CreateTableTask};
@@ -675,109 +589,4 @@ mod tests {
let de = serde_json::from_slice(&output).unwrap();
assert_eq!(task, de);
}
#[test]
fn test_sort_columns() {
// construct RawSchema
let raw_schema = RawSchema {
column_schemas: vec![
ColumnSchema::new(
"column3".to_string(),
ConcreteDataType::string_datatype(),
true,
),
ColumnSchema::new(
"column1".to_string(),
ConcreteDataType::timestamp_millisecond_datatype(),
false,
)
.with_time_index(true),
ColumnSchema::new(
"column2".to_string(),
ConcreteDataType::float64_datatype(),
true,
),
],
timestamp_index: Some(1),
version: 0,
};
// construct RawTableMeta
let raw_table_meta = RawTableMeta {
schema: raw_schema,
primary_key_indices: vec![0],
value_indices: vec![2],
engine: METRIC_ENGINE_NAME.to_string(),
next_column_id: 0,
region_numbers: vec![0],
options: Default::default(),
created_on: Default::default(),
partition_key_indices: Default::default(),
};
// construct RawTableInfo
let raw_table_info = RawTableInfo {
ident: Default::default(),
meta: raw_table_meta,
name: Default::default(),
desc: Default::default(),
catalog_name: Default::default(),
schema_name: Default::default(),
table_type: TableType::Base,
};
// construct create table expr
let create_table_expr = CreateTableExpr {
column_defs: vec![
ColumnDef {
name: "column3".to_string(),
semantic_type: SemanticType::Tag as i32,
..Default::default()
},
ColumnDef {
name: "column1".to_string(),
semantic_type: SemanticType::Timestamp as i32,
..Default::default()
},
ColumnDef {
name: "column2".to_string(),
semantic_type: SemanticType::Field as i32,
..Default::default()
},
],
primary_keys: vec!["column3".to_string()],
..Default::default()
};
let mut create_table_task =
CreateTableTask::new(create_table_expr, Vec::new(), raw_table_info);
// Call the sort_columns method
create_table_task.sort_columns();
// Assert that the columns are sorted correctly
assert_eq!(
create_table_task.create_table.column_defs[0].name,
"column1".to_string()
);
assert_eq!(
create_table_task.create_table.column_defs[1].name,
"column2".to_string()
);
assert_eq!(
create_table_task.create_table.column_defs[2].name,
"column3".to_string()
);
// Assert that the table_info is updated correctly
assert_eq!(
create_table_task.table_info.meta.schema.timestamp_index,
Some(0)
);
assert_eq!(
create_table_task.table_info.meta.primary_key_indices,
vec![2]
);
assert_eq!(create_table_task.table_info.meta.value_indices, vec![1]);
}
}

View File

@@ -34,14 +34,10 @@ pub struct SequenceBuilder {
max: u64,
}
fn seq_name(name: impl AsRef<str>) -> String {
format!("{}-{}", SEQ_PREFIX, name.as_ref())
}
impl SequenceBuilder {
pub fn new(name: impl AsRef<str>, generator: KvBackendRef) -> Self {
Self {
name: seq_name(name),
name: format!("{}-{}", SEQ_PREFIX, name.as_ref()),
initial: 0,
step: 1,
generator,
@@ -142,14 +138,13 @@ impl Inner {
pub async fn next_range(&self) -> Result<Range<u64>> {
let key = self.name.as_bytes();
let mut start = self.next;
let mut expect = if start == self.initial {
vec![]
} else {
u64::to_le_bytes(start).to_vec()
};
for _ in 0..self.force_quit {
let expect = if start == self.initial {
vec![]
} else {
u64::to_le_bytes(start).to_vec()
};
let step = self.step.min(self.max - start);
ensure!(
@@ -172,24 +167,15 @@ impl Inner {
if !res.success {
if let Some(kv) = res.prev_kv {
expect = kv.value.clone();
let v: [u8; 8] = match kv.value.try_into() {
Ok(a) => a,
Err(v) => {
return error::UnexpectedSequenceValueSnafu {
err_msg: format!("Not a valid u64 for '{}': {v:?}", self.name),
}
.fail()
let value = kv.value;
ensure!(
value.len() == std::mem::size_of::<u64>(),
error::UnexpectedSequenceValueSnafu {
err_msg: format!("key={}, unexpected value={:?}", self.name, value)
}
};
let v = u64::from_le_bytes(v);
// If the existing value is smaller than the initial, we should start from the initial.
start = v.max(self.initial);
);
start = u64::from_le_bytes(value.try_into().unwrap());
} else {
expect = vec![];
start = self.initial;
}
continue;
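The loop above is a compare-and-swap range allocator: a failed CAS returns the stored value, and the next attempt restarts from it. A condensed standalone sketch of that retry shape over an in-memory map follows; the types and names below are simplified assumptions, not the real SequenceBuilder.
use std::collections::HashMap;
use std::ops::Range;
/// Toy stand-in for the kv backend's compare-and-put.
#[derive(Default)]
struct MemoryKv {
    data: HashMap<String, u64>,
}
impl MemoryKv {
    /// Puts `value` only if the current value equals `expect` (None means "absent").
    /// On failure, returns the actual current value so the caller can retry.
    fn compare_and_put(&mut self, key: &str, expect: Option<u64>, value: u64) -> Result<(), Option<u64>> {
        let current = self.data.get(key).copied();
        if current == expect {
            self.data.insert(key.to_string(), value);
            Ok(())
        } else {
            Err(current)
        }
    }
}
struct Sequence {
    key: String,
    next: u64,
    step: u64,
}
impl Sequence {
    /// Reserves the next `[start, start + step)` range, retrying on CAS conflicts.
    fn next_range(&mut self, kv: &mut MemoryKv) -> Range<u64> {
        let mut start = self.next;
        loop {
            let expect = if start == 0 { None } else { Some(start) };
            match kv.compare_and_put(&self.key, expect, start + self.step) {
                Ok(()) => {
                    self.next = start + self.step;
                    return start..self.next;
                }
                // Someone else advanced the sequence; restart from the stored value.
                Err(actual) => start = actual.unwrap_or(0),
            }
        }
    }
}
fn main() {
    let mut kv = MemoryKv::default();
    let mut seq = Sequence { key: "seq-test".to_string(), next: 0, step: 10 };
    assert_eq!(seq.next_range(&mut kv), 0..10);
    assert_eq!(seq.next_range(&mut kv), 10..20);
}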
@@ -211,12 +197,8 @@ impl Inner {
#[cfg(test)]
mod tests {
use std::any::Any;
use std::collections::HashSet;
use std::sync::Arc;
use itertools::{Itertools, MinMaxResult};
use tokio::sync::mpsc;
use super::*;
use crate::error::Error;
use crate::kv_backend::memory::MemoryKvBackend;
@@ -227,76 +209,6 @@ mod tests {
DeleteRangeResponse, PutRequest, PutResponse, RangeRequest, RangeResponse,
};
#[tokio::test]
async fn test_sequence_with_existed_value() {
async fn test(exist: u64, expected: Vec<u64>) {
let kv_backend = Arc::new(MemoryKvBackend::default());
let exist = u64::to_le_bytes(exist);
kv_backend
.put(PutRequest::new().with_key(seq_name("s")).with_value(exist))
.await
.unwrap();
let initial = 100;
let seq = SequenceBuilder::new("s", kv_backend)
.initial(initial)
.build();
let mut actual = Vec::with_capacity(expected.len());
for _ in 0..expected.len() {
actual.push(seq.next().await.unwrap());
}
assert_eq!(actual, expected);
}
// put a value not greater than the "initial", the sequence should start from "initial"
test(1, vec![100, 101, 102]).await;
test(100, vec![100, 101, 102]).await;
// put a value greater than the "initial", the sequence should start from the put value
test(200, vec![200, 201, 202]).await;
}
#[tokio::test(flavor = "multi_thread")]
async fn test_sequence_with_contention() {
let seq = Arc::new(
SequenceBuilder::new("s", Arc::new(MemoryKvBackend::default()))
.initial(1024)
.build(),
);
let (tx, mut rx) = mpsc::unbounded_channel();
// Spawn 10 tasks to concurrently get the next sequence. Each task will get 100 sequences.
for _ in 0..10 {
tokio::spawn({
let seq = seq.clone();
let tx = tx.clone();
async move {
for _ in 0..100 {
tx.send(seq.next().await.unwrap()).unwrap()
}
}
});
}
// Test that we get 1000 unique sequences, ranging from 1024 to 2023.
let mut nums = HashSet::new();
let mut c = 0;
while c < 1000
&& let Some(x) = rx.recv().await
{
nums.insert(x);
c += 1;
}
assert_eq!(nums.len(), 1000);
let MinMaxResult::MinMax(min, max) = nums.iter().minmax() else {
unreachable!("nums has more than one elements");
};
assert_eq!(*min, 1024);
assert_eq!(*max, 2023);
}
#[tokio::test]
async fn test_sequence() {
let kv_backend = Arc::new(MemoryKvBackend::default());

View File

@@ -19,9 +19,7 @@ pub use common_base::AffectedRows;
use common_recordbatch::SendableRecordBatchStream;
use crate::cache_invalidator::DummyCacheInvalidator;
use crate::datanode_manager::{
Datanode, DatanodeManager, DatanodeManagerRef, DatanodeRef, HandleResponse,
};
use crate::datanode_manager::{Datanode, DatanodeManager, DatanodeManagerRef, DatanodeRef};
use crate::ddl::table_meta::TableMetadataAllocator;
use crate::ddl::DdlContext;
use crate::error::Result;
@@ -34,7 +32,7 @@ use crate::wal_options_allocator::WalOptionsAllocator;
#[async_trait::async_trait]
pub trait MockDatanodeHandler: Sync + Send + Clone {
async fn handle(&self, peer: &Peer, request: RegionRequest) -> Result<HandleResponse>;
async fn handle(&self, peer: &Peer, request: RegionRequest) -> Result<AffectedRows>;
async fn handle_query(
&self,
@@ -64,7 +62,7 @@ struct MockDatanode<T> {
#[async_trait::async_trait]
impl<T: MockDatanodeHandler> Datanode for MockDatanode<T> {
async fn handle(&self, request: RegionRequest) -> Result<HandleResponse> {
async fn handle(&self, request: RegionRequest) -> Result<AffectedRows> {
self.handler.handle(&self.peer, request).await
}
@@ -99,6 +97,7 @@ pub fn new_ddl_context(datanode_manager: DatanodeManagerRef) -> DdlContext {
.build(),
),
Arc::new(WalOptionsAllocator::default()),
table_metadata_manager.table_name_manager().clone(),
)),
table_metadata_manager,
}

View File

@@ -16,5 +16,4 @@
pub const GREPTIME_EXEC_PREFIX: &str = "greptime_exec_";
/// Execution cost metrics key
pub const GREPTIME_EXEC_READ_COST: &str = "greptime_exec_read_cost";
pub const GREPTIME_EXEC_WRITE_COST: &str = "greptime_exec_write_cost";
pub const GREPTIME_EXEC_COST: &str = "greptime_exec_cost";

View File

@@ -17,4 +17,4 @@
/// since `plugins` crate is at the top depending on crates like `frontend` and `datanode`
mod consts;
pub use consts::{GREPTIME_EXEC_PREFIX, GREPTIME_EXEC_READ_COST, GREPTIME_EXEC_WRITE_COST};
pub use consts::{GREPTIME_EXEC_COST, GREPTIME_EXEC_PREFIX};

View File

@@ -25,8 +25,8 @@ pub mod watcher;
pub use crate::error::{Error, Result};
pub use crate::procedure::{
BoxedProcedure, BoxedProcedureLoader, Context, ContextProvider, LockKey, Output, ParseIdError,
Procedure, ProcedureId, ProcedureManager, ProcedureManagerRef, ProcedureState, ProcedureWithId,
Status, StringKey,
BoxedProcedure, Context, ContextProvider, LockKey, Output, ParseIdError, Procedure,
ProcedureId, ProcedureManager, ProcedureManagerRef, ProcedureState, ProcedureWithId, Status,
StringKey,
};
pub use crate::watcher::Watcher;

View File

@@ -799,10 +799,8 @@ mod tests {
let root_id = ProcedureId::random();
// Prepare data for the root procedure.
for step in 0..3 {
let type_name = root.type_name().to_string();
let data = root.dump().unwrap();
procedure_store
.store_procedure(root_id, step, type_name, data, None)
.store_procedure(root_id, step, &root, None)
.await
.unwrap();
}
@@ -811,10 +809,8 @@ mod tests {
let child_id = ProcedureId::random();
// Prepare data for the child procedure
for step in 0..2 {
let type_name = child.type_name().to_string();
let data = child.dump().unwrap();
procedure_store
.store_procedure(child_id, step, type_name, data, Some(root_id))
.store_procedure(child_id, step, &child, Some(root_id))
.await
.unwrap();
}

View File

@@ -385,7 +385,7 @@ impl Runner {
}
/// Extend the retry time to wait for the next retry.
async fn wait_on_err(&mut self, d: Duration, i: u64) {
async fn wait_on_err(&self, d: Duration, i: u64) {
logging::info!(
"Procedure {}-{} retry for the {} times after {} millis",
self.procedure.type_name(),
@@ -396,7 +396,7 @@ impl Runner {
time::sleep(d).await;
}
async fn on_suspended(&mut self, subprocedures: Vec<ProcedureWithId>) {
async fn on_suspended(&self, subprocedures: Vec<ProcedureWithId>) {
let has_child = !subprocedures.is_empty();
for subprocedure in subprocedures {
logging::info!(
@@ -429,15 +429,11 @@ impl Runner {
}
async fn persist_procedure(&mut self) -> Result<()> {
let type_name = self.procedure.type_name().to_string();
let data = self.procedure.dump()?;
self.store
.store_procedure(
self.meta.id,
self.step,
type_name,
data,
&self.procedure,
self.meta.parent_id,
)
.await

Some files were not shown because too many files have changed in this diff.