Mirror of https://github.com/GreptimeTeam/greptimedb.git, synced 2026-01-08 22:32:55 +00:00
Compare commits: v0.1.2-alp ... v0.2.0-nig (67 commits)
| SHA1 |
|---|
| 48c2841e4d |
| d2542552d3 |
| c0132e6cc0 |
| aea932b891 |
| 0253136333 |
| 6a05f617a4 |
| a2b262ebc0 |
| 972f64c3d7 |
| eb77f9aafd |
| dee20144d7 |
| 563adbabe9 |
| b71bb4e5fa |
| fae293310c |
| 3e51640442 |
| b40193d7da |
| b5e5f8e555 |
| 192fa0caa5 |
| 30eb676d6a |
| d7cadf6e6d |
| d7a1435517 |
| 0943079de2 |
| 509d07b798 |
| e72ce5eaa9 |
| f491a040f5 |
| 47179a7812 |
| 995a28a27d |
| ed1cb73ffc |
| 0ffa628c22 |
| 5edd2a3dbe |
| e63b28bff1 |
| 8140d4e3e5 |
| 6825459c75 |
| 7eb4d81929 |
| 8ba0741c81 |
| 0eeb5b460c |
| 65ea6fd85f |
| 4f15b26b28 |
| 15ee4ac729 |
| b4fc8c5b78 |
| 6f81717866 |
| 77f9383daf |
| c788b7fc26 |
| 0f160a73be |
| 92963b9614 |
| f1139fba59 |
| 4e552245b1 |
| 3126bbc1c7 |
| b77b561bc8 |
| 501faad8ab |
| 5397a9bbe6 |
| f351ee7042 |
| e0493e0b8f |
| b2a09c888a |
| af101480b3 |
| b8f7f603cf |
| 8fb97ea1d8 |
| 21ce9c1163 |
| 0a22375ac1 |
| 0596d20a3b |
| e19c8fa2b6 |
| ad886f5b3e |
| f6669a8201 |
| ad5c47185d |
| 64441616db |
| 09491d6aee |
| 7cfa30b2ab |
| a7676d8860 |
.github/workflows/release.yml (vendored) | 104

@@ -5,6 +5,7 @@ on:
schedule:
# At 00:00 on Monday.
- cron: '0 0 * * 1'
# Mannually trigger only builds binaries.
workflow_dispatch:

name: Release
@@ -32,22 +33,42 @@ jobs:
os: ubuntu-2004-16-cores
file: greptime-linux-amd64
continue-on-error: false
opts: "-F pyo3_backend"
opts: "-F servers/dashboard"
- arch: aarch64-unknown-linux-gnu
os: ubuntu-2004-16-cores
file: greptime-linux-arm64
continue-on-error: false
opts: "-F pyo3_backend"
opts: "-F servers/dashboard"
- arch: aarch64-apple-darwin
os: macos-latest
file: greptime-darwin-arm64
continue-on-error: false
opts: "-F pyo3_backend"
opts: "-F servers/dashboard"
- arch: x86_64-apple-darwin
os: macos-latest
file: greptime-darwin-amd64
continue-on-error: false
opts: "-F pyo3_backend"
opts: "-F servers/dashboard"
- arch: x86_64-unknown-linux-gnu
os: ubuntu-2004-16-cores
file: greptime-linux-amd64-pyo3
continue-on-error: false
opts: "-F pyo3_backend,servers/dashboard"
- arch: aarch64-unknown-linux-gnu
os: ubuntu-2004-16-cores
file: greptime-linux-arm64-pyo3
continue-on-error: false
opts: "-F pyo3_backend,servers/dashboard"
- arch: aarch64-apple-darwin
os: macos-latest
file: greptime-darwin-arm64-pyo3
continue-on-error: false
opts: "-F pyo3_backend,servers/dashboard"
- arch: x86_64-apple-darwin
os: macos-latest
file: greptime-darwin-amd64-pyo3
continue-on-error: false
opts: "-F pyo3_backend,servers/dashboard"
runs-on: ${{ matrix.os }}
continue-on-error: ${{ matrix.continue-on-error }}
if: github.repository == 'GreptimeTeam/greptimedb'
@@ -105,11 +126,12 @@ jobs:
sudo apt-get -y update
sudo apt-get -y install libssl-dev pkg-config g++-aarch64-linux-gnu gcc-aarch64-linux-gnu binutils-aarch64-linux-gnu wget

- name: Compile Python 3.10.10 from source for Aarch64
if: contains(matrix.arch, 'aarch64-unknown-linux-gnu')
# FIXME(zyy17): Should we specify the version of python when building binary for darwin?
- name: Compile Python 3.10.10 from source for linux
if: contains(matrix.arch, 'linux') && contains(matrix.opts, 'pyo3_backend')
run: |
sudo chmod +x ./docker/aarch64/compile-python.sh
sudo ./docker/aarch64/compile-python.sh
sudo ./docker/aarch64/compile-python.sh ${{ matrix.arch }}

- name: Install rust toolchain
uses: dtolnay/rust-toolchain@master
@@ -124,17 +146,51 @@ jobs:
if: env.DISABLE_RUN_TESTS == 'false'
run: make unit-test integration-test sqlness-test

- name: Run cargo build for aarch64-linux
if: contains(matrix.arch, 'aarch64-unknown-linux-gnu')
- name: Run cargo build with pyo3 for aarch64-linux
if: contains(matrix.arch, 'aarch64-unknown-linux-gnu') && contains(matrix.opts, 'pyo3_backend')
run: |
# TODO(zyy17): We should make PYO3_CROSS_LIB_DIR configurable.
export PYO3_CROSS_LIB_DIR=$(pwd)/python_arm64_build/lib
export PYTHON_INSTALL_PATH_AMD64=${PWD}/python-3.10.10/amd64
export LD_LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LD_LIBRARY_PATH
export LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LIBRARY_PATH
export PATH=$PYTHON_INSTALL_PATH_AMD64/bin:$PATH

export PYO3_CROSS_LIB_DIR=${PWD}/python-3.10.10/aarch64
echo "PYO3_CROSS_LIB_DIR: $PYO3_CROSS_LIB_DIR"
alias python=python3
alias python=$PYTHON_INSTALL_PATH_AMD64/bin/python3
alias pip=$PYTHON_INSTALL_PATH_AMD64/bin/python3-pip

cargo build --profile ${{ env.CARGO_PROFILE }} --locked --target ${{ matrix.arch }} ${{ matrix.opts }}

- name: Run cargo build with pyo3 for amd64-linux
if: contains(matrix.arch, 'x86_64-unknown-linux-gnu') && contains(matrix.opts, 'pyo3_backend')
run: |
export PYTHON_INSTALL_PATH_AMD64=${PWD}/python-3.10.10/amd64
export LD_LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LD_LIBRARY_PATH
export LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LIBRARY_PATH
export PATH=$PYTHON_INSTALL_PATH_AMD64/bin:$PATH

echo "implementation=CPython" >> pyo3.config
echo "version=3.10" >> pyo3.config
echo "implementation=CPython" >> pyo3.config
echo "shared=true" >> pyo3.config
echo "abi3=true" >> pyo3.config
echo "lib_name=python3.10" >> pyo3.config
echo "lib_dir=$PYTHON_INSTALL_PATH_AMD64/lib" >> pyo3.config
echo "executable=$PYTHON_INSTALL_PATH_AMD64/bin/python3" >> pyo3.config
echo "pointer_width=64" >> pyo3.config
echo "build_flags=" >> pyo3.config
echo "suppress_build_script_link_lines=false" >> pyo3.config

cat pyo3.config
export PYO3_CONFIG_FILE=${PWD}/pyo3.config
alias python=$PYTHON_INSTALL_PATH_AMD64/bin/python3
alias pip=$PYTHON_INSTALL_PATH_AMD64/bin/python3-pip

cargo build --profile ${{ env.CARGO_PROFILE }} --locked --target ${{ matrix.arch }} ${{ matrix.opts }}

- name: Run cargo build
if: contains(matrix.arch, 'aarch64-unknown-linux-gnu') == false
if: contains(matrix.arch, 'darwin') || contains(matrix.opts, 'pyo3_backend') == false
run: cargo build --profile ${{ env.CARGO_PROFILE }} --locked --target ${{ matrix.arch }} ${{ matrix.opts }}

- name: Calculate checksum and rename binary
@@ -161,7 +217,7 @@ jobs:
name: Build docker image
needs: [build]
runs-on: ubuntu-latest
if: github.repository == 'GreptimeTeam/greptimedb'
if: github.repository == 'GreptimeTeam/greptimedb' && github.event_name != 'workflow_dispatch'
steps:
- name: Checkout sources
uses: actions/checkout@v3
@@ -196,35 +252,33 @@ jobs:
- name: Download amd64 binary
uses: actions/download-artifact@v3
with:
name: greptime-linux-amd64
name: greptime-linux-amd64-pyo3
path: amd64

- name: Unzip the amd64 artifacts
run: |
cd amd64
tar xvf greptime-linux-amd64.tgz
rm greptime-linux-amd64.tgz
tar xvf amd64/greptime-linux-amd64-pyo3.tgz -C amd64/ && rm amd64/greptime-linux-amd64-pyo3.tgz
cp -r amd64 docker/ci

- name: Download arm64 binary
id: download-arm64
uses: actions/download-artifact@v3
with:
name: greptime-linux-arm64
name: greptime-linux-arm64-pyo3
path: arm64

- name: Unzip the arm64 artifacts
id: unzip-arm64
if: success() || steps.download-arm64.conclusion == 'success'
run: |
cd arm64
tar xvf greptime-linux-arm64.tgz
rm greptime-linux-arm64.tgz
tar xvf arm64/greptime-linux-arm64-pyo3.tgz -C arm64/ && rm arm64/greptime-linux-arm64-pyo3.tgz
cp -r arm64 docker/ci

- name: Build and push all
uses: docker/build-push-action@v3
if: success() || steps.unzip-arm64.conclusion == 'success' # Build and push all platform if unzip-arm64 succeeds
with:
context: .
context: ./docker/ci/
file: ./docker/ci/Dockerfile
push: true
platforms: linux/amd64,linux/arm64
@@ -236,7 +290,7 @@ jobs:
uses: docker/build-push-action@v3
if: success() || steps.download-arm64.conclusion == 'failure' # Only build and push amd64 platform if download-arm64 fails
with:
context: .
context: ./docker/ci/
file: ./docker/ci/Dockerfile
push: true
platforms: linux/amd64
@@ -249,7 +303,7 @@ jobs:
# Release artifacts only when all the artifacts are built successfully.
needs: [build,docker]
runs-on: ubuntu-latest
if: github.repository == 'GreptimeTeam/greptimedb'
if: github.repository == 'GreptimeTeam/greptimedb' && github.event_name != 'workflow_dispatch'
steps:
- name: Checkout sources
uses: actions/checkout@v3
@@ -292,7 +346,7 @@ jobs:
name: Push docker image to UCloud Container Registry
needs: [docker]
runs-on: ubuntu-latest
if: github.repository == 'GreptimeTeam/greptimedb'
if: github.repository == 'GreptimeTeam/greptimedb' && github.event_name != 'workflow_dispatch'
# Push to uhub may fail(500 error), but we don't want to block the release process. The failed job will be retried manually.
continue-on-error: true
steps:

.gitignore (vendored) | 4

@@ -35,3 +35,7 @@ benchmarks/data

# dotenv
.env

# dashboard files
!/src/servers/dashboard/VERSION
/src/servers/dashboard/*

Cargo.lock (generated) | 1037
File diff suppressed because it is too large

Cargo.toml | 24

@@ -7,6 +7,7 @@ members = [
"src/cmd",
"src/common/base",
"src/common/catalog",
"src/common/datasource",
"src/common/error",
"src/common/function",
"src/common/function-macro",
@@ -50,24 +51,25 @@ edition = "2021"
license = "Apache-2.0"

[workspace.dependencies]
arrow = { version = "34.0" }
arrow-array = "34.0"
arrow-flight = "34.0"
arrow-schema = { version = "34.0", features = ["serde"] }
arrow = { version = "36.0" }
arrow-array = "36.0"
arrow-flight = "36.0"
arrow-schema = { version = "36.0", features = ["serde"] }
async-stream = "0.3"
async-trait = "0.1"
chrono = { version = "0.4", features = ["serde"] }
datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "146a949218ec970784974137277cde3b4e547d0a" }
datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", rev = "146a949218ec970784974137277cde3b4e547d0a" }
datafusion-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "146a949218ec970784974137277cde3b4e547d0a" }
datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "146a949218ec970784974137277cde3b4e547d0a" }
datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "146a949218ec970784974137277cde3b4e547d0a" }
datafusion-sql = { git = "https://github.com/apache/arrow-datafusion.git", rev = "146a949218ec970784974137277cde3b4e547d0a" }
datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "8e125d2ecf242b4f4b81f06839900dbb2037cc2a" }
datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", rev = "8e125d2ecf242b4f4b81f06839900dbb2037cc2a" }
datafusion-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "8e125d2ecf242b4f4b81f06839900dbb2037cc2a" }
datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "8e125d2ecf242b4f4b81f06839900dbb2037cc2a" }
datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "8e125d2ecf242b4f4b81f06839900dbb2037cc2a" }
datafusion-sql = { git = "https://github.com/apache/arrow-datafusion.git", rev = "8e125d2ecf242b4f4b81f06839900dbb2037cc2a" }
futures = "0.3"
futures-util = "0.3"
parquet = "34.0"
parquet = "36.0"
paste = "1.0"
prost = "0.11"
rand = "0.8"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
snafu = { version = "0.7", features = ["backtraces"] }

README.md | 19

@@ -1,8 +1,8 @@
<p align="center">
<picture>
<source media="(prefers-color-scheme: light)" srcset="/docs/logo-text-padding.png">
<source media="(prefers-color-scheme: dark)" srcset="/docs/logo-text-padding-dark.png">
<img alt="GreptimeDB Logo" src="/docs/logo-text-padding.png" width="400px">
<source media="(prefers-color-scheme: light)" srcset="https://cdn.jsdelivr.net/gh/GreptimeTeam/greptimedb@develop/docs/logo-text-padding.png">
<source media="(prefers-color-scheme: dark)" srcset="https://cdn.jsdelivr.net/gh/GreptimeTeam/greptimedb@develop/docs/logo-text-padding-dark.png">
<img alt="GreptimeDB Logo" src="https://cdn.jsdelivr.net/gh/GreptimeTeam/greptimedb@develop/docs/logo-text-padding.png" width="400px">
</picture>
</p>

@@ -61,12 +61,12 @@ To compile GreptimeDB from source, you'll need:
find an installation instructions [here](https://grpc.io/docs/protoc-installation/).
**Note that `protoc` version needs to be >= 3.15** because we have used the `optional`
keyword. You can check it with `protoc --version`.
- python3-dev or python3-devel(Optional, only needed if you want to run scripts
in cpython): this install a Python shared library required for running python
- python3-dev or python3-devel(Optional feature, only needed if you want to run scripts
in CPython, and also need to enable `pyo3_backend` feature when compiling(by `cargo run -F pyo3_backend` or add `pyo3_backend` to src/script/Cargo.toml 's `features.default` like `default = ["python", "pyo3_backend]`)): this install a Python shared library required for running Python
scripting engine(In CPython Mode). This is available as `python3-dev` on
ubuntu, you can install it with `sudo apt install python3-dev`, or
`python3-devel` on RPM based distributions (e.g. Fedora, Red Hat, SuSE). Mac's
`Python3` package should have this shared library by default.
`Python3` package should have this shared library by default. More detail for compiling with PyO3 can be found in [PyO3](https://pyo3.rs/v0.18.1/building_and_distribution#configuring-the-python-version)'s documentation.

#### Build with Docker

@@ -147,9 +147,9 @@ You can always cleanup test database by removing `/tmp/greptimedb`.
### Installation

- [Pre-built Binaries](https://github.com/GreptimeTeam/greptimedb/releases):
downloadable pre-built binaries for Linux and MacOS
- [Docker Images](https://hub.docker.com/r/greptime/greptimedb): pre-built
Docker images
For Linux and macOS, you can easily download pre-built binaries that are ready to use. In most cases, downloading the version without PyO3 is sufficient. However, if you plan to run scripts in CPython (and use Python packages like NumPy and Pandas), you will need to download the version with PyO3 and install a Python with the same version as the Python in the PyO3 version. We recommend using virtualenv for the installation process to manage multiple Python versions.
- [Docker Images](https://hub.docker.com/r/greptime/greptimedb)(**recommended**): pre-built
Docker images, this is the easiest way to try GreptimeDB. By default it runs CPython script with `pyo3_backend` enabled.
- [`gtctl`](https://github.com/GreptimeTeam/gtctl): the command-line tool for
Kubernetes deployment

@@ -158,6 +158,7 @@ You can always cleanup test database by removing `/tmp/greptimedb`.
- GreptimeDB [User Guide](https://docs.greptime.com/user-guide/concepts.html)
- GreptimeDB [Developer
Guide](https://docs.greptime.com/developer-guide/overview.html)
- GreptimeDB [internal code document](https://greptimedb.rs)

### Dashboard
- [The dashboard UI for GreptimeDB](https://github.com/GreptimeTeam/dashboard)

SECURITY.md (new file) | 19

@@ -0,0 +1,19 @@
# Security Policy

## Supported Versions

| Version | Supported |
| ------- | ------------------ |
| >= v0.1.0 | :white_check_mark: |
| < v0.1.0 | :x: |

## Reporting a Vulnerability

We place great importance on the security of GreptimeDB code, software,
and cloud platform. If you come across a security vulnerability in GreptimeDB,
we kindly request that you inform us immediately. We will thoroughly investigate
all valid reports and make every effort to resolve the issue promptly.

To report any issues or vulnerabilities, please email us at info@greptime.com, rather than
posting publicly on GitHub. Be sure to provide us with the version identifier as well as details
on how the vulnerability can be exploited.

@@ -21,12 +21,12 @@ use std::collections::HashMap;
use std::path::{Path, PathBuf};
use std::time::Instant;

use arrow::array::{ArrayRef, PrimitiveArray, StringArray, TimestampNanosecondArray};
use arrow::array::{ArrayRef, PrimitiveArray, StringArray, TimestampMicrosecondArray};
use arrow::datatypes::{DataType, Float64Type, Int64Type};
use arrow::record_batch::RecordBatch;
use clap::Parser;
use client::api::v1::column::Values;
use client::api::v1::{Column, ColumnDataType, ColumnDef, CreateTableExpr, InsertRequest, TableId};
use client::api::v1::{Column, ColumnDataType, ColumnDef, CreateTableExpr, InsertRequest};
use client::{Client, Database, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use indicatif::{MultiProgress, ProgressBar, ProgressStyle};
use parquet::arrow::arrow_reader::ParquetRecordBatchReaderBuilder;
@@ -61,7 +61,7 @@ struct Args {
#[arg(long = "skip-read")]
skip_read: bool,

#[arg(short, long, default_value_t = String::from("127.0.0.1:3001"))]
#[arg(short, long, default_value_t = String::from("127.0.0.1:4001"))]
endpoint: String,
}

@@ -97,6 +97,9 @@ async fn write_data(

for record_batch in record_batch_reader {
let record_batch = record_batch.unwrap();
if !is_record_batch_full(&record_batch) {
continue;
}
let (columns, row_count) = convert_record_batch(record_batch);
let request = InsertRequest {
table_name: TABLE_NAME.to_string(),
@@ -122,11 +125,17 @@ fn convert_record_batch(record_batch: RecordBatch) -> (Vec<Column>, u32) {
let mut columns = vec![];

for (array, field) in record_batch.columns().iter().zip(fields.iter()) {
let values = build_values(array);
let (values, datatype) = build_values(array);

let column = Column {
column_name: field.name().to_owned(),
values: Some(values),
null_mask: vec![],
null_mask: array
.data()
.nulls()
.map(|bitmap| bitmap.buffer().as_slice().to_vec())
.unwrap_or_default(),
datatype: datatype.into(),
// datatype and semantic_type are set to default
..Default::default()
};
@@ -136,7 +145,7 @@ fn convert_record_batch(record_batch: RecordBatch) -> (Vec<Column>, u32) {
(columns, row_count as _)
}

fn build_values(column: &ArrayRef) -> Values {
fn build_values(column: &ArrayRef) -> (Values, ColumnDataType) {
match column.data_type() {
DataType::Int64 => {
let array = column
@@ -144,10 +153,13 @@ fn build_values(column: &ArrayRef) -> Values {
.downcast_ref::<PrimitiveArray<Int64Type>>()
.unwrap();
let values = array.values();
Values {
i64_values: values.to_vec(),
..Default::default()
}
(
Values {
i64_values: values.to_vec(),
..Default::default()
},
ColumnDataType::Int64,
)
}
DataType::Float64 => {
let array = column
@@ -155,29 +167,38 @@ fn build_values(column: &ArrayRef) -> Values {
.downcast_ref::<PrimitiveArray<Float64Type>>()
.unwrap();
let values = array.values();
Values {
f64_values: values.to_vec(),
..Default::default()
}
(
Values {
f64_values: values.to_vec(),
..Default::default()
},
ColumnDataType::Float64,
)
}
DataType::Timestamp(_, _) => {
let array = column
.as_any()
.downcast_ref::<TimestampNanosecondArray>()
.downcast_ref::<TimestampMicrosecondArray>()
.unwrap();
let values = array.values();
Values {
i64_values: values.to_vec(),
..Default::default()
}
(
Values {
ts_microsecond_values: values.to_vec(),
..Default::default()
},
ColumnDataType::TimestampMicrosecond,
)
}
DataType::Utf8 => {
let array = column.as_any().downcast_ref::<StringArray>().unwrap();
let values = array.iter().filter_map(|s| s.map(String::from)).collect();
Values {
string_values: values,
..Default::default()
}
(
Values {
string_values: values,
..Default::default()
},
ColumnDataType::String,
)
}
DataType::Null
| DataType::Boolean
@@ -213,6 +234,10 @@ fn build_values(column: &ArrayRef) -> Values {
}
}

fn is_record_batch_full(batch: &RecordBatch) -> bool {
batch.columns().iter().all(|col| col.null_count() == 0)
}

fn create_table_expr() -> CreateTableExpr {
CreateTableExpr {
catalog_name: CATALOG_NAME.to_string(),
@@ -228,13 +253,13 @@ fn create_table_expr() -> CreateTableExpr {
},
ColumnDef {
name: "tpep_pickup_datetime".to_string(),
datatype: ColumnDataType::Int64 as i32,
datatype: ColumnDataType::TimestampMicrosecond as i32,
is_nullable: true,
default_constraint: vec![],
},
ColumnDef {
name: "tpep_dropoff_datetime".to_string(),
datatype: ColumnDataType::Int64 as i32,
datatype: ColumnDataType::TimestampMicrosecond as i32,
is_nullable: true,
default_constraint: vec![],
},
@@ -340,7 +365,7 @@ fn create_table_expr() -> CreateTableExpr {
create_if_not_exists: false,
table_options: Default::default(),
region_ids: vec![0],
table_id: Some(TableId { id: 0 }),
table_id: None,
}
}

@@ -37,11 +37,19 @@ type = "File"
data_dir = "/tmp/greptimedb/data/"

# Compaction options, see `standalone.example.toml`.
[compaction]
[storage.compaction]
max_inflight_tasks = 4
max_files_in_level0 = 8
max_purge_tasks = 32

# Storage manifest options
[storage.manifest]
# Region checkpoint actions margin.
# Create a checkpoint every <checkpoint_margin> actions.
checkpoint_margin = 10
# Region manifest logs and checkpoints gc execution duration
gc_duration = '30s'

# Procedure storage options, see `standalone.example.toml`.
# [procedure.store]
# type = "File"

@@ -99,7 +99,7 @@ type = "File"
data_dir = "/tmp/greptimedb/data/"

# Compaction options.
[compaction]
[storage.compaction]
# Max task number that can concurrently run.
max_inflight_tasks = 4
# Max files in level 0 to trigger compaction.
@@ -107,6 +107,15 @@ max_files_in_level0 = 8
# Max task number for SST purge task after compaction.
max_purge_tasks = 32

# Storage manifest options
[storage.manifest]
# Region checkpoint actions margin.
# Create a checkpoint every <checkpoint_margin> actions.
checkpoint_margin = 10
# Region manifest logs and checkpoints gc execution duration
gc_duration = '30s'

# Procedure storage options.
# Uncomment to enable.
# [procedure.store]

@@ -1,9 +1,36 @@
#!/usr/bin/env bash

set -e

# this script will download Python source code, compile it, and install it to /usr/local/lib
# then use this python to compile cross-compiled python for aarch64
ARCH=$1
PYTHON_VERSION=3.10.10
PYTHON_SOURCE_DIR=Python-${PYTHON_VERSION}
PYTHON_INSTALL_PATH_AMD64=${PWD}/python-${PYTHON_VERSION}/amd64
PYTHON_INSTALL_PATH_AARCH64=${PWD}/python-${PYTHON_VERSION}/aarch64

function download_python_source_code() {
wget https://www.python.org/ftp/python/$PYTHON_VERSION/Python-$PYTHON_VERSION.tgz
tar -xvf Python-$PYTHON_VERSION.tgz
}

function compile_for_amd64_platform() {
mkdir -p "$PYTHON_INSTALL_PATH_AMD64"

echo "Compiling for amd64 platform..."

./configure \
--prefix="$PYTHON_INSTALL_PATH_AMD64" \
--enable-shared \
ac_cv_pthread_is_default=no ac_cv_pthread=yes ac_cv_cxx_thread=yes \
ac_cv_have_long_long_format=yes \
--disable-ipv6 ac_cv_file__dev_ptmx=no ac_cv_file__dev_ptc=no

make
make install
}

wget https://www.python.org/ftp/python/3.10.10/Python-3.10.10.tgz
tar -xvf Python-3.10.10.tgz
cd Python-3.10.10
# explain Python compile options here a bit:s
# --enable-shared: enable building a shared Python library (default is no) but we do need it for calling from rust
# CC, CXX, AR, LD, RANLIB: set the compiler, archiver, linker, and ranlib programs to use
@@ -14,33 +41,47 @@ cd Python-3.10.10
# ac_cv_have_long_long_format=yes: target platform supports long long type
# disable-ipv6: disable ipv6 support, we don't need it in here
# ac_cv_file__dev_ptmx=no ac_cv_file__dev_ptc=no: disable pty support, we don't need it in here
function compile_for_aarch64_platform() {
export LD_LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LD_LIBRARY_PATH
export LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LIBRARY_PATH
export PATH=$PYTHON_INSTALL_PATH_AMD64/bin:$PATH

mkdir -p "$PYTHON_INSTALL_PATH_AARCH64"

echo "Compiling for aarch64 platform..."
echo "LD_LIBRARY_PATH: $LD_LIBRARY_PATH"
echo "LIBRARY_PATH: $LIBRARY_PATH"
echo "PATH: $PATH"

./configure --build=x86_64-linux-gnu --host=aarch64-linux-gnu \
--prefix="$PYTHON_INSTALL_PATH_AARCH64" --enable-optimizations \
CC=aarch64-linux-gnu-gcc \
CXX=aarch64-linux-gnu-g++ \
AR=aarch64-linux-gnu-ar \
LD=aarch64-linux-gnu-ld \
RANLIB=aarch64-linux-gnu-ranlib \
--enable-shared \
ac_cv_pthread_is_default=no ac_cv_pthread=yes ac_cv_cxx_thread=yes \
ac_cv_have_long_long_format=yes \
--disable-ipv6 ac_cv_file__dev_ptmx=no ac_cv_file__dev_ptc=no

make
make altinstall
}

# Main script starts here.
download_python_source_code

# Enter the python source code directory.
cd $PYTHON_SOURCE_DIR || exit 1

# Build local python first, then build cross-compiled python.
./configure \
--enable-shared \
ac_cv_pthread_is_default=no ac_cv_pthread=yes ac_cv_cxx_thread=yes \
ac_cv_have_long_long_format=yes \
--disable-ipv6 ac_cv_file__dev_ptmx=no ac_cv_file__dev_ptc=no && \
make
make install
cd ..
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/lib/
export LIBRARY_PATH=$LIBRARY_PATH:/usr/local/lib/
export PY_INSTALL_PATH=$(pwd)/python_arm64_build
cd Python-3.10.10 && \
make clean && \
make distclean && \
alias python=python3 && \
./configure --build=x86_64-linux-gnu --host=aarch64-linux-gnu \
--prefix=$PY_INSTALL_PATH --enable-optimizations \
CC=aarch64-linux-gnu-gcc \
CXX=aarch64-linux-gnu-g++ \
AR=aarch64-linux-gnu-ar \
LD=aarch64-linux-gnu-ld \
RANLIB=aarch64-linux-gnu-ranlib \
--enable-shared \
ac_cv_pthread_is_default=no ac_cv_pthread=yes ac_cv_cxx_thread=yes \
ac_cv_have_long_long_format=yes \
--disable-ipv6 ac_cv_file__dev_ptmx=no ac_cv_file__dev_ptc=no && \
make && make altinstall && \
cd ..
compile_for_amd64_platform

# Clean the build directory.
make clean && make distclean

# Cross compile python for aarch64.
if [ "$ARCH" = "aarch64-unknown-linux-gnu" ]; then
compile_for_aarch64_platform
fi

@@ -1,6 +1,14 @@
FROM ubuntu:22.04

RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get -y install ca-certificates
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
ca-certificates \
python3.10 \
python3.10-dev \
python3-pip

COPY requirements.txt /etc/greptime/requirements.txt

RUN python3 -m pip install -r /etc/greptime/requirements.txt

ARG TARGETARCH

docker/ci/requirements.txt (new file) | 5

@@ -0,0 +1,5 @@
numpy>=1.24.2
pandas>=1.5.3
pyarrow>=11.0.0
requests>=2.28.2
scipy>=1.10.1

scripts/fetch-dashboard-assets.sh (new executable file) | 39

@@ -0,0 +1,39 @@
#!/usr/bin/env bash

# This script is used to download built dashboard assets from the "GreptimeTeam/dashboard" repository.

set -e

declare -r SCRIPT_DIR=$(cd $(dirname ${0}) >/dev/null 2>&1 && pwd)
declare -r ROOT_DIR=$(dirname ${SCRIPT_DIR})
declare -r STATIC_DIR="$ROOT_DIR/src/servers/dashboard"

RELEASE_VERSION="$(cat $STATIC_DIR/VERSION)"

# Download the SHA256 checksum attached to the release. To verify the integrity
# of the download, this checksum will be used to check the download tar file
# containing the built dashboard assets.
curl -Ls https://github.com/GreptimeTeam/dashboard/releases/download/$RELEASE_VERSION/sha256.txt --output sha256.txt

# Download the tar file containing the built dashboard assets.
curl -L https://github.com/GreptimeTeam/dashboard/releases/download/$RELEASE_VERSION/build.tar.gz --output build.tar.gz

# Verify the checksums match; exit if they don't.
case "$(uname -s)" in
FreeBSD | Darwin)
echo "$(cat sha256.txt)" | shasum --algorithm 256 --check \
|| { echo "Checksums did not match for downloaded dashboard assets!"; exit 1; } ;;
Linux)
echo "$(cat sha256.txt)" | sha256sum --check -- \
|| { echo "Checksums did not match for downloaded dashboard assets!"; exit 1; } ;;
*)
echo "The '$(uname -s)' operating system is not supported as a build host for the dashboard" >&2
exit 1
esac

# Extract the assets and clean up.
tar -xzf build.tar.gz -C "$STATIC_DIR"
rm sha256.txt
rm build.tar.gz

echo "Successfully download dashboard assets to $STATIC_DIR"

@@ -10,7 +10,7 @@ common-base = { path = "../common/base" }
common-error = { path = "../common/error" }
common-time = { path = "../common/time" }
datatypes = { path = "../datatypes" }
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "3a715150563b89d5dfc81a5838eac1f66a5658a1" }
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "d3861c34f7920238869d0d4e50dc1e6b189d2a6b" }
prost.workspace = true
snafu = { version = "0.7", features = ["backtraces"] }
tonic.workspace = true

@@ -219,6 +219,12 @@ pub enum Error {
#[snafu(backtrace)]
source: table::error::Error,
},

#[snafu(display("Invalid system table definition: {err_msg}"))]
InvalidSystemTableDef {
err_msg: String,
backtrace: Backtrace,
},
}

pub type Result<T> = std::result::Result<T, Error>;
@@ -231,7 +237,8 @@ impl ErrorExt for Error {
| Error::TableNotFound { .. }
| Error::IllegalManagerState { .. }
| Error::CatalogNotFound { .. }
| Error::InvalidEntryType { .. } => StatusCode::Unexpected,
| Error::InvalidEntryType { .. }
| Error::InvalidSystemTableDef { .. } => StatusCode::Unexpected,

Error::SystemCatalog { .. }
| Error::EmptyValue { .. }

@@ -19,8 +19,8 @@ use std::fmt::{Debug, Formatter};
use std::sync::Arc;

use api::v1::meta::{RegionStat, TableName};
use common_telemetry::info;
use snafu::{OptionExt, ResultExt};
use common_telemetry::{info, warn};
use snafu::ResultExt;
use table::engine::{EngineContext, TableEngineRef};
use table::metadata::TableId;
use table::requests::CreateTableRequest;
@@ -228,43 +228,32 @@ pub(crate) async fn handle_system_table_request<'a, M: CatalogManager>(

/// The stat of regions in the datanode node.
/// The number of regions can be got from len of vec.
pub async fn region_stats(catalog_manager: &CatalogManagerRef) -> Result<Vec<RegionStat>> {
///
/// Ignores any errors occurred during iterating regions. The intention of this method is to
/// collect region stats that will be carried in Datanode's heartbeat to Metasrv, so it's a
/// "try our best" job.
pub async fn datanode_stat(catalog_manager: &CatalogManagerRef) -> (u64, Vec<RegionStat>) {
let mut region_number: u64 = 0;
let mut region_stats = Vec::new();
for catalog_name in catalog_manager.catalog_names()? {
let catalog =
catalog_manager
.catalog(&catalog_name)?
.context(error::CatalogNotFoundSnafu {
catalog_name: &catalog_name,
})?;

for schema_name in catalog.schema_names()? {
let schema = catalog
.schema(&schema_name)?
.context(error::SchemaNotFoundSnafu {
catalog: &catalog_name,
schema: &schema_name,
})?;
let Ok(catalog_names) = catalog_manager.catalog_names() else { return (region_number, region_stats) };
for catalog_name in catalog_names {
let Ok(Some(catalog)) = catalog_manager.catalog(&catalog_name) else { continue };

for table_name in schema.table_names()? {
let table =
schema
.table(&table_name)
.await?
.context(error::TableNotFoundSnafu {
table_info: &table_name,
})?;
let Ok(schema_names) = catalog.schema_names() else { continue };
for schema_name in schema_names {
let Ok(Some(schema)) = catalog.schema(&schema_name) else { continue };

region_stats.extend(
table
.region_stats()
.context(error::RegionStatsSnafu {
catalog: &catalog_name,
schema: &schema_name,
table: &table_name,
})?
.into_iter()
.map(|stat| RegionStat {
let Ok(table_names) = schema.table_names() else { continue };
for table_name in table_names {
let Ok(Some(table)) = schema.table(&table_name).await else { continue };

let region_numbers = &table.table_info().meta.region_numbers;
region_number += region_numbers.len() as u64;

match table.region_stats() {
Ok(stats) => {
let stats = stats.into_iter().map(|stat| RegionStat {
region_id: stat.region_id,
table_name: Some(TableName {
catalog_name: catalog_name.clone(),
@@ -273,10 +262,16 @@ pub async fn region_stats(catalog_manager: &CatalogManagerRef) -> Result<Vec<Reg
}),
approximate_bytes: stat.disk_usage_bytes as i64,
..Default::default()
}),
);
});

region_stats.extend(stats);
}
Err(e) => {
warn!("Failed to get region status, err: {:?}", e);
}
};
}
}
}
Ok(region_stats)
(region_number, region_stats)
}

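Aside (not part of the diff): the rewritten datanode_stat above relies on Rust's let-else syntax so that any catalog, schema, or table that fails to resolve is simply skipped rather than aborting the whole heartbeat collection. A minimal, self-contained sketch of that "skip on failure" style, using made-up data rather than the catalog types:

fn count_chars(entries: &[Result<Option<String>, String>]) -> usize {
    let mut total = 0;
    for entry in entries {
        // Any Err or None skips this entry instead of propagating an error,
        // mirroring how datanode_stat treats unreadable catalogs/schemas/tables.
        let Ok(Some(value)) = entry else { continue };
        total += value.len();
    }
    total
}
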
@@ -400,7 +400,7 @@ mod tests {
use log_store::NoopLogStore;
use mito::config::EngineConfig;
use mito::engine::MitoEngine;
use object_store::{ObjectStore, ObjectStoreBuilder};
use object_store::ObjectStore;
use storage::compaction::noop::NoopCompactionScheduler;
use storage::config::EngineConfig as StorageEngineConfig;
use storage::EngineImpl;
@@ -482,11 +482,9 @@ mod tests {
pub async fn prepare_table_engine() -> (TempDir, TableEngineRef) {
let dir = create_temp_dir("system-table-test");
let store_dir = dir.path().to_string_lossy();
let accessor = object_store::services::Fs::default()
.root(&store_dir)
.build()
.unwrap();
let object_store = ObjectStore::new(accessor).finish();
let mut builder = object_store::services::Fs::default();
builder.root(&store_dir);
let object_store = ObjectStore::new(builder).unwrap().finish();
let noop_compaction_scheduler = Arc::new(NoopCompactionScheduler::default());
let table_engine = Arc::new(MitoEngine::new(
EngineConfig::default(),

@@ -16,7 +16,7 @@ use std::collections::HashMap;
use std::sync::Arc;

use common_catalog::format_full_table_name;
use datafusion::common::{OwnedTableReference, ResolvedTableReference, TableReference};
use datafusion::common::{ResolvedTableReference, TableReference};
use datafusion::datasource::provider_as_source;
use datafusion::logical_expr::TableSource;
use session::context::QueryContext;
@@ -87,9 +87,8 @@ impl DfTableSourceProvider {

pub async fn resolve_table(
&mut self,
table_ref: OwnedTableReference,
table_ref: TableReference<'_>,
) -> Result<Arc<dyn TableSource>> {
let table_ref = table_ref.as_table_reference();
let table_ref = self.resolve_table_ref(table_ref)?;

let resolved_name = table_ref.to_string();

@@ -23,7 +23,7 @@ enum_dispatch = "0.3"
futures-util.workspace = true
parking_lot = "0.12"
prost.workspace = true
rand = "0.8"
rand.workspace = true
snafu.workspace = true
tonic.workspace = true

@@ -14,6 +14,9 @@

use std::sync::Arc;

use api::v1::greptime_database_client::GreptimeDatabaseClient;
use api::v1::health_check_client::HealthCheckClient;
use api::v1::HealthCheckRequest;
use arrow_flight::flight_service_client::FlightServiceClient;
use common_grpc::channel_manager::ChannelManager;
use parking_lot::RwLock;
@@ -23,6 +26,10 @@ use tonic::transport::Channel;
use crate::load_balance::{LoadBalance, Loadbalancer};
use crate::{error, Result};

pub(crate) struct DatabaseClient {
pub(crate) inner: GreptimeDatabaseClient<Channel>,
}

pub(crate) struct FlightClient {
addr: String,
client: FlightServiceClient<Channel>,
@@ -118,7 +125,7 @@ impl Client {
self.inner.set_peers(urls);
}

pub(crate) fn make_client(&self) -> Result<FlightClient> {
fn find_channel(&self) -> Result<(String, Channel)> {
let addr = self
.inner
.get_peer()
@@ -131,11 +138,30 @@ impl Client {
.channel_manager
.get(&addr)
.context(error::CreateChannelSnafu { addr: &addr })?;
Ok((addr, channel))
}

pub(crate) fn make_flight_client(&self) -> Result<FlightClient> {
let (addr, channel) = self.find_channel()?;
Ok(FlightClient {
addr,
client: FlightServiceClient::new(channel),
})
}

pub(crate) fn make_database_client(&self) -> Result<DatabaseClient> {
let (_, channel) = self.find_channel()?;
Ok(DatabaseClient {
inner: GreptimeDatabaseClient::new(channel),
})
}

pub async fn health_check(&self) -> Result<()> {
let (_, channel) = self.find_channel()?;
let mut client = HealthCheckClient::new(channel);
client.health_check(HealthCheckRequest {}).await?;
Ok(())
}
}

#[cfg(test)]

@@ -12,15 +12,14 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::str::FromStr;

use api::v1::auth_header::AuthScheme;
use api::v1::ddl_request::Expr as DdlExpr;
use api::v1::greptime_request::Request;
use api::v1::query_request::Query;
use api::v1::{
AlterExpr, AuthHeader, CreateTableExpr, DdlRequest, DropTableExpr, FlushTableExpr,
GreptimeRequest, InsertRequest, PromRangeQuery, QueryRequest, RequestHeader,
greptime_response, AffectedRows, AlterExpr, AuthHeader, CreateTableExpr, DdlRequest,
DropTableExpr, FlushTableExpr, GreptimeRequest, InsertRequest, PromRangeQuery, QueryRequest,
RequestHeader,
};
use arrow_flight::{FlightData, Ticket};
use common_error::prelude::*;
@@ -31,28 +30,49 @@ use futures_util::{TryFutureExt, TryStreamExt};
use prost::Message;
use snafu::{ensure, ResultExt};

use crate::error::{ConvertFlightDataSnafu, IllegalFlightMessagesSnafu};
use crate::error::{
ConvertFlightDataSnafu, IllegalDatabaseResponseSnafu, IllegalFlightMessagesSnafu,
};
use crate::{error, Client, Result};

#[derive(Clone, Debug)]
#[derive(Clone, Debug, Default)]
pub struct Database {
// The "catalog" and "schema" to be used in processing the requests at the server side.
// They are the "hint" or "context", just like how the "database" in "USE" statement is treated in MySQL.
// They will be carried in the request header.
catalog: String,
schema: String,
// The dbname follows naming rule as out mysql, postgres and http
// protocol. The server treat dbname in priority of catalog/schema.
dbname: String,

client: Client,
ctx: FlightContext,
}

impl Database {
/// Create database service client using catalog and schema
pub fn new(catalog: impl Into<String>, schema: impl Into<String>, client: Client) -> Self {
Self {
catalog: catalog.into(),
schema: schema.into(),
client,
ctx: FlightContext::default(),
..Default::default()
}
}

/// Create database service client using dbname.
///
/// This API is designed for external usage. `dbname` is:
///
/// - the name of database when using GreptimeDB standalone or cluster
/// - the name provided by GreptimeCloud or other multi-tenant GreptimeDB
/// environment
pub fn new_with_dbname(dbname: impl Into<String>, client: Client) -> Self {
Self {
dbname: dbname.into(),
client,
..Default::default()
}
}

@@ -72,14 +92,41 @@ impl Database {
self.schema = schema.into();
}

pub fn dbname(&self) -> &String {
&self.dbname
}

pub fn set_dbname(&mut self, dbname: impl Into<String>) {
self.dbname = dbname.into();
}

pub fn set_auth(&mut self, auth: AuthScheme) {
self.ctx.auth_header = Some(AuthHeader {
auth_scheme: Some(auth),
});
}

pub async fn insert(&self, request: InsertRequest) -> Result<Output> {
self.do_get(Request::Insert(request)).await
pub async fn insert(&self, request: InsertRequest) -> Result<u32> {
let mut client = self.client.make_database_client()?.inner;
let request = GreptimeRequest {
header: Some(RequestHeader {
catalog: self.catalog.clone(),
schema: self.schema.clone(),
authorization: self.ctx.auth_header.clone(),
dbname: self.dbname.clone(),
}),
request: Some(Request::Insert(request)),
};
let response = client
.handle(request)
.await?
.into_inner()
.response
.context(IllegalDatabaseResponseSnafu {
err_msg: "GreptimeResponse is empty",
})?;
let greptime_response::Response::AffectedRows(AffectedRows { value }) = response;
Ok(value)
}

pub async fn sql(&self, sql: &str) -> Result<Output> {
@@ -148,6 +195,7 @@ impl Database {
catalog: self.catalog.clone(),
schema: self.schema.clone(),
authorization: self.ctx.auth_header.clone(),
dbname: self.dbname.clone(),
}),
request: Some(request),
};
@@ -155,7 +203,7 @@ impl Database {
ticket: request.encode_to_vec().into(),
};

let mut client = self.client.make_client()?;
let mut client = self.client.make_flight_client()?;

// TODO(LFC): Streaming get flight data.
let flight_data: Vec<FlightData> = client
@@ -164,22 +212,22 @@ impl Database {
.and_then(|response| response.into_inner().try_collect())
.await
.map_err(|e| {
let code = get_metadata_value(&e, INNER_ERROR_CODE)
.and_then(|s| StatusCode::from_str(&s).ok())
.unwrap_or(StatusCode::Unknown);
let msg = get_metadata_value(&e, INNER_ERROR_MSG).unwrap_or(e.to_string());
error::ExternalSnafu { code, msg }
let tonic_code = e.code();
let e: error::Error = e.into();
let code = e.status_code();
let msg = e.to_string();
error::ServerSnafu { code, msg }
.fail::<()>()
.map_err(BoxedError::new)
.context(error::FlightGetSnafu {
tonic_code: e.code(),
tonic_code,
addr: client.addr(),
})
.map_err(|error| {
logging::error!(
"Failed to do Flight get, addr: {}, code: {}, source: {}",
client.addr(),
e.code(),
tonic_code,
error
);
error
@@ -210,12 +258,6 @@ impl Database {
}
}

fn get_metadata_value(e: &tonic::Status, key: &str) -> Option<String> {
e.metadata()
.get(key)
.and_then(|v| String::from_utf8(v.as_bytes().to_vec()).ok())
}

#[derive(Default, Debug, Clone)]
pub struct FlightContext {
auth_header: Option<AuthHeader>,

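Aside (not part of the diff): a hedged usage sketch of the reworked client API above. The names are taken from the diff; the database name, the way the Client and InsertRequest are obtained, and the `client::Result` re-export are assumptions.

use client::api::v1::InsertRequest;
use client::{Client, Database};

async fn write_rows(client: Client, request: InsertRequest) -> client::Result<()> {
    // `new_with_dbname` is the new constructor aimed at GreptimeCloud-style,
    // multi-tenant deployments; "my_database" is a placeholder name.
    let db = Database::new_with_dbname("my_database", client);

    // `insert` now goes through the GreptimeDatabase gRPC service and returns
    // the number of affected rows instead of an `Output`.
    let affected: u32 = db.insert(request).await?;
    println!("inserted {affected} rows");

    // Queries such as `sql` still travel over the Arrow Flight path.
    let _output = db.sql("SELECT 1").await?;
    Ok(())
}
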
@@ -13,9 +13,10 @@
// limitations under the License.

use std::any::Any;
use std::str::FromStr;

use common_error::prelude::*;
use tonic::Code;
use tonic::{Code, Status};

#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
@@ -65,9 +66,12 @@ pub enum Error {
source: common_grpc::error::Error,
},

/// Error deserialized from gRPC metadata
// Server error carried in Tonic Status's metadata.
#[snafu(display("{}", msg))]
ExternalError { code: StatusCode, msg: String },
Server { code: StatusCode, msg: String },

#[snafu(display("Illegal Database response: {err_msg}"))]
IllegalDatabaseResponse { err_msg: String },
}

pub type Result<T> = std::result::Result<T, Error>;
@@ -77,13 +81,15 @@ impl ErrorExt for Error {
match self {
Error::IllegalFlightMessages { .. }
| Error::ColumnDataType { .. }
| Error::MissingField { .. } => StatusCode::Internal,
| Error::MissingField { .. }
| Error::IllegalDatabaseResponse { .. } => StatusCode::Internal,

Error::Server { code, .. } => *code,
Error::FlightGet { source, .. } => source.status_code(),
Error::CreateChannel { source, .. } | Error::ConvertFlightData { source } => {
source.status_code()
}
Error::IllegalGrpcClientState { .. } => StatusCode::Unexpected,
Error::ExternalError { code, .. } => *code,
}
}

@@ -95,3 +101,21 @@ impl ErrorExt for Error {
self
}
}

impl From<Status> for Error {
fn from(e: Status) -> Self {
fn get_metadata_value(e: &Status, key: &str) -> Option<String> {
e.metadata()
.get(key)
.and_then(|v| String::from_utf8(v.as_bytes().to_vec()).ok())
}

let code = get_metadata_value(&e, INNER_ERROR_CODE)
.and_then(|s| StatusCode::from_str(&s).ok())
.unwrap_or(StatusCode::Unknown);

let msg = get_metadata_value(&e, INNER_ERROR_MSG).unwrap_or(e.to_string());

Self::Server { code, msg }
}
}

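Aside (not part of the diff): with the `From<Status>` impl above, a call site can turn a failed tonic response directly into the client error, recovering the status code and message that the server stored in the gRPC metadata. A minimal sketch, with the module path assumed:

fn into_client_error(status: tonic::Status) -> client::error::Error {
    // Delegates to `impl From<Status> for Error`, which reads the
    // INNER_ERROR_CODE / INNER_ERROR_MSG metadata entries when present.
    status.into()
}
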
@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.

#![doc = include_str!("../../../../README.md")]

use std::fmt;

use clap::Parser;

@@ -31,7 +31,6 @@ impl Instance {
}

pub async fn stop(&self) -> Result<()> {
// TODO: handle cli shutdown
Ok(())
}
}

@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::time::Duration;

use clap::Parser;
use common_telemetry::logging;
use datanode::datanode::{
@@ -21,7 +23,7 @@ use meta_client::MetaClientOptions;
use servers::Mode;
use snafu::ResultExt;

use crate::error::{Error, MissingConfigSnafu, Result, StartDatanodeSnafu};
use crate::error::{Error, MissingConfigSnafu, Result, ShutdownDatanodeSnafu, StartDatanodeSnafu};
use crate::toml_loader;

pub struct Instance {
@@ -34,8 +36,10 @@ impl Instance {
}

pub async fn stop(&self) -> Result<()> {
// TODO: handle datanode shutdown
Ok(())
self.datanode
.shutdown()
.await
.context(ShutdownDatanodeSnafu)
}
}

@@ -84,6 +88,10 @@ struct StartCommand {
wal_dir: Option<String>,
#[clap(long)]
procedure_dir: Option<String>,
#[clap(long)]
http_addr: Option<String>,
#[clap(long)]
http_timeout: Option<u64>,
}

impl StartCommand {
@@ -144,7 +152,7 @@ impl TryFrom<StartCommand> for DatanodeOptions {
}

if let Some(data_dir) = cmd.data_dir {
opts.storage = ObjectStoreConfig::File(FileConfig { data_dir });
opts.storage.store = ObjectStoreConfig::File(FileConfig { data_dir });
}

if let Some(wal_dir) = cmd.wal_dir {
@@ -153,6 +161,12 @@ impl TryFrom<StartCommand> for DatanodeOptions {
if let Some(procedure_dir) = cmd.procedure_dir {
opts.procedure = Some(ProcedureConfig::from_file_path(procedure_dir));
}
if let Some(http_addr) = cmd.http_addr {
opts.http_opts.addr = http_addr
}
if let Some(http_timeout) = cmd.http_timeout {
opts.http_opts.timeout = Duration::from_secs(http_timeout)
}

Ok(opts)
}
@@ -164,8 +178,9 @@ mod tests {
use std::io::Write;
use std::time::Duration;

use common_base::readable_size::ReadableSize;
use common_test_util::temp_dir::create_named_temp_file;
use datanode::datanode::{CompactionConfig, ObjectStoreConfig};
use datanode::datanode::{CompactionConfig, ObjectStoreConfig, RegionManifestConfig};
use servers::Mode;

use super::*;
@@ -201,10 +216,14 @@ mod tests {
type = "File"
data_dir = "/tmp/greptimedb/data/"

[compaction]
max_inflight_tasks = 4
max_files_in_level0 = 8
[storage.compaction]
max_inflight_tasks = 3
max_files_in_level0 = 7
max_purge_tasks = 32

[storage.manifest]
checkpoint_margin = 9
gc_duration = '7s'
"#;
write!(file, "{}", toml_str).unwrap();

@@ -235,9 +254,9 @@ mod tests {
assert_eq!(3000, timeout_millis);
assert!(tcp_nodelay);

match options.storage {
ObjectStoreConfig::File(FileConfig { data_dir }) => {
assert_eq!("/tmp/greptimedb/data/".to_string(), data_dir)
match &options.storage.store {
ObjectStoreConfig::File(FileConfig { data_dir, .. }) => {
assert_eq!("/tmp/greptimedb/data/", data_dir)
}
ObjectStoreConfig::S3 { .. } => unreachable!(),
ObjectStoreConfig::Oss { .. } => unreachable!(),
@@ -245,11 +264,19 @@ mod tests {

assert_eq!(
CompactionConfig {
max_inflight_tasks: 4,
max_files_in_level0: 8,
max_inflight_tasks: 3,
max_files_in_level0: 7,
max_purge_tasks: 32,
sst_write_buffer_size: ReadableSize::mb(8),
},
options.compaction
options.storage.compaction,
);
assert_eq!(
RegionManifestConfig {
checkpoint_margin: Some(9),
gc_duration: Some(Duration::from_secs(7)),
},
options.storage.manifest,
);
}

@@ -26,12 +26,24 @@ pub enum Error {
source: datanode::error::Error,
},

#[snafu(display("Failed to shutdown datanode, source: {}", source))]
ShutdownDatanode {
#[snafu(backtrace)]
source: datanode::error::Error,
},

#[snafu(display("Failed to start frontend, source: {}", source))]
StartFrontend {
#[snafu(backtrace)]
source: frontend::error::Error,
},

#[snafu(display("Failed to shutdown frontend, source: {}", source))]
ShutdownFrontend {
#[snafu(backtrace)]
source: frontend::error::Error,
},

#[snafu(display("Failed to build meta server, source: {}", source))]
BuildMetaServer {
#[snafu(backtrace)]
@@ -44,6 +56,12 @@ pub enum Error {
source: meta_srv::error::Error,
},

#[snafu(display("Failed to shutdown meta server, source: {}", source))]
ShutdownMetaServer {
#[snafu(backtrace)]
source: meta_srv::error::Error,
},

#[snafu(display("Failed to read config file: {}, source: {}", path, source))]
ReadConfig {
path: String,
@@ -143,7 +161,10 @@ impl ErrorExt for Error {
match self {
Error::StartDatanode { source } => source.status_code(),
Error::StartFrontend { source } => source.status_code(),
Error::ShutdownDatanode { source } => source.status_code(),
Error::ShutdownFrontend { source } => source.status_code(),
Error::StartMetaServer { source } => source.status_code(),
Error::ShutdownMetaServer { source } => source.status_code(),
Error::BuildMetaServer { source } => source.status_code(),
Error::UnsupportedSelectorType { source, .. } => source.status_code(),
Error::ReadConfig { .. } | Error::ParseConfig { .. } | Error::MissingConfig { .. } => {

@@ -47,8 +47,10 @@ impl Instance {
|
||||
}
|
||||
|
||||
pub async fn stop(&self) -> Result<()> {
|
||||
// TODO: handle frontend shutdown
|
||||
Ok(())
|
||||
self.frontend
|
||||
.shutdown()
|
||||
.await
|
||||
.context(error::ShutdownFrontendSnafu)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -12,6 +12,8 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::time::Duration;
|
||||
|
||||
use clap::Parser;
|
||||
use common_telemetry::{info, logging, warn};
|
||||
use meta_srv::bootstrap::MetaSrvInstance;
|
||||
@@ -30,13 +32,14 @@ impl Instance {
|
||||
self.instance
|
||||
.start()
|
||||
.await
|
||||
.context(error::StartMetaServerSnafu)?;
|
||||
Ok(())
|
||||
.context(error::StartMetaServerSnafu)
|
||||
}
|
||||
|
||||
pub async fn stop(&self) -> Result<()> {
|
||||
// TODO: handle metasrv shutdown
|
||||
Ok(())
|
||||
self.instance
|
||||
.shutdown()
|
||||
.await
|
||||
.context(error::ShutdownMetaServerSnafu)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -79,6 +82,10 @@ struct StartCommand {
|
||||
selector: Option<String>,
|
||||
#[clap(long)]
|
||||
use_memory_store: bool,
|
||||
#[clap(long)]
|
||||
http_addr: Option<String>,
|
||||
#[clap(long)]
|
||||
http_timeout: Option<u64>,
|
||||
}
|
||||
|
||||
impl StartCommand {
|
||||
@@ -127,6 +134,13 @@ impl TryFrom<StartCommand> for MetaSrvOptions {
|
||||
opts.use_memory_store = true;
|
||||
}
|
||||
|
||||
if let Some(http_addr) = cmd.http_addr {
|
||||
opts.http_opts.addr = http_addr;
|
||||
}
|
||||
if let Some(http_timeout) = cmd.http_timeout {
|
||||
opts.http_opts.timeout = Duration::from_secs(http_timeout);
|
||||
}
|
||||
|
||||
Ok(opts)
|
||||
}
|
||||
}
|
||||
@@ -149,6 +163,8 @@ mod tests {
|
||||
config_file: None,
|
||||
selector: Some("LoadBased".to_string()),
|
||||
use_memory_store: false,
|
||||
http_addr: None,
|
||||
http_timeout: None,
|
||||
};
|
||||
let options: MetaSrvOptions = cmd.try_into().unwrap();
|
||||
assert_eq!("127.0.0.1:3002".to_string(), options.bind_addr);
|
||||
@@ -177,6 +193,8 @@ mod tests {
|
||||
selector: None,
|
||||
config_file: Some(file.path().to_str().unwrap().to_string()),
|
||||
use_memory_store: false,
|
||||
http_addr: None,
|
||||
http_timeout: None,
|
||||
};
|
||||
let options: MetaSrvOptions = cmd.try_into().unwrap();
|
||||
assert_eq!("127.0.0.1:3002".to_string(), options.bind_addr);
|
||||
|
||||
@@ -17,9 +17,7 @@ use std::sync::Arc;
|
||||
use clap::Parser;
|
||||
use common_base::Plugins;
|
||||
use common_telemetry::info;
|
||||
use datanode::datanode::{
|
||||
CompactionConfig, Datanode, DatanodeOptions, ObjectStoreConfig, ProcedureConfig, WalConfig,
|
||||
};
|
||||
use datanode::datanode::{Datanode, DatanodeOptions, ProcedureConfig, StorageConfig, WalConfig};
|
||||
use datanode::instance::InstanceRef;
|
||||
use frontend::frontend::FrontendOptions;
|
||||
use frontend::grpc::GrpcOptions;
|
||||
@@ -36,7 +34,10 @@ use servers::tls::{TlsMode, TlsOption};
|
||||
use servers::Mode;
|
||||
use snafu::ResultExt;
|
||||
|
||||
use crate::error::{Error, IllegalConfigSnafu, Result, StartDatanodeSnafu, StartFrontendSnafu};
|
||||
use crate::error::{
|
||||
Error, IllegalConfigSnafu, Result, ShutdownDatanodeSnafu, ShutdownFrontendSnafu,
|
||||
StartDatanodeSnafu, StartFrontendSnafu,
|
||||
};
|
||||
use crate::frontend::load_frontend_plugins;
|
||||
use crate::toml_loader;
|
||||
|
||||
@@ -79,8 +80,7 @@ pub struct StandaloneOptions {
|
||||
pub prometheus_options: Option<PrometheusOptions>,
|
||||
pub prom_options: Option<PromOptions>,
|
||||
pub wal: WalConfig,
|
||||
pub storage: ObjectStoreConfig,
|
||||
pub compaction: CompactionConfig,
|
||||
pub storage: StorageConfig,
|
||||
pub procedure: Option<ProcedureConfig>,
|
||||
}
|
||||
|
||||
@@ -98,8 +98,7 @@ impl Default for StandaloneOptions {
|
||||
prometheus_options: Some(PrometheusOptions::default()),
|
||||
prom_options: Some(PromOptions::default()),
|
||||
wal: WalConfig::default(),
|
||||
storage: ObjectStoreConfig::default(),
|
||||
compaction: CompactionConfig::default(),
|
||||
storage: StorageConfig::default(),
|
||||
procedure: None,
|
||||
}
|
||||
}
|
||||
@@ -126,7 +125,6 @@ impl StandaloneOptions {
|
||||
enable_memory_catalog: self.enable_memory_catalog,
|
||||
wal: self.wal,
|
||||
storage: self.storage,
|
||||
compaction: self.compaction,
|
||||
procedure: self.procedure,
|
||||
..Default::default()
|
||||
}
|
||||
@@ -152,7 +150,17 @@ impl Instance {
|
||||
}
|
||||
|
||||
pub async fn stop(&self) -> Result<()> {
|
||||
// TODO: handle standalone shutdown
|
||||
self.frontend
|
||||
.shutdown()
|
||||
.await
|
||||
.context(ShutdownFrontendSnafu)?;
|
||||
|
||||
self.datanode
|
||||
.shutdown_instance()
|
||||
.await
|
||||
.context(ShutdownDatanodeSnafu)?;
|
||||
info!("Datanode instance stopped.");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
@@ -228,8 +236,9 @@ async fn build_frontend(
|
||||
plugins: Arc<Plugins>,
|
||||
datanode_instance: InstanceRef,
|
||||
) -> Result<FeInstance> {
|
||||
let mut frontend_instance = FeInstance::new_standalone(datanode_instance.clone());
|
||||
frontend_instance.set_script_handler(datanode_instance);
|
||||
let mut frontend_instance = FeInstance::try_new_standalone(datanode_instance.clone())
|
||||
.await
|
||||
.context(StartFrontendSnafu)?;
|
||||
frontend_instance.set_plugins(plugins.clone());
|
||||
Ok(frontend_instance)
|
||||
}
|
||||
|
||||
@@ -53,6 +53,10 @@ impl ReadableSize {
|
||||
pub const fn as_mb(self) -> u64 {
|
||||
self.0 / MIB
|
||||
}
|
||||
|
||||
pub const fn as_bytes(self) -> u64 {
|
||||
self.0
|
||||
}
|
||||
}
|
||||
|
||||
impl Div<u64> for ReadableSize {
|
||||
|
||||
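A quick sketch of the new as_bytes helper next to the existing constructors (the common_base import path is an assumption; the 8 MiB value mirrors the sst_write_buffer_size default earlier in this diff):

use common_base::readable_size::ReadableSize; // import path assumed

fn main() {
    // Same constructor CompactionConfig's sst_write_buffer_size uses above.
    let buffer = ReadableSize::mb(8);
    // The new as_bytes() exposes the raw byte count, as_mb() the MiB count.
    assert_eq!(buffer.as_bytes(), 8 * 1024 * 1024);
    assert_eq!(buffer.as_mb(), 8);
}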
13
src/common/datasource/Cargo.toml
Normal file
@@ -0,0 +1,13 @@
|
||||
[package]
|
||||
name = "common-datasource"
|
||||
version.workspace = true
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[dependencies]
|
||||
common-error = { path = "../error" }
|
||||
futures.workspace = true
|
||||
object-store = { path = "../../object-store" }
|
||||
regex = "1.7"
|
||||
snafu.workspace = true
|
||||
url = "2.3"
|
||||
75
src/common/datasource/src/error.rs
Normal file
@@ -0,0 +1,75 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::any::Any;
|
||||
|
||||
use common_error::prelude::*;
|
||||
use url::ParseError;
|
||||
|
||||
#[derive(Debug, Snafu)]
|
||||
#[snafu(visibility(pub))]
|
||||
pub enum Error {
|
||||
#[snafu(display("Unsupported backend protocol: {}", protocol))]
|
||||
UnsupportedBackendProtocol { protocol: String },
|
||||
|
||||
#[snafu(display("empty host: {}", url))]
|
||||
EmptyHostPath { url: String },
|
||||
|
||||
#[snafu(display("Invalid path: {}", path))]
|
||||
InvalidPath { path: String },
|
||||
|
||||
#[snafu(display("Invalid url: {}, error :{}", url, source))]
|
||||
InvalidUrl { url: String, source: ParseError },
|
||||
|
||||
#[snafu(display("Failed to build backend, source: {}", source))]
|
||||
BuildBackend {
|
||||
source: object_store::Error,
|
||||
backtrace: Backtrace,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to list object in path: {}, source: {}", path, source))]
|
||||
ListObjects {
|
||||
path: String,
|
||||
backtrace: Backtrace,
|
||||
source: object_store::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Invalid connection: {}", msg))]
|
||||
InvalidConnection { msg: String },
|
||||
}
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
|
||||
impl ErrorExt for Error {
|
||||
fn status_code(&self) -> StatusCode {
|
||||
use Error::*;
|
||||
match self {
|
||||
BuildBackend { .. } | ListObjects { .. } => StatusCode::StorageUnavailable,
|
||||
|
||||
UnsupportedBackendProtocol { .. }
|
||||
| InvalidConnection { .. }
|
||||
| InvalidUrl { .. }
|
||||
| EmptyHostPath { .. }
|
||||
| InvalidPath { .. } => StatusCode::InvalidArguments,
|
||||
}
|
||||
}
|
||||
|
||||
fn backtrace_opt(&self) -> Option<&Backtrace> {
|
||||
ErrorCompat::backtrace(self)
|
||||
}
|
||||
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
}
|
||||
18
src/common/datasource/src/lib.rs
Normal file
@@ -0,0 +1,18 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
pub mod error;
|
||||
pub mod lister;
|
||||
pub mod object_store;
|
||||
pub mod util;
|
||||
83
src/common/datasource/src/lister.rs
Normal file
@@ -0,0 +1,83 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use futures::{future, TryStreamExt};
|
||||
use object_store::{Entry, ObjectStore};
|
||||
use regex::Regex;
|
||||
use snafu::ResultExt;
|
||||
|
||||
use crate::error::{self, Result};
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub enum Source {
|
||||
Filename(String),
|
||||
Dir,
|
||||
}
|
||||
|
||||
pub struct Lister {
|
||||
object_store: ObjectStore,
|
||||
source: Source,
|
||||
path: String,
|
||||
regex: Option<Regex>,
|
||||
}
|
||||
|
||||
impl Lister {
|
||||
pub fn new(
|
||||
object_store: ObjectStore,
|
||||
source: Source,
|
||||
path: String,
|
||||
regex: Option<Regex>,
|
||||
) -> Self {
|
||||
Lister {
|
||||
object_store,
|
||||
source,
|
||||
path,
|
||||
regex,
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn list(&self) -> Result<Vec<Entry>> {
|
||||
match &self.source {
|
||||
Source::Dir => {
|
||||
let streamer = self
|
||||
.object_store
|
||||
.list(&self.path)
|
||||
.await
|
||||
.context(error::ListObjectsSnafu { path: &self.path })?;
|
||||
|
||||
streamer
|
||||
.try_filter(|f| {
|
||||
let res = self
|
||||
.regex
|
||||
.as_ref()
|
||||
.map(|x| x.is_match(f.name()))
|
||||
.unwrap_or(true);
|
||||
future::ready(res)
|
||||
})
|
||||
.try_collect::<Vec<_>>()
|
||||
.await
|
||||
.context(error::ListObjectsSnafu { path: &self.path })
|
||||
}
|
||||
Source::Filename(filename) => {
|
||||
// make sure this file exists
|
||||
let file_full_path = format!("{}{}", self.path, filename);
|
||||
let _ = self.object_store.stat(&file_full_path).await.context(
|
||||
error::ListObjectsSnafu {
|
||||
path: &file_full_path,
|
||||
},
|
||||
)?;
|
||||
Ok(vec![Entry::new(&file_full_path)])
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
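A hedged usage sketch of the new Lister (the directory path and the parquet filter are hypothetical; build_backend comes from the object_store module added below):

use std::collections::HashMap;

use common_datasource::error::Result;
use common_datasource::lister::{Lister, Source};
use common_datasource::object_store::build_backend;
use regex::Regex;

// List entries under a local directory whose names end in ".parquet".
// Source::Filename(name) would instead stat a single file under `path`.
async fn list_parquet_files() -> Result<Vec<object_store::Entry>> {
    let store = build_backend("/tmp/demo/", HashMap::new())?;
    let lister = Lister::new(
        store,
        Source::Dir,
        "/tmp/demo/".to_string(),
        Some(Regex::new(r"\.parquet$").unwrap()),
    );
    lister.list().await
}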
60
src/common/datasource/src/object_store.rs
Normal file
@@ -0,0 +1,60 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
pub mod fs;
|
||||
pub mod s3;
|
||||
use std::collections::HashMap;
|
||||
|
||||
use object_store::ObjectStore;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use url::{ParseError, Url};
|
||||
|
||||
use self::fs::build_fs_backend;
|
||||
use self::s3::build_s3_backend;
|
||||
use crate::error::{self, Result};
|
||||
|
||||
pub const FS_SCHEMA: &str = "FS";
|
||||
pub const S3_SCHEMA: &str = "S3";
|
||||
|
||||
/// Parses `url` and returns `(schema, Option<host>, path)`.
|
||||
pub fn parse_url(url: &str) -> Result<(String, Option<String>, String)> {
|
||||
let parsed_url = Url::parse(url);
|
||||
match parsed_url {
|
||||
Ok(url) => Ok((
|
||||
url.scheme().to_string(),
|
||||
url.host_str().map(|s| s.to_string()),
|
||||
url.path().to_string(),
|
||||
)),
|
||||
Err(ParseError::RelativeUrlWithoutBase) => {
|
||||
Ok((FS_SCHEMA.to_string(), None, url.to_string()))
|
||||
}
|
||||
Err(err) => Err(err).context(error::InvalidUrlSnafu { url }),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn build_backend(url: &str, connection: HashMap<String, String>) -> Result<ObjectStore> {
|
||||
let (schema, host, _path) = parse_url(url)?;
|
||||
|
||||
match schema.to_uppercase().as_str() {
|
||||
S3_SCHEMA => {
|
||||
let host = host.context(error::EmptyHostPathSnafu {
|
||||
url: url.to_string(),
|
||||
})?;
|
||||
Ok(build_s3_backend(&host, "/", connection)?)
|
||||
}
|
||||
FS_SCHEMA => Ok(build_fs_backend("/")?),
|
||||
|
||||
_ => error::UnsupportedBackendProtocolSnafu { protocol: schema }.fail(),
|
||||
}
|
||||
}
|
||||
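A sketch of the URL handling above (bucket, endpoint, and credentials are placeholders; the option keys mirror the constants in the s3.rs file that follows):

use std::collections::HashMap;

use common_datasource::error::Result;
use common_datasource::object_store::{build_backend, parse_url};
use object_store::ObjectStore;

fn s3_backend() -> Result<ObjectStore> {
    // "s3://my-bucket/data/" splits into ("s3", Some("my-bucket"), "/data/").
    let (schema, host, path) = parse_url("s3://my-bucket/data/")?;
    assert_eq!(schema, "s3");
    assert_eq!(host.as_deref(), Some("my-bucket"));
    assert_eq!(path, "/data/");

    // Connection options are plain strings keyed by the constants in s3.rs.
    let connection: HashMap<String, String> = [
        ("ENDPOINT_URL", "http://127.0.0.1:9000"),
        ("ACCESS_KEY_ID", "test-key"),
        ("SECRET_ACCESS_KEY", "test-secret"),
    ]
    .into_iter()
    .map(|(k, v)| (k.to_string(), v.to_string()))
    .collect();

    build_backend("s3://my-bucket/data/", connection)
}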
28
src/common/datasource/src/object_store/fs.rs
Normal file
@@ -0,0 +1,28 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use object_store::services::Fs;
|
||||
use object_store::ObjectStore;
|
||||
use snafu::ResultExt;
|
||||
|
||||
use crate::error::{BuildBackendSnafu, Result};
|
||||
|
||||
pub fn build_fs_backend(root: &str) -> Result<ObjectStore> {
|
||||
let mut builder = Fs::default();
|
||||
builder.root(root);
|
||||
let object_store = ObjectStore::new(builder)
|
||||
.context(BuildBackendSnafu)?
|
||||
.finish();
|
||||
Ok(object_store)
|
||||
}
|
||||
79
src/common/datasource/src/object_store/s3.rs
Normal file
@@ -0,0 +1,79 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::HashMap;
|
||||
|
||||
use object_store::services::S3;
|
||||
use object_store::ObjectStore;
|
||||
use snafu::ResultExt;
|
||||
|
||||
use crate::error::{self, Result};
|
||||
|
||||
const ENDPOINT_URL: &str = "ENDPOINT_URL";
|
||||
const ACCESS_KEY_ID: &str = "ACCESS_KEY_ID";
|
||||
const SECRET_ACCESS_KEY: &str = "SECRET_ACCESS_KEY";
|
||||
const SESSION_TOKEN: &str = "SESSION_TOKEN";
|
||||
const REGION: &str = "REGION";
|
||||
const ENABLE_VIRTUAL_HOST_STYLE: &str = "ENABLE_VIRTUAL_HOST_STYLE";
|
||||
|
||||
pub fn build_s3_backend(
|
||||
host: &str,
|
||||
path: &str,
|
||||
connection: HashMap<String, String>,
|
||||
) -> Result<ObjectStore> {
|
||||
let mut builder = S3::default();
|
||||
|
||||
builder.root(path);
|
||||
|
||||
builder.bucket(host);
|
||||
|
||||
if let Some(endpoint) = connection.get(ENDPOINT_URL) {
|
||||
builder.endpoint(endpoint);
|
||||
}
|
||||
|
||||
if let Some(region) = connection.get(REGION) {
|
||||
builder.region(region);
|
||||
}
|
||||
|
||||
if let Some(key_id) = connection.get(ACCESS_KEY_ID) {
|
||||
builder.access_key_id(key_id);
|
||||
}
|
||||
|
||||
if let Some(key) = connection.get(SECRET_ACCESS_KEY) {
|
||||
builder.secret_access_key(key);
|
||||
}
|
||||
|
||||
if let Some(session_token) = connection.get(SESSION_TOKEN) {
|
||||
builder.security_token(session_token);
|
||||
}
|
||||
|
||||
if let Some(enable_str) = connection.get(ENABLE_VIRTUAL_HOST_STYLE) {
|
||||
let enable = enable_str.as_str().parse::<bool>().map_err(|e| {
|
||||
error::InvalidConnectionSnafu {
|
||||
msg: format!(
|
||||
"failed to parse the option {}={}, {}",
|
||||
ENABLE_VIRTUAL_HOST_STYLE, enable_str, e
|
||||
),
|
||||
}
|
||||
.build()
|
||||
})?;
|
||||
if enable {
|
||||
builder.enable_virtual_host_style();
|
||||
}
|
||||
}
|
||||
|
||||
Ok(ObjectStore::new(builder)
|
||||
.context(error::BuildBackendSnafu)?
|
||||
.finish())
|
||||
}
|
||||
125
src/common/datasource/src/util.rs
Normal file
@@ -0,0 +1,125 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
pub fn find_dir_and_filename(path: &str) -> (String, Option<String>) {
|
||||
if path.is_empty() {
|
||||
("/".to_string(), None)
|
||||
} else if path.ends_with('/') {
|
||||
(path.to_string(), None)
|
||||
} else if let Some(idx) = path.rfind('/') {
|
||||
(
|
||||
path[..idx + 1].to_string(),
|
||||
Some(path[idx + 1..].to_string()),
|
||||
)
|
||||
} else {
|
||||
("/".to_string(), Some(path.to_string()))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
use url::Url;
|
||||
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_parse_uri() {
|
||||
struct Test<'a> {
|
||||
uri: &'a str,
|
||||
expected_path: &'a str,
|
||||
expected_schema: &'a str,
|
||||
}
|
||||
|
||||
let tests = [
|
||||
Test {
|
||||
uri: "s3://bucket/to/path/",
|
||||
expected_path: "/to/path/",
|
||||
expected_schema: "s3",
|
||||
},
|
||||
Test {
|
||||
uri: "fs:///to/path/",
|
||||
expected_path: "/to/path/",
|
||||
expected_schema: "fs",
|
||||
},
|
||||
Test {
|
||||
uri: "fs:///to/path/file",
|
||||
expected_path: "/to/path/file",
|
||||
expected_schema: "fs",
|
||||
},
|
||||
];
|
||||
for test in tests {
|
||||
let parsed_uri = Url::parse(test.uri).unwrap();
|
||||
assert_eq!(parsed_uri.path(), test.expected_path);
|
||||
assert_eq!(parsed_uri.scheme(), test.expected_schema);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_path_and_dir() {
|
||||
let parsed = Url::from_file_path("/to/path/file").unwrap();
|
||||
assert_eq!(parsed.path(), "/to/path/file");
|
||||
|
||||
let parsed = Url::from_directory_path("/to/path/").unwrap();
|
||||
assert_eq!(parsed.path(), "/to/path/");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_find_dir_and_filename() {
|
||||
struct Test<'a> {
|
||||
path: &'a str,
|
||||
expected_dir: &'a str,
|
||||
expected_filename: Option<String>,
|
||||
}
|
||||
|
||||
let tests = [
|
||||
Test {
|
||||
path: "to/path/",
|
||||
expected_dir: "to/path/",
|
||||
expected_filename: None,
|
||||
},
|
||||
Test {
|
||||
path: "to/path/filename",
|
||||
expected_dir: "to/path/",
|
||||
expected_filename: Some("filename".into()),
|
||||
},
|
||||
Test {
|
||||
path: "/to/path/filename",
|
||||
expected_dir: "/to/path/",
|
||||
expected_filename: Some("filename".into()),
|
||||
},
|
||||
Test {
|
||||
path: "/",
|
||||
expected_dir: "/",
|
||||
expected_filename: None,
|
||||
},
|
||||
Test {
|
||||
path: "filename",
|
||||
expected_dir: "/",
|
||||
expected_filename: Some("filename".into()),
|
||||
},
|
||||
Test {
|
||||
path: "",
|
||||
expected_dir: "/",
|
||||
expected_filename: None,
|
||||
},
|
||||
];
|
||||
|
||||
for test in tests {
|
||||
let (path, filename) = find_dir_and_filename(test.path);
|
||||
assert_eq!(test.expected_dir, path);
|
||||
assert_eq!(test.expected_filename, filename)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -36,6 +36,8 @@ macro_rules! ok {
|
||||
}
|
||||
|
||||
pub(crate) fn process_range_fn(args: TokenStream, input: TokenStream) -> TokenStream {
|
||||
let mut result = TokenStream::new();
|
||||
|
||||
// extract arg map
|
||||
let arg_pairs = parse_macro_input!(args as AttributeArgs);
|
||||
let arg_span = arg_pairs[0].span();
|
||||
@@ -59,12 +61,17 @@ pub(crate) fn process_range_fn(args: TokenStream, input: TokenStream) -> TokenSt
|
||||
let arg_types = ok!(extract_input_types(inputs));
|
||||
|
||||
// build the struct and its impl block
|
||||
let struct_code = build_struct(
|
||||
attrs,
|
||||
vis,
|
||||
ok!(get_ident(&arg_map, "name", arg_span)),
|
||||
ok!(get_ident(&arg_map, "display_name", arg_span)),
|
||||
);
|
||||
// only do this when `display_name` is specified
|
||||
if let Ok(display_name) = get_ident(&arg_map, "display_name", arg_span) {
|
||||
let struct_code = build_struct(
|
||||
attrs,
|
||||
vis,
|
||||
ok!(get_ident(&arg_map, "name", arg_span)),
|
||||
display_name,
|
||||
);
|
||||
result.extend(struct_code);
|
||||
}
|
||||
|
||||
let calc_fn_code = build_calc_fn(
|
||||
ok!(get_ident(&arg_map, "name", arg_span)),
|
||||
arg_types,
|
||||
@@ -77,8 +84,6 @@ pub(crate) fn process_range_fn(args: TokenStream, input: TokenStream) -> TokenSt
|
||||
}
|
||||
.into();
|
||||
|
||||
let mut result = TokenStream::new();
|
||||
result.extend(struct_code);
|
||||
result.extend(calc_fn_code);
|
||||
result.extend(input_fn_code);
|
||||
result
|
||||
|
||||
@@ -11,11 +11,17 @@
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
use std::sync::Arc;
|
||||
mod to_unixtime;
|
||||
|
||||
use to_unixtime::ToUnixtimeFunction;
|
||||
|
||||
use crate::scalars::function_registry::FunctionRegistry;
|
||||
|
||||
pub(crate) struct TimestampFunction;
|
||||
|
||||
impl TimestampFunction {
|
||||
pub fn register(_registry: &FunctionRegistry) {}
|
||||
pub fn register(registry: &FunctionRegistry) {
|
||||
registry.register(Arc::new(ToUnixtimeFunction::default()));
|
||||
}
|
||||
}
|
||||
|
||||
148
src/common/function/src/scalars/timestamp/to_unixtime.rs
Normal file
@@ -0,0 +1,148 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::fmt;
|
||||
use std::str::FromStr;
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_query::error::{self, Result, UnsupportedInputDataTypeSnafu};
|
||||
use common_query::prelude::{Signature, Volatility};
|
||||
use common_time::timestamp::TimeUnit;
|
||||
use common_time::Timestamp;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::types::StringType;
|
||||
use datatypes::vectors::{Int64Vector, StringVector, Vector, VectorRef};
|
||||
use snafu::ensure;
|
||||
|
||||
use crate::scalars::function::{Function, FunctionContext};
|
||||
|
||||
#[derive(Clone, Debug, Default)]
|
||||
pub struct ToUnixtimeFunction;
|
||||
|
||||
const NAME: &str = "to_unixtime";
|
||||
|
||||
fn convert_to_seconds(arg: &str) -> Option<i64> {
|
||||
match Timestamp::from_str(arg) {
|
||||
Ok(ts) => {
|
||||
let sec_mul = (TimeUnit::Second.factor() / ts.unit().factor()) as i64;
|
||||
Some(ts.value().div_euclid(sec_mul))
|
||||
}
|
||||
Err(_err) => None,
|
||||
}
|
||||
}
|
||||
|
||||
impl Function for ToUnixtimeFunction {
|
||||
fn name(&self) -> &str {
|
||||
NAME
|
||||
}
|
||||
|
||||
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
|
||||
Ok(ConcreteDataType::timestamp_second_datatype())
|
||||
}
|
||||
|
||||
fn signature(&self) -> Signature {
|
||||
Signature::exact(
|
||||
vec![ConcreteDataType::String(StringType)],
|
||||
Volatility::Immutable,
|
||||
)
|
||||
}
|
||||
|
||||
fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
|
||||
ensure!(
|
||||
columns.len() == 1,
|
||||
error::InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"The length of the args is not correct, expect exactly one, have: {}",
|
||||
columns.len()
|
||||
),
|
||||
}
|
||||
);
|
||||
|
||||
match columns[0].data_type() {
|
||||
ConcreteDataType::String(_) => {
|
||||
let array = columns[0].to_arrow_array();
|
||||
let vector = StringVector::try_from_arrow_array(&array).unwrap();
|
||||
Ok(Arc::new(Int64Vector::from(
|
||||
(0..vector.len())
|
||||
.map(|i| convert_to_seconds(&vector.get(i).to_string()))
|
||||
.collect::<Vec<_>>(),
|
||||
)))
|
||||
}
|
||||
_ => UnsupportedInputDataTypeSnafu {
|
||||
function: NAME,
|
||||
datatypes: columns.iter().map(|c| c.data_type()).collect::<Vec<_>>(),
|
||||
}
|
||||
.fail(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for ToUnixtimeFunction {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(f, "TO_UNIXTIME")
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use common_query::prelude::TypeSignature;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::types::StringType;
|
||||
use datatypes::value::Value;
|
||||
use datatypes::vectors::StringVector;
|
||||
|
||||
use super::{ToUnixtimeFunction, *};
|
||||
use crate::scalars::Function;
|
||||
|
||||
#[test]
|
||||
fn test_to_unixtime() {
|
||||
let f = ToUnixtimeFunction::default();
|
||||
assert_eq!("to_unixtime", f.name());
|
||||
assert_eq!(
|
||||
ConcreteDataType::timestamp_second_datatype(),
|
||||
f.return_type(&[]).unwrap()
|
||||
);
|
||||
|
||||
assert!(matches!(f.signature(),
|
||||
Signature {
|
||||
type_signature: TypeSignature::Exact(valid_types),
|
||||
volatility: Volatility::Immutable
|
||||
} if valid_types == vec![ConcreteDataType::String(StringType)]
|
||||
));
|
||||
|
||||
let times = vec![
|
||||
Some("2023-03-01T06:35:02Z"),
|
||||
None,
|
||||
Some("2022-06-30T23:59:60Z"),
|
||||
Some("invalid_time_stamp"),
|
||||
];
|
||||
let results = vec![Some(1677652502), None, Some(1656633600), None];
|
||||
let args: Vec<VectorRef> = vec![Arc::new(StringVector::from(times.clone()))];
|
||||
let vector = f.eval(FunctionContext::default(), &args).unwrap();
|
||||
assert_eq!(4, vector.len());
|
||||
for (i, _t) in times.iter().enumerate() {
|
||||
let v = vector.get(i);
|
||||
if i == 1 || i == 3 {
|
||||
assert_eq!(Value::Null, v);
|
||||
continue;
|
||||
}
|
||||
match v {
|
||||
Value::Int64(ts) => {
|
||||
assert_eq!(ts, (*results.get(i).unwrap()).unwrap());
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -32,7 +32,7 @@ pub enum Error {
|
||||
DecodeInsert { source: DecodeError },
|
||||
|
||||
#[snafu(display("Illegal insert data"))]
|
||||
IllegalInsertData,
|
||||
IllegalInsertData { backtrace: Backtrace },
|
||||
|
||||
#[snafu(display("Column datatype error, source: {}", source))]
|
||||
ColumnDataType {
|
||||
|
||||
@@ -26,7 +26,7 @@ tower = "0.4"
|
||||
|
||||
[dev-dependencies]
|
||||
criterion = "0.4"
|
||||
rand = "0.8"
|
||||
rand.workspace = true
|
||||
|
||||
[[bench]]
|
||||
name = "bench_main"
|
||||
|
||||
@@ -6,6 +6,7 @@ license.workspace = true
|
||||
|
||||
[dependencies]
|
||||
async-trait.workspace = true
|
||||
async-stream.workspace = true
|
||||
common-error = { path = "../error" }
|
||||
common-runtime = { path = "../runtime" }
|
||||
common-telemetry = { path = "../telemetry" }
|
||||
|
||||
@@ -13,6 +13,7 @@
|
||||
// limitations under the License.
|
||||
|
||||
use std::any::Any;
|
||||
use std::string::FromUtf8Error;
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_error::prelude::*;
|
||||
@@ -47,10 +48,11 @@ pub enum Error {
|
||||
backtrace: Backtrace,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to put {}, source: {}", key, source))]
|
||||
#[snafu(display("Failed to put state, key: '{key}', source: {source}"))]
|
||||
PutState {
|
||||
key: String,
|
||||
source: object_store::Error,
|
||||
#[snafu(backtrace)]
|
||||
source: BoxedError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to delete {}, source: {}", key, source))]
|
||||
@@ -59,10 +61,18 @@ pub enum Error {
|
||||
source: object_store::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to list {}, source: {}", path, source))]
|
||||
#[snafu(display("Failed to delete keys: '{keys}', source: {source}"))]
|
||||
DeleteStates {
|
||||
keys: String,
|
||||
#[snafu(backtrace)]
|
||||
source: BoxedError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to list state, path: '{path}', source: {source}"))]
|
||||
ListState {
|
||||
path: String,
|
||||
source: object_store::Error,
|
||||
#[snafu(backtrace)]
|
||||
source: BoxedError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to read {}, source: {}", key, source))]
|
||||
@@ -107,6 +117,9 @@ pub enum Error {
|
||||
source: Arc<Error>,
|
||||
procedure_id: ProcedureId,
|
||||
},
|
||||
|
||||
#[snafu(display("Corrupted data, error: {source}"))]
|
||||
CorruptedData { source: FromUtf8Error },
|
||||
}
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
@@ -114,11 +127,13 @@ pub type Result<T> = std::result::Result<T, Error>;
|
||||
impl ErrorExt for Error {
|
||||
fn status_code(&self) -> StatusCode {
|
||||
match self {
|
||||
Error::External { source } => source.status_code(),
|
||||
Error::External { source }
|
||||
| Error::PutState { source, .. }
|
||||
| Error::DeleteStates { source, .. }
|
||||
| Error::ListState { source, .. } => source.status_code(),
|
||||
|
||||
Error::ToJson { .. }
|
||||
| Error::PutState { .. }
|
||||
| Error::DeleteState { .. }
|
||||
| Error::ListState { .. }
|
||||
| Error::ReadState { .. }
|
||||
| Error::FromJson { .. }
|
||||
| Error::RetryTimesExceeded { .. }
|
||||
@@ -127,7 +142,7 @@ impl ErrorExt for Error {
|
||||
Error::LoaderConflict { .. } | Error::DuplicateProcedure { .. } => {
|
||||
StatusCode::InvalidArguments
|
||||
}
|
||||
Error::ProcedurePanic { .. } => StatusCode::Unexpected,
|
||||
Error::ProcedurePanic { .. } | Error::CorruptedData { .. } => StatusCode::Unexpected,
|
||||
Error::ProcedureExec { source, .. } => source.status_code(),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -17,7 +17,7 @@
|
||||
pub mod error;
|
||||
pub mod local;
|
||||
mod procedure;
|
||||
mod store;
|
||||
pub mod store;
|
||||
pub mod watcher;
|
||||
|
||||
pub use crate::error::{Error, Result};
|
||||
|
||||
@@ -22,7 +22,6 @@ use std::time::Duration;
|
||||
use async_trait::async_trait;
|
||||
use backon::ExponentialBuilder;
|
||||
use common_telemetry::logging;
|
||||
use object_store::ObjectStore;
|
||||
use snafu::ensure;
|
||||
use tokio::sync::watch::{self, Receiver, Sender};
|
||||
use tokio::sync::Notify;
|
||||
@@ -31,7 +30,7 @@ use crate::error::{DuplicateProcedureSnafu, LoaderConflictSnafu, Result};
|
||||
use crate::local::lock::LockMap;
|
||||
use crate::local::runner::Runner;
|
||||
use crate::procedure::BoxedProcedureLoader;
|
||||
use crate::store::{ObjectStateStore, ProcedureMessage, ProcedureStore, StateStoreRef};
|
||||
use crate::store::{ProcedureMessage, ProcedureStore, StateStoreRef};
|
||||
use crate::{
|
||||
BoxedProcedure, ContextProvider, LockKey, ProcedureId, ProcedureManager, ProcedureState,
|
||||
ProcedureWithId, Watcher,
|
||||
@@ -291,12 +290,19 @@ impl ManagerContext {
|
||||
/// Config for [LocalManager].
|
||||
#[derive(Debug)]
|
||||
pub struct ManagerConfig {
|
||||
/// Object store
|
||||
pub object_store: ObjectStore,
|
||||
pub max_retry_times: usize,
|
||||
pub retry_delay: Duration,
|
||||
}
|
||||
|
||||
impl Default for ManagerConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
max_retry_times: 3,
|
||||
retry_delay: Duration::from_millis(500),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A [ProcedureManager] that maintains procedure states locally.
|
||||
pub struct LocalManager {
|
||||
manager_ctx: Arc<ManagerContext>,
|
||||
@@ -307,10 +313,10 @@ pub struct LocalManager {
|
||||
|
||||
impl LocalManager {
|
||||
/// Create a new [LocalManager] with specific `config`.
|
||||
pub fn new(config: ManagerConfig) -> LocalManager {
|
||||
pub fn new(config: ManagerConfig, state_store: StateStoreRef) -> LocalManager {
|
||||
LocalManager {
|
||||
manager_ctx: Arc::new(ManagerContext::new()),
|
||||
state_store: Arc::new(ObjectStateStore::new(config.object_store)),
|
||||
state_store,
|
||||
max_retry_times: config.max_retry_times,
|
||||
retry_delay: config.retry_delay,
|
||||
}
|
||||
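A minimal sketch of constructing a manager under the new signature (module paths are assumptions; the values mirror the tests below):

use std::sync::Arc;
use std::time::Duration;

use common_procedure::local::{LocalManager, ManagerConfig}; // paths assumed
use common_procedure::store::state_store::ObjectStateStore;
use object_store::ObjectStore;

// The state store is now injected explicitly instead of being built from an
// ObjectStore carried inside ManagerConfig.
fn build_manager(object_store: ObjectStore) -> LocalManager {
    let state_store = Arc::new(ObjectStateStore::new(object_store));
    let config = ManagerConfig {
        max_retry_times: 3,
        retry_delay: Duration::from_millis(500),
    };
    LocalManager::new(config, state_store)
}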
@@ -423,7 +429,7 @@ impl ProcedureManager for LocalManager {
|
||||
mod test_util {
|
||||
use common_test_util::temp_dir::TempDir;
|
||||
use object_store::services::Fs as Builder;
|
||||
use object_store::ObjectStoreBuilder;
|
||||
use object_store::ObjectStore;
|
||||
|
||||
use super::*;
|
||||
|
||||
@@ -433,8 +439,9 @@ mod test_util {
|
||||
|
||||
pub(crate) fn new_object_store(dir: &TempDir) -> ObjectStore {
|
||||
let store_dir = dir.path().to_str().unwrap();
|
||||
let accessor = Builder::default().root(store_dir).build().unwrap();
|
||||
ObjectStore::new(accessor).finish()
|
||||
let mut builder = Builder::default();
|
||||
builder.root(store_dir);
|
||||
ObjectStore::new(builder).unwrap().finish()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -446,6 +453,7 @@ mod tests {
|
||||
|
||||
use super::*;
|
||||
use crate::error::Error;
|
||||
use crate::store::ObjectStateStore;
|
||||
use crate::{Context, Procedure, Status};
|
||||
|
||||
#[test]
|
||||
@@ -554,11 +562,11 @@ mod tests {
|
||||
fn test_register_loader() {
|
||||
let dir = create_temp_dir("register");
|
||||
let config = ManagerConfig {
|
||||
object_store: test_util::new_object_store(&dir),
|
||||
max_retry_times: 3,
|
||||
retry_delay: Duration::from_millis(500),
|
||||
};
|
||||
let manager = LocalManager::new(config);
|
||||
let state_store = Arc::new(ObjectStateStore::new(test_util::new_object_store(&dir)));
|
||||
let manager = LocalManager::new(config, state_store);
|
||||
|
||||
manager
|
||||
.register_loader("ProcedureToLoad", ProcedureToLoad::loader())
|
||||
@@ -575,11 +583,11 @@ mod tests {
|
||||
let dir = create_temp_dir("recover");
|
||||
let object_store = test_util::new_object_store(&dir);
|
||||
let config = ManagerConfig {
|
||||
object_store: object_store.clone(),
|
||||
max_retry_times: 3,
|
||||
retry_delay: Duration::from_millis(500),
|
||||
};
|
||||
let manager = LocalManager::new(config);
|
||||
let state_store = Arc::new(ObjectStateStore::new(object_store.clone()));
|
||||
let manager = LocalManager::new(config, state_store);
|
||||
|
||||
manager
|
||||
.register_loader("ProcedureToLoad", ProcedureToLoad::loader())
|
||||
@@ -621,11 +629,11 @@ mod tests {
|
||||
async fn test_submit_procedure() {
|
||||
let dir = create_temp_dir("submit");
|
||||
let config = ManagerConfig {
|
||||
object_store: test_util::new_object_store(&dir),
|
||||
max_retry_times: 3,
|
||||
retry_delay: Duration::from_millis(500),
|
||||
};
|
||||
let manager = LocalManager::new(config);
|
||||
let state_store = Arc::new(ObjectStateStore::new(test_util::new_object_store(&dir)));
|
||||
let manager = LocalManager::new(config, state_store);
|
||||
|
||||
let procedure_id = ProcedureId::random();
|
||||
assert!(manager
|
||||
@@ -669,11 +677,11 @@ mod tests {
|
||||
async fn test_state_changed_on_err() {
|
||||
let dir = create_temp_dir("on_err");
|
||||
let config = ManagerConfig {
|
||||
object_store: test_util::new_object_store(&dir),
|
||||
max_retry_times: 3,
|
||||
retry_delay: Duration::from_millis(500),
|
||||
};
|
||||
let manager = LocalManager::new(config);
|
||||
let state_store = Arc::new(ObjectStateStore::new(test_util::new_object_store(&dir)));
|
||||
let manager = LocalManager::new(config, state_store);
|
||||
|
||||
#[derive(Debug)]
|
||||
struct MockProcedure {
|
||||
|
||||
@@ -473,8 +473,7 @@ mod tests {
|
||||
|
||||
async fn check_files(object_store: &ObjectStore, procedure_id: ProcedureId, files: &[&str]) {
|
||||
let dir = format!("{procedure_id}/");
|
||||
let object = object_store.object(&dir);
|
||||
let lister = object.list().await.unwrap();
|
||||
let lister = object_store.list(&dir).await.unwrap();
|
||||
let mut files_in_dir: Vec<_> = lister
|
||||
.map_ok(|de| de.name().to_string())
|
||||
.try_collect()
|
||||
|
||||
@@ -26,7 +26,7 @@ use crate::error::{Result, ToJsonSnafu};
|
||||
pub(crate) use crate::store::state_store::{ObjectStateStore, StateStoreRef};
|
||||
use crate::{BoxedProcedure, ProcedureId};
|
||||
|
||||
mod state_store;
|
||||
pub mod state_store;
|
||||
|
||||
/// Serialized data of a procedure.
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
@@ -248,15 +248,15 @@ mod tests {
|
||||
use async_trait::async_trait;
|
||||
use common_test_util::temp_dir::{create_temp_dir, TempDir};
|
||||
use object_store::services::Fs as Builder;
|
||||
use object_store::ObjectStoreBuilder;
|
||||
|
||||
use super::*;
|
||||
use crate::{Context, LockKey, Procedure, Status};
|
||||
|
||||
fn procedure_store_for_test(dir: &TempDir) -> ProcedureStore {
|
||||
let store_dir = dir.path().to_str().unwrap();
|
||||
let accessor = Builder::default().root(store_dir).build().unwrap();
|
||||
let object_store = ObjectStore::new(accessor).finish();
|
||||
let mut builder = Builder::default();
|
||||
builder.root(store_dir);
|
||||
let object_store = ObjectStore::new(builder).unwrap().finish();
|
||||
|
||||
ProcedureStore::from(object_store)
|
||||
}
|
||||
|
||||
@@ -15,22 +15,25 @@
|
||||
use std::pin::Pin;
|
||||
use std::sync::Arc;
|
||||
|
||||
use async_stream::try_stream;
|
||||
use async_trait::async_trait;
|
||||
use futures::{Stream, TryStreamExt};
|
||||
use object_store::{ObjectMode, ObjectStore};
|
||||
use common_error::ext::PlainError;
|
||||
use common_error::prelude::{BoxedError, StatusCode};
|
||||
use futures::{Stream, StreamExt};
|
||||
use object_store::{EntryMode, Metakey, ObjectStore};
|
||||
use snafu::ResultExt;
|
||||
|
||||
use crate::error::{DeleteStateSnafu, Error, PutStateSnafu, Result};
|
||||
use crate::error::{DeleteStateSnafu, ListStateSnafu, PutStateSnafu, Result};
|
||||
|
||||
/// Key value from state store.
|
||||
type KeyValue = (String, Vec<u8>);
|
||||
pub type KeyValue = (String, Vec<u8>);
|
||||
|
||||
/// Stream that yields [KeyValue].
|
||||
type KeyValueStream = Pin<Box<dyn Stream<Item = Result<KeyValue>> + Send>>;
|
||||
pub type KeyValueStream = Pin<Box<dyn Stream<Item = Result<KeyValue>> + Send>>;
|
||||
|
||||
/// Storage layer for persisting procedure's state.
|
||||
#[async_trait]
|
||||
pub(crate) trait StateStore: Send + Sync {
|
||||
pub trait StateStore: Send + Sync {
|
||||
/// Puts `key` and `value` into the store.
|
||||
async fn put(&self, key: &str, value: Vec<u8>) -> Result<()>;
|
||||
|
||||
@@ -50,13 +53,13 @@ pub(crate) type StateStoreRef = Arc<dyn StateStore>;
|
||||
|
||||
/// [StateStore] based on [ObjectStore].
|
||||
#[derive(Debug)]
|
||||
pub(crate) struct ObjectStateStore {
|
||||
pub struct ObjectStateStore {
|
||||
store: ObjectStore,
|
||||
}
|
||||
|
||||
impl ObjectStateStore {
|
||||
/// Returns a new [ObjectStateStore] with specific `store`.
|
||||
pub(crate) fn new(store: ObjectStore) -> ObjectStateStore {
|
||||
pub fn new(store: ObjectStore) -> ObjectStateStore {
|
||||
ObjectStateStore { store }
|
||||
}
|
||||
}
|
||||
@@ -64,49 +67,83 @@ impl ObjectStateStore {
|
||||
#[async_trait]
|
||||
impl StateStore for ObjectStateStore {
|
||||
async fn put(&self, key: &str, value: Vec<u8>) -> Result<()> {
|
||||
let object = self.store.object(key);
|
||||
object.write(value).await.context(PutStateSnafu { key })
|
||||
self.store
|
||||
.write(key, value)
|
||||
.await
|
||||
.map_err(|e| {
|
||||
BoxedError::new(PlainError::new(
|
||||
e.to_string(),
|
||||
StatusCode::StorageUnavailable,
|
||||
))
|
||||
})
|
||||
.context(PutStateSnafu { key })
|
||||
}
|
||||
|
||||
async fn walk_top_down(&self, path: &str) -> Result<KeyValueStream> {
|
||||
let path_string = path.to_string();
|
||||
|
||||
let lister = self
|
||||
let mut lister = self
|
||||
.store
|
||||
.object(path)
|
||||
.scan()
|
||||
.scan(path)
|
||||
.await
|
||||
.map_err(|e| Error::ListState {
|
||||
.map_err(|e| {
|
||||
BoxedError::new(PlainError::new(
|
||||
e.to_string(),
|
||||
StatusCode::StorageUnavailable,
|
||||
))
|
||||
})
|
||||
.with_context(|_| ListStateSnafu {
|
||||
path: path_string.clone(),
|
||||
source: e,
|
||||
})?;
|
||||
|
||||
let stream = lister
|
||||
.try_filter_map(|entry| async move {
|
||||
let store = self.store.clone();
|
||||
|
||||
let stream = try_stream!({
|
||||
while let Some(res) = lister.next().await {
|
||||
let entry = res
|
||||
.map_err(|e| {
|
||||
BoxedError::new(PlainError::new(
|
||||
e.to_string(),
|
||||
StatusCode::StorageUnavailable,
|
||||
))
|
||||
})
|
||||
.context(ListStateSnafu { path: &path_string })?;
|
||||
let key = entry.path();
|
||||
let key_value = match entry.mode().await? {
|
||||
ObjectMode::FILE => {
|
||||
let value = entry.read().await?;
|
||||
|
||||
Some((key.to_string(), value))
|
||||
}
|
||||
ObjectMode::DIR | ObjectMode::Unknown => None,
|
||||
};
|
||||
|
||||
Ok(key_value)
|
||||
})
|
||||
.map_err(move |e| Error::ListState {
|
||||
path: path_string.clone(),
|
||||
source: e,
|
||||
});
|
||||
let metadata = store
|
||||
.metadata(&entry, Metakey::Mode)
|
||||
.await
|
||||
.map_err(|e| {
|
||||
BoxedError::new(PlainError::new(
|
||||
e.to_string(),
|
||||
StatusCode::StorageUnavailable,
|
||||
))
|
||||
})
|
||||
.context(ListStateSnafu { path: key })?;
|
||||
if let EntryMode::FILE = metadata.mode() {
|
||||
let value = store
|
||||
.read(key)
|
||||
.await
|
||||
.map_err(|e| {
|
||||
BoxedError::new(PlainError::new(
|
||||
e.to_string(),
|
||||
StatusCode::StorageUnavailable,
|
||||
))
|
||||
})
|
||||
.context(ListStateSnafu { path: key })?;
|
||||
yield (key.to_string(), value);
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
Ok(Box::pin(stream))
|
||||
}
|
||||
|
||||
async fn delete(&self, keys: &[String]) -> Result<()> {
|
||||
for key in keys {
|
||||
let object = self.store.object(key);
|
||||
object.delete().await.context(DeleteStateSnafu { key })?;
|
||||
self.store
|
||||
.delete(key)
|
||||
.await
|
||||
.context(DeleteStateSnafu { key })?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
@@ -116,8 +153,8 @@ impl StateStore for ObjectStateStore {
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use common_test_util::temp_dir::create_temp_dir;
|
||||
use futures_util::TryStreamExt;
|
||||
use object_store::services::Fs as Builder;
|
||||
use object_store::ObjectStoreBuilder;
|
||||
|
||||
use super::*;
|
||||
|
||||
@@ -125,8 +162,10 @@ mod tests {
|
||||
async fn test_object_state_store() {
|
||||
let dir = create_temp_dir("state_store");
|
||||
let store_dir = dir.path().to_str().unwrap();
|
||||
let accessor = Builder::default().root(store_dir).build().unwrap();
|
||||
let object_store = ObjectStore::new(accessor).finish();
|
||||
let mut builder = Builder::default();
|
||||
builder.root(store_dir);
|
||||
|
||||
let object_store = ObjectStore::new(builder).unwrap().finish();
|
||||
let state_store = ObjectStateStore::new(object_store);
|
||||
|
||||
let data: Vec<_> = state_store
|
||||
|
||||
@@ -17,6 +17,7 @@ use std::any::Any;
|
||||
|
||||
use common_error::ext::BoxedError;
|
||||
use common_error::prelude::*;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
|
||||
@@ -70,6 +71,26 @@ pub enum Error {
|
||||
source: datafusion_common::DataFusionError,
|
||||
backtrace: Backtrace,
|
||||
},
|
||||
|
||||
#[snafu(display("Column {} not exists in table {}", column_name, table_name))]
|
||||
ColumnNotExists {
|
||||
column_name: String,
|
||||
table_name: String,
|
||||
backtrace: Backtrace,
|
||||
},
|
||||
|
||||
#[snafu(display(
|
||||
"Failed to cast vector of type '{:?}' to type '{:?}', source: {}",
|
||||
from_type,
|
||||
to_type,
|
||||
source
|
||||
))]
|
||||
CastVector {
|
||||
from_type: ConcreteDataType,
|
||||
to_type: ConcreteDataType,
|
||||
#[snafu(backtrace)]
|
||||
source: datatypes::error::Error,
|
||||
},
|
||||
}
|
||||
|
||||
impl ErrorExt for Error {
|
||||
@@ -81,11 +102,14 @@ impl ErrorExt for Error {
|
||||
| Error::CreateRecordBatches { .. }
|
||||
| Error::PollStream { .. }
|
||||
| Error::Format { .. }
|
||||
| Error::InitRecordbatchStream { .. } => StatusCode::Internal,
|
||||
| Error::InitRecordbatchStream { .. }
|
||||
| Error::ColumnNotExists { .. } => StatusCode::Internal,
|
||||
|
||||
Error::External { source } => source.status_code(),
|
||||
|
||||
Error::SchemaConversion { source, .. } => source.status_code(),
|
||||
Error::SchemaConversion { source, .. } | Error::CastVector { source, .. } => {
|
||||
source.status_code()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -12,14 +12,16 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::HashMap;
|
||||
|
||||
use datatypes::schema::SchemaRef;
|
||||
use datatypes::value::Value;
|
||||
use datatypes::vectors::{Helper, VectorRef};
|
||||
use serde::ser::{Error, SerializeStruct};
|
||||
use serde::{Serialize, Serializer};
|
||||
use snafu::ResultExt;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
|
||||
use crate::error::{self, Result};
|
||||
use crate::error::{self, CastVectorSnafu, ColumnNotExistsSnafu, Result};
|
||||
use crate::DfRecordBatch;
|
||||
|
||||
/// A two-dimensional batch of column-oriented data with a defined schema.
|
||||
@@ -108,6 +110,41 @@ impl RecordBatch {
|
||||
pub fn rows(&self) -> RecordBatchRowIterator<'_> {
|
||||
RecordBatchRowIterator::new(self)
|
||||
}
|
||||
|
||||
pub fn column_vectors(
|
||||
&self,
|
||||
table_name: &str,
|
||||
table_schema: SchemaRef,
|
||||
) -> Result<HashMap<String, VectorRef>> {
|
||||
let mut vectors = HashMap::with_capacity(self.num_columns());
|
||||
|
||||
// column schemas in recordbatch must match its vectors, otherwise it's corrupted
|
||||
for (vector_schema, vector) in self.schema.column_schemas().iter().zip(self.columns.iter())
|
||||
{
|
||||
let column_name = &vector_schema.name;
|
||||
let column_schema =
|
||||
table_schema
|
||||
.column_schema_by_name(column_name)
|
||||
.context(ColumnNotExistsSnafu {
|
||||
table_name,
|
||||
column_name,
|
||||
})?;
|
||||
let vector = if vector_schema.data_type != column_schema.data_type {
|
||||
vector
|
||||
.cast(&column_schema.data_type)
|
||||
.with_context(|_| CastVectorSnafu {
|
||||
from_type: vector.data_type(),
|
||||
to_type: column_schema.data_type.clone(),
|
||||
})?
|
||||
} else {
|
||||
vector.clone()
|
||||
};
|
||||
|
||||
vectors.insert(column_name.clone(), vector);
|
||||
}
|
||||
|
||||
Ok(vectors)
|
||||
}
|
||||
}
|
||||
|
||||
impl Serialize for RecordBatch {
|
||||
|
||||
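A hedged sketch of the new column_vectors helper (the table name and crate paths are assumptions):

use std::collections::HashMap;

use common_recordbatch::error::Result;
use common_recordbatch::RecordBatch;
use datatypes::schema::SchemaRef;
use datatypes::vectors::VectorRef;

// Re-key the batch's columns by name, casting any column whose type differs
// from the destination table's schema (the CastVector error path above).
fn align_to_table(
    batch: &RecordBatch,
    table_schema: SchemaRef,
) -> Result<HashMap<String, VectorRef>> {
    batch.column_vectors("my_table", table_schema)
}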
@@ -5,6 +5,7 @@ edition.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[dependencies]
|
||||
async-trait.workspace = true
|
||||
common-error = { path = "../error" }
|
||||
common-telemetry = { path = "../telemetry" }
|
||||
metrics = "0.20"
|
||||
@@ -12,6 +13,7 @@ once_cell = "1.12"
|
||||
paste.workspace = true
|
||||
snafu.workspace = true
|
||||
tokio.workspace = true
|
||||
tokio-util.workspace = true
|
||||
|
||||
[dev-dependencies]
|
||||
tokio-test = "0.4"
|
||||
|
||||
@@ -15,6 +15,7 @@
|
||||
use std::any::Any;
|
||||
|
||||
use common_error::prelude::*;
|
||||
use tokio::task::JoinError;
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
|
||||
@@ -26,6 +27,19 @@ pub enum Error {
|
||||
source: std::io::Error,
|
||||
backtrace: Backtrace,
|
||||
},
|
||||
#[snafu(display("Repeated task {} not started yet", name))]
|
||||
IllegalState { name: String, backtrace: Backtrace },
|
||||
|
||||
#[snafu(display(
|
||||
"Failed to wait for repeated task {} to stop, source: {}",
|
||||
name,
|
||||
source
|
||||
))]
|
||||
WaitGcTaskStop {
|
||||
name: String,
|
||||
source: JoinError,
|
||||
backtrace: Backtrace,
|
||||
},
|
||||
}
|
||||
|
||||
impl ErrorExt for Error {
|
||||
|
||||
@@ -14,7 +14,8 @@
|
||||
|
||||
pub mod error;
|
||||
mod global;
|
||||
pub mod metric;
|
||||
mod metrics;
|
||||
mod repeated_task;
|
||||
pub mod runtime;
|
||||
|
||||
pub use global::{
|
||||
@@ -23,4 +24,5 @@ pub use global::{
|
||||
spawn_read, spawn_write, write_runtime,
|
||||
};
|
||||
|
||||
pub use crate::repeated_task::{RepeatedTask, TaskFunction, TaskFunctionRef};
|
||||
pub use crate::runtime::{Builder, JoinError, JoinHandle, Runtime};
|
||||
|
||||
174
src/common/runtime/src/repeated_task.rs
Normal file
@@ -0,0 +1,174 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use common_error::prelude::ErrorExt;
|
||||
use common_telemetry::logging;
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
use tokio::sync::Mutex;
|
||||
use tokio::task::JoinHandle;
|
||||
use tokio_util::sync::CancellationToken;
|
||||
|
||||
use crate::error::{IllegalStateSnafu, Result, WaitGcTaskStopSnafu};
|
||||
use crate::Runtime;
|
||||
|
||||
#[async_trait::async_trait]
|
||||
pub trait TaskFunction<E: ErrorExt> {
|
||||
async fn call(&self) -> std::result::Result<(), E>;
|
||||
fn name(&self) -> &str;
|
||||
}
|
||||
|
||||
pub type TaskFunctionRef<E> = Arc<dyn TaskFunction<E> + Send + Sync>;
|
||||
|
||||
pub struct RepeatedTask<E> {
|
||||
cancel_token: Mutex<Option<CancellationToken>>,
|
||||
task_handle: Mutex<Option<JoinHandle<()>>>,
|
||||
started: AtomicBool,
|
||||
interval: Duration,
|
||||
    task_fn: TaskFunctionRef<E>,
}

impl<E: ErrorExt> std::fmt::Display for RepeatedTask<E> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "RepeatedTask({})", self.task_fn.name())
    }
}

impl<E: ErrorExt> std::fmt::Debug for RepeatedTask<E> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_tuple("RepeatedTask")
            .field(&self.task_fn.name())
            .finish()
    }
}

impl<E: ErrorExt + 'static> RepeatedTask<E> {
    pub fn new(interval: Duration, task_fn: TaskFunctionRef<E>) -> Self {
        Self {
            cancel_token: Mutex::new(None),
            task_handle: Mutex::new(None),
            started: AtomicBool::new(false),
            interval,
            task_fn,
        }
    }

    pub fn started(&self) -> bool {
        self.started.load(Ordering::Relaxed)
    }

    pub async fn start(&self, runtime: Runtime) -> Result<()> {
        let token = CancellationToken::new();
        let interval = self.interval;
        let child = token.child_token();
        let task_fn = self.task_fn.clone();
        // TODO(hl): Maybe spawn to a blocking runtime.
        let handle = runtime.spawn(async move {
            loop {
                tokio::select! {
                    _ = tokio::time::sleep(interval) => {}
                    _ = child.cancelled() => {
                        return;
                    }
                }
                if let Err(e) = task_fn.call().await {
                    logging::error!(e; "Failed to run repeated task: {}", task_fn.name());
                }
            }
        });
        *self.cancel_token.lock().await = Some(token);
        *self.task_handle.lock().await = Some(handle);
        self.started.store(true, Ordering::Relaxed);

        logging::debug!(
            "Repeated task {} started with interval: {:?}",
            self.task_fn.name(),
            self.interval
        );

        Ok(())
    }

    pub async fn stop(&self) -> Result<()> {
        let name = self.task_fn.name();
        ensure!(
            self.started
                .compare_exchange(true, false, Ordering::Relaxed, Ordering::Relaxed)
                .is_ok(),
            IllegalStateSnafu { name }
        );
        let token = self
            .cancel_token
            .lock()
            .await
            .take()
            .context(IllegalStateSnafu { name })?;
        let handle = self
            .task_handle
            .lock()
            .await
            .take()
            .context(IllegalStateSnafu { name })?;

        token.cancel();
        handle.await.context(WaitGcTaskStopSnafu { name })?;

        logging::debug!("Repeated task {} stopped", name);
        Ok(())
    }
}

#[cfg(test)]
mod tests {
    use std::sync::atomic::AtomicI32;

    use super::*;

    struct TickTask {
        n: AtomicI32,
    }

    #[async_trait::async_trait]
    impl TaskFunction<crate::error::Error> for TickTask {
        fn name(&self) -> &str {
            "test"
        }

        async fn call(&self) -> Result<()> {
            self.n.fetch_add(1, Ordering::Relaxed);

            Ok(())
        }
    }

    #[tokio::test]
    async fn test_repeated_task() {
        common_telemetry::init_default_ut_logging();

        let task_fn = Arc::new(TickTask {
            n: AtomicI32::new(0),
        });

        let task = RepeatedTask::new(Duration::from_millis(100), task_fn.clone());

        task.start(crate::bg_runtime()).await.unwrap();
        tokio::time::sleep(Duration::from_millis(550)).await;
        task.stop().await.unwrap();

        assert_eq!(task_fn.n.load(Ordering::Relaxed), 5);
    }
}
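The `RepeatedTask` above pairs a `TaskFunction` with a fixed interval and drives it on a `Runtime` until `stop()` is called. A minimal sketch of how a caller might register a periodic cleanup job follows; `ManifestGcTask` and the 30-second interval are illustrative assumptions, not part of this diff, and the real call sites live elsewhere in the codebase.

// Hedged sketch only: a hypothetical periodic task built on the types above.
struct ManifestGcTask;

#[async_trait::async_trait]
impl TaskFunction<crate::error::Error> for ManifestGcTask {
    fn name(&self) -> &str {
        "manifest-gc"
    }

    async fn call(&self) -> Result<()> {
        // Purge stale manifest logs / checkpoints here.
        Ok(())
    }
}

async fn spawn_gc(runtime: Runtime) -> Result<RepeatedTask<crate::error::Error>> {
    // Run the task every 30 seconds; the caller must keep the handle alive
    // and eventually call `stop()` to cancel the loop.
    let task = RepeatedTask::new(Duration::from_secs(30), Arc::new(ManifestGcTask));
    task.start(runtime).await?;
    Ok(task)
}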
@@ -24,7 +24,7 @@ use tokio::sync::oneshot;
pub use tokio::task::{JoinError, JoinHandle};

use crate::error::*;
use crate::metric::*;
use crate::metrics::*;

/// A runtime to run future tasks
#[derive(Clone, Debug)]

@@ -581,7 +581,7 @@ pub fn expression_from_df_expr(
        | Expr::ScalarSubquery(..)
        | Expr::Placeholder { .. }
        | Expr::QualifiedWildcard { .. } => todo!(),
        Expr::GroupingSet(_) => UnsupportedExprSnafu {
        Expr::GroupingSet(_) | Expr::OuterReferenceColumn(_, _) => UnsupportedExprSnafu {
            name: expr.to_string(),
        }
        .fail()?,

@@ -22,9 +22,10 @@ use catalog::CatalogManagerRef;
use common_catalog::format_full_table_name;
use common_telemetry::debug;
use datafusion::arrow::datatypes::SchemaRef as ArrowSchemaRef;
use datafusion::common::{DFField, DFSchema, OwnedTableReference};
use datafusion::common::{DFField, DFSchema};
use datafusion::datasource::DefaultTableSource;
use datafusion::physical_plan::project_schema;
use datafusion::sql::TableReference;
use datafusion_expr::{Filter, LogicalPlan, TableScan};
use prost::Message;
use session::context::QueryContext;

@@ -240,13 +241,13 @@ impl DFLogicalSubstraitConvertor {
            .projection
            .map(|mask_expr| self.convert_mask_expression(mask_expr));

        let table_ref = OwnedTableReference::Full {
            catalog: catalog_name.clone(),
            schema: schema_name.clone(),
            table: table_name.clone(),
        };
        let table_ref = TableReference::full(
            catalog_name.clone(),
            schema_name.clone(),
            table_name.clone(),
        );
        let adapter = table_provider
            .resolve_table(table_ref)
            .resolve_table(table_ref.clone())
            .await
            .with_context(|_| ResolveTableSnafu {
                table_name: format_full_table_name(&catalog_name, &schema_name, &table_name),

@@ -272,14 +273,13 @@ impl DFLogicalSubstraitConvertor {
        };

        // Calculate the projected schema
        let qualified = &format_full_table_name(&catalog_name, &schema_name, &table_name);
        let projected_schema = Arc::new(
            project_schema(&stored_schema, projection.as_ref())
                .and_then(|x| {
                    DFSchema::new_with_metadata(
                        x.fields()
                            .iter()
                            .map(|f| DFField::from_qualified(qualified, f.clone()))
                            .map(|f| DFField::from_qualified(table_ref.clone(), f.clone()))
                            .collect(),
                        x.metadata().clone(),
                    )

@@ -291,7 +291,7 @@ impl DFLogicalSubstraitConvertor {

        // TODO(ruihang): Support limit(fetch)
        Ok(LogicalPlan::TableScan(TableScan {
            table_name: qualified.to_string(),
            table_name: table_ref,
            source: adapter,
            projection,
            projected_schema,

@@ -620,10 +620,13 @@ mod test {
        let projected_schema =
            Arc::new(DFSchema::new_with_metadata(projected_fields, Default::default()).unwrap());

        let table_name = TableReference::full(
            DEFAULT_CATALOG_NAME,
            DEFAULT_SCHEMA_NAME,
            DEFAULT_TABLE_NAME,
        );
        let table_scan_plan = LogicalPlan::TableScan(TableScan {
            table_name: format!(
                "{DEFAULT_CATALOG_NAME}.{DEFAULT_SCHEMA_NAME}.{DEFAULT_TABLE_NAME}",
            ),
            table_name,
            source: adapter,
            projection: Some(projection),
            projected_schema,

@@ -12,7 +12,7 @@ deadlock_detection = ["parking_lot"]
backtrace = "0.3"
common-error = { path = "../error" }
console-subscriber = { version = "0.1", optional = true }
metrics = "0.20"
metrics = "0.20.1"
metrics-exporter-prometheus = { version = "0.11", default-features = false }
once_cell = "1.10"
opentelemetry = { version = "0.17", default-features = false, features = [

@@ -12,4 +12,4 @@ serde_json = "1.0"
snafu = { version = "0.7", features = ["backtraces"] }

[dev-dependencies]
rand = "0.8"
rand.workspace = true

@@ -1,4 +1,3 @@
#![feature(int_roundings)]
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");

@@ -26,6 +26,7 @@ use snafu::{OptionExt, ResultExt};

use crate::error;
use crate::error::{ArithmeticOverflowSnafu, Error, ParseTimestampSnafu, TimestampOverflowSnafu};
use crate::util::div_ceil;

#[derive(Debug, Clone, Default, Copy, Serialize, Deserialize)]
pub struct Timestamp {

@@ -143,7 +144,7 @@ impl Timestamp {
            Some(Timestamp::new(value, unit))
        } else {
            let mul = unit.factor() / self.unit().factor();
            Some(Timestamp::new(self.value.div_ceil(mul as i64), unit))
            Some(Timestamp::new(div_ceil(self.value, mul as i64), unit))
        }
    }

@@ -17,6 +17,17 @@ pub fn current_time_millis() -> i64 {
    chrono::Utc::now().timestamp_millis()
}

/// Port of rust unstable features `int_roundings`.
pub(crate) fn div_ceil(this: i64, rhs: i64) -> i64 {
    let d = this / rhs;
    let r = this % rhs;
    if r > 0 && rhs > 0 {
        d + 1
    } else {
        d
    }
}

#[cfg(test)]
mod tests {
    use std::time::{self, SystemTime};

@@ -42,4 +53,10 @@ mod tests {
        assert_eq!(datetime_std.hour(), datetime_now.hour());
        assert_eq!(datetime_std.minute(), datetime_now.minute());
    }

    #[test]
    fn test_div_ceil() {
        let v0 = 9223372036854676001;
        assert_eq!(9223372036854677, div_ceil(v0, 1000));
    }
}

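The hunks above drop the unstable `#![feature(int_roundings)]` gate and replace the nightly-only `i64::div_ceil` method with a local `div_ceil` helper, so the crate builds on stable Rust while `Timestamp` conversion keeps the same rounding. A small illustration of how the helper differs from plain integer division is below; the millisecond values are examples only, and the last line mirrors the unit test shown above.

// Plain `/` truncates toward zero; `div_ceil` rounds up for positive operands.
assert_eq!(1655276557999_i64 / 1000, 1655276557);
assert_eq!(div_ceil(1655276557999, 1000), 1655276558);
// Matches test_div_ceil above.
assert_eq!(div_ceil(9223372036854676001, 1000), 9223372036854677);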
@@ -4,10 +4,6 @@ version.workspace = true
edition.workspace = true
license.workspace = true

[features]
default = ["python"]
python = ["dep:script"]

[dependencies]
async-compat = "0.2"
async-stream.workspace = true

@@ -20,6 +16,8 @@ catalog = { path = "../catalog" }
common-base = { path = "../common/base" }
common-catalog = { path = "../common/catalog" }
common-error = { path = "../common/error" }
common-datasource = { path = "../common/datasource" }
common-function = { path = "../common/function" }
common-grpc = { path = "../common/grpc" }
common-grpc-expr = { path = "../common/grpc-expr" }
common-procedure = { path = "../common/procedure" }

@@ -36,6 +34,7 @@ futures = "0.3"
futures-util.workspace = true
hyper = { version = "0.14", features = ["full"] }
humantime-serde = "1.1"
log = "0.4"
log-store = { path = "../log-store" }
meta-client = { path = "../meta-client" }
meta-srv = { path = "../meta-srv", features = ["mock"] }

@@ -46,7 +45,6 @@ pin-project = "1.0"
prost.workspace = true
query = { path = "../query" }
regex = "1.6"
script = { path = "../script", features = ["python"], optional = true }
serde = "1.0"
serde_json = "1.0"
servers = { path = "../servers" }

@@ -64,7 +62,6 @@ tonic.workspace = true
tower = { version = "0.4", features = ["full"] }
tower-http = { version = "0.3", features = ["full"] }
url = "2.3.1"
uuid.workspace = true

[dev-dependencies]
axum-test-helper = { git = "https://github.com/sunng87/axum-test-helper.git", branch = "patch-1" }

@@ -19,6 +19,7 @@ use common_base::readable_size::ReadableSize;
use common_telemetry::info;
use meta_client::MetaClientOptions;
use serde::{Deserialize, Serialize};
use servers::http::HttpOptions;
use servers::Mode;
use storage::config::EngineConfig as StorageEngineConfig;
use storage::scheduler::SchedulerConfig;

@@ -29,6 +30,7 @@ use crate::server::Services;

pub const DEFAULT_OBJECT_STORE_CACHE_SIZE: ReadableSize = ReadableSize(1024);

/// Object storage config
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(tag = "type")]
pub enum ObjectStoreConfig {

@@ -37,6 +39,16 @@ pub enum ObjectStoreConfig {
    Oss(OssConfig),
}

/// Storage engine config
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
#[serde(default)]
pub struct StorageConfig {
    #[serde(flatten)]
    pub store: ObjectStoreConfig,
    pub compaction: CompactionConfig,
    pub manifest: RegionManifestConfig,
}

#[derive(Debug, Clone, Serialize, Default, Deserialize)]
#[serde(default)]
pub struct FileConfig {

@@ -107,6 +119,27 @@ impl Default for WalConfig {
    }
}

/// Options for region manifest
#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)]
#[serde(default)]
pub struct RegionManifestConfig {
    /// Region manifest checkpoint actions margin.
    /// Manifest service create a checkpoint every [checkpoint_margin] actions.
    pub checkpoint_margin: Option<u16>,
    /// Region manifest logs and checkpoints gc task execution duration.
    #[serde(with = "humantime_serde")]
    pub gc_duration: Option<Duration>,
}

impl Default for RegionManifestConfig {
    fn default() -> Self {
        Self {
            checkpoint_margin: Some(10u16),
            gc_duration: Some(Duration::from_secs(30)),
        }
    }
}

/// Options for table compaction
#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)]
#[serde(default)]

@@ -117,6 +150,8 @@ pub struct CompactionConfig {
    pub max_files_in_level0: usize,
    /// Max task number for SST purge task after compaction.
    pub max_purge_tasks: usize,
    /// Buffer threshold while writing SST files
    pub sst_write_buffer_size: ReadableSize,
}

impl Default for CompactionConfig {

@@ -125,6 +160,7 @@ impl Default for CompactionConfig {
            max_inflight_tasks: 4,
            max_files_in_level0: 8,
            max_purge_tasks: 32,
            sst_write_buffer_size: ReadableSize::mb(8),
        }
    }
}

@@ -132,7 +168,7 @@ impl Default for CompactionConfig {
impl From<&DatanodeOptions> for SchedulerConfig {
    fn from(value: &DatanodeOptions) -> Self {
        Self {
            max_inflight_tasks: value.compaction.max_inflight_tasks,
            max_inflight_tasks: value.storage.compaction.max_inflight_tasks,
        }
    }
}

@@ -140,8 +176,11 @@ impl From<&DatanodeOptions> for SchedulerConfig {
impl From<&DatanodeOptions> for StorageEngineConfig {
    fn from(value: &DatanodeOptions) -> Self {
        Self {
            max_files_in_l0: value.compaction.max_files_in_level0,
            max_purge_tasks: value.compaction.max_purge_tasks,
            manifest_checkpoint_margin: value.storage.manifest.checkpoint_margin,
            manifest_gc_duration: value.storage.manifest.gc_duration,
            max_files_in_l0: value.storage.compaction.max_files_in_level0,
            max_purge_tasks: value.storage.compaction.max_purge_tasks,
            sst_write_buffer_size: value.storage.compaction.sst_write_buffer_size,
        }
    }
}
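The new `StorageConfig` nests the object-store settings (flattened via `#[serde(flatten)]`), compaction options, and region-manifest options under a single `storage` section, instead of separate top-level `storage` and `compaction` keys. A minimal sketch of how such a section might deserialize is below; the use of the `toml` crate and the `data_dir` key on the `File` variant are assumptions for illustration, not taken from this diff.

// Hedged sketch only: assumes the `toml` crate and a `data_dir` field on the
// `File` object-store variant; concrete values are illustrative.
fn parse_storage_section() -> StorageConfig {
    let text = r#"
        type = "File"
        data_dir = "/tmp/greptimedb/data"

        [compaction]
        max_inflight_tasks = 4
        max_files_in_level0 = 8
        max_purge_tasks = 32

        [manifest]
        checkpoint_margin = 10
        gc_duration = "30s"
    "#;
    // `store` is #[serde(flatten)], so the `type` tag and its fields sit at
    // the top of the section; `compaction` and `manifest` are nested tables.
    toml::from_str::<StorageConfig>(text).expect("valid storage config")
}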
@@ -190,10 +229,10 @@ pub struct DatanodeOptions {
    pub rpc_runtime_size: usize,
    pub mysql_addr: String,
    pub mysql_runtime_size: usize,
    pub http_opts: HttpOptions,
    pub meta_client_options: Option<MetaClientOptions>,
    pub wal: WalConfig,
    pub storage: ObjectStoreConfig,
    pub compaction: CompactionConfig,
    pub storage: StorageConfig,
    pub procedure: Option<ProcedureConfig>,
}

@@ -208,10 +247,10 @@ impl Default for DatanodeOptions {
            rpc_runtime_size: 8,
            mysql_addr: "127.0.0.1:4406".to_string(),
            mysql_runtime_size: 2,
            http_opts: HttpOptions::default(),
            meta_client_options: None,
            wal: WalConfig::default(),
            storage: ObjectStoreConfig::default(),
            compaction: CompactionConfig::default(),
            storage: StorageConfig::default(),
            procedure: None,
        }
    }

@@ -220,14 +259,17 @@ impl Default for DatanodeOptions {
/// Datanode service.
pub struct Datanode {
    opts: DatanodeOptions,
    services: Services,
    services: Option<Services>,
    instance: InstanceRef,
}

impl Datanode {
    pub async fn new(opts: DatanodeOptions) -> Result<Datanode> {
        let instance = Arc::new(Instance::new(&opts).await?);
        let services = Services::try_new(instance.clone(), &opts).await?;
        let services = match opts.mode {
            Mode::Distributed => Some(Services::try_new(instance.clone(), &opts).await?),
            Mode::Standalone => None,
        };
        Ok(Self {
            opts,
            services,

@@ -248,19 +290,27 @@ impl Datanode {

    /// Start services of datanode. This method call will block until services are shutdown.
    pub async fn start_services(&mut self) -> Result<()> {
        self.services.start(&self.opts).await
        if let Some(service) = self.services.as_mut() {
            service.start(&self.opts).await
        } else {
            Ok(())
        }
    }

    pub fn get_instance(&self) -> InstanceRef {
        self.instance.clone()
    }

    async fn shutdown_instance(&self) -> Result<()> {
    pub async fn shutdown_instance(&self) -> Result<()> {
        self.instance.shutdown().await
    }

    async fn shutdown_services(&self) -> Result<()> {
        self.services.shutdown().await
        if let Some(service) = self.services.as_ref() {
            service.shutdown().await
        } else {
            Ok(())
        }
    }

    pub async fn shutdown(&self) -> Result<()> {

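With `services` now optional, a standalone datanode skips its own gRPC/HTTP servers entirely and only the distributed mode starts them. A sketch of how a caller might drive the methods shown above follows; the wiring is illustrative only, the real entry point lives in the binary crate.

// Hedged sketch: drive a datanode using only the methods visible in this diff.
async fn run(opts: DatanodeOptions) -> Result<()> {
    let mut datanode = Datanode::new(opts).await?;
    // In standalone mode `services` is None and this returns immediately;
    // in distributed mode it blocks until the gRPC/HTTP servers exit.
    datanode.start_services().await?;
    datanode.shutdown().await
}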
@@ -14,11 +14,10 @@
|
||||
|
||||
use std::any::Any;
|
||||
|
||||
use common_datasource::error::Error as DataSourceError;
|
||||
use common_error::prelude::*;
|
||||
use common_procedure::ProcedureId;
|
||||
use common_recordbatch::error::Error as RecordBatchError;
|
||||
use datafusion::parquet;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use storage::error::Error as StorageError;
|
||||
use table::error::Error as TableError;
|
||||
use url::ParseError;
|
||||
@@ -124,24 +123,6 @@ pub enum Error {
|
||||
))]
|
||||
ColumnValuesNumberMismatch { columns: usize, values: usize },
|
||||
|
||||
#[snafu(display(
|
||||
"Column type mismatch, column: {}, expected type: {:?}, actual: {:?}",
|
||||
column,
|
||||
expected,
|
||||
actual,
|
||||
))]
|
||||
ColumnTypeMismatch {
|
||||
column: String,
|
||||
expected: ConcreteDataType,
|
||||
actual: ConcreteDataType,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to collect record batch, source: {}", source))]
|
||||
CollectRecords {
|
||||
#[snafu(backtrace)]
|
||||
source: RecordBatchError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to parse sql value, source: {}", source))]
|
||||
ParseSqlValue {
|
||||
#[snafu(backtrace)]
|
||||
@@ -200,6 +181,9 @@ pub enum Error {
|
||||
#[snafu(display("Failed to create directory {}, source: {}", dir, source))]
|
||||
CreateDir { dir: String, source: std::io::Error },
|
||||
|
||||
#[snafu(display("Failed to remove directory {}, source: {}", dir, source))]
|
||||
RemoveDir { dir: String, source: std::io::Error },
|
||||
|
||||
#[snafu(display("Failed to open log store, source: {}", source))]
|
||||
OpenLogStore {
|
||||
#[snafu(backtrace)]
|
||||
@@ -218,7 +202,13 @@ pub enum Error {
|
||||
|
||||
#[snafu(display("Failed to build backend, source: {}", source))]
|
||||
BuildBackend {
|
||||
source: object_store::Error,
|
||||
#[snafu(backtrace)]
|
||||
source: DataSourceError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to parse url, source: {}", source))]
|
||||
ParseUrl {
|
||||
source: DataSourceError,
|
||||
backtrace: Backtrace,
|
||||
},
|
||||
|
||||
@@ -249,6 +239,12 @@ pub enum Error {
|
||||
source: regex::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to list objects, source: {}", source))]
|
||||
ListObjects {
|
||||
#[snafu(backtrace)]
|
||||
source: DataSourceError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to parse the data, source: {}", source))]
|
||||
ParseDataTypes {
|
||||
#[snafu(backtrace)]
|
||||
@@ -318,12 +314,6 @@ pub enum Error {
|
||||
source: sql::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to start script manager, source: {}", source))]
|
||||
StartScriptManager {
|
||||
#[snafu(backtrace)]
|
||||
source: script::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display(
|
||||
"Failed to parse string to timestamp, string: {}, source: {}",
|
||||
raw,
|
||||
@@ -443,12 +433,6 @@ pub enum Error {
|
||||
backtrace: Backtrace,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to write parquet file, source: {}", source))]
|
||||
WriteParquet {
|
||||
source: parquet::errors::ParquetError,
|
||||
backtrace: Backtrace,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to poll stream, source: {}", source))]
|
||||
PollStream {
|
||||
source: datafusion_common::DataFusionError,
|
||||
@@ -475,13 +459,6 @@ pub enum Error {
|
||||
source: object_store::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to lists object in path: {}, source: {}", path, source))]
|
||||
ListObjects {
|
||||
path: String,
|
||||
backtrace: Backtrace,
|
||||
source: object_store::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Unrecognized table option: {}", source))]
|
||||
UnrecognizedTableOption {
|
||||
#[snafu(backtrace)]
|
||||
@@ -525,6 +502,12 @@ pub enum Error {
|
||||
#[snafu(backtrace)]
|
||||
source: BoxedError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to copy table to parquet file, source: {}", source))]
|
||||
WriteParquet {
|
||||
#[snafu(backtrace)]
|
||||
source: storage::error::Error,
|
||||
},
|
||||
}
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
@@ -550,8 +533,6 @@ impl ErrorExt for Error {
|
||||
|
||||
Insert { source, .. } => source.status_code(),
|
||||
Delete { source, .. } => source.status_code(),
|
||||
CollectRecords { source, .. } => source.status_code(),
|
||||
|
||||
TableNotFound { .. } => StatusCode::TableNotFound,
|
||||
ColumnNotFound { .. } => StatusCode::TableColumnNotFound,
|
||||
|
||||
@@ -564,7 +545,6 @@ impl ErrorExt for Error {
|
||||
ConvertSchema { source, .. } | VectorComputation { source } => source.status_code(),
|
||||
|
||||
ColumnValuesNumberMismatch { .. }
|
||||
| ColumnTypeMismatch { .. }
|
||||
| InvalidSql { .. }
|
||||
| InvalidUrl { .. }
|
||||
| InvalidPath { .. }
|
||||
@@ -584,7 +564,8 @@ impl ErrorExt for Error {
|
||||
| DatabaseNotFound { .. }
|
||||
| MissingNodeId { .. }
|
||||
| MissingMetasrvOpts { .. }
|
||||
| ColumnNoneDefaultValue { .. } => StatusCode::InvalidArguments,
|
||||
| ColumnNoneDefaultValue { .. }
|
||||
| ParseUrl { .. } => StatusCode::InvalidArguments,
|
||||
|
||||
// TODO(yingwen): Further categorize http error.
|
||||
StartServer { .. }
|
||||
@@ -592,6 +573,7 @@ impl ErrorExt for Error {
|
||||
| TcpBind { .. }
|
||||
| StartGrpc { .. }
|
||||
| CreateDir { .. }
|
||||
| RemoveDir { .. }
|
||||
| InsertSystemCatalog { .. }
|
||||
| RenameTable { .. }
|
||||
| Catalog { .. }
|
||||
@@ -613,7 +595,6 @@ impl ErrorExt for Error {
|
||||
| WriteObject { .. }
|
||||
| ListObjects { .. } => StatusCode::StorageUnavailable,
|
||||
OpenLogStore { source } => source.status_code(),
|
||||
StartScriptManager { source } => source.status_code(),
|
||||
OpenStorageEngine { source } => source.status_code(),
|
||||
RuntimeResource { .. } => StatusCode::RuntimeResourcesExhausted,
|
||||
MetaClientInit { source, .. } => source.status_code(),
|
||||
|
||||
@@ -17,7 +17,7 @@ use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use api::v1::meta::{HeartbeatRequest, HeartbeatResponse, NodeStat, Peer};
|
||||
use catalog::{region_stats, CatalogManagerRef};
|
||||
use catalog::{datanode_stat, CatalogManagerRef};
|
||||
use common_telemetry::{error, info, warn};
|
||||
use meta_client::client::{HeartbeatSender, MetaClient};
|
||||
use snafu::ResultExt;
|
||||
@@ -106,13 +106,7 @@ impl HeartbeatTask {
|
||||
let mut tx = Self::create_streams(&meta_client, running.clone()).await?;
|
||||
common_runtime::spawn_bg(async move {
|
||||
while running.load(Ordering::Acquire) {
|
||||
let (region_num, region_stats) = match region_stats(&catalog_manager_clone).await {
|
||||
Ok(region_stats) => (region_stats.len() as i64, region_stats),
|
||||
Err(e) => {
|
||||
error!("failed to get region status, err: {e:?}");
|
||||
(-1, vec![])
|
||||
}
|
||||
};
|
||||
let (region_num, region_stats) = datanode_stat(&catalog_manager_clone).await;
|
||||
|
||||
let req = HeartbeatRequest {
|
||||
peer: Some(Peer {
|
||||
@@ -120,7 +114,7 @@ impl HeartbeatTask {
|
||||
addr: addr.clone(),
|
||||
}),
|
||||
node_stat: Some(NodeStat {
|
||||
region_num,
|
||||
region_num: region_num as _,
|
||||
..Default::default()
|
||||
}),
|
||||
region_stats,
|
||||
|
||||
@@ -23,6 +23,7 @@ use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, MIN_USER
|
||||
use common_error::prelude::BoxedError;
|
||||
use common_grpc::channel_manager::{ChannelConfig, ChannelManager};
|
||||
use common_procedure::local::{LocalManager, ManagerConfig};
|
||||
use common_procedure::store::state_store::ObjectStateStore;
|
||||
use common_procedure::ProcedureManagerRef;
|
||||
use common_telemetry::logging::info;
|
||||
use log_store::raft_engine::log_store::RaftEngineLogStore;
|
||||
@@ -37,12 +38,14 @@ use object_store::services::{Fs as FsBuilder, Oss as OSSBuilder, S3 as S3Builder
|
||||
use object_store::{util, ObjectStore, ObjectStoreBuilder};
|
||||
use query::query_engine::{QueryEngineFactory, QueryEngineRef};
|
||||
use servers::Mode;
|
||||
use session::context::QueryContext;
|
||||
use snafu::prelude::*;
|
||||
use storage::compaction::{CompactionHandler, CompactionSchedulerRef, SimplePicker};
|
||||
use storage::config::EngineConfig as StorageEngineConfig;
|
||||
use storage::scheduler::{LocalScheduler, SchedulerConfig};
|
||||
use storage::EngineImpl;
|
||||
use store_api::logstore::LogStore;
|
||||
use table::requests::FlushTableRequest;
|
||||
use table::table::numbers::NumbersTable;
|
||||
use table::table::TableIdProviderRef;
|
||||
use table::Table;
|
||||
@@ -55,11 +58,9 @@ use crate::error::{
|
||||
NewCatalogSnafu, OpenLogStoreSnafu, RecoverProcedureSnafu, Result, ShutdownInstanceSnafu,
|
||||
};
|
||||
use crate::heartbeat::HeartbeatTask;
|
||||
use crate::script::ScriptExecutor;
|
||||
use crate::sql::SqlHandler;
|
||||
use crate::sql::{SqlHandler, SqlRequest};
|
||||
|
||||
mod grpc;
|
||||
mod script;
|
||||
pub mod sql;
|
||||
|
||||
pub(crate) type DefaultEngine = MitoEngine<EngineImpl<RaftEngineLogStore>>;
|
||||
@@ -69,9 +70,9 @@ pub struct Instance {
|
||||
pub(crate) query_engine: QueryEngineRef,
|
||||
pub(crate) sql_handler: SqlHandler,
|
||||
pub(crate) catalog_manager: CatalogManagerRef,
|
||||
pub(crate) script_executor: ScriptExecutor,
|
||||
pub(crate) table_id_provider: Option<TableIdProviderRef>,
|
||||
pub(crate) heartbeat_task: Option<HeartbeatTask>,
|
||||
procedure_manager: Option<ProcedureManagerRef>,
|
||||
}
|
||||
|
||||
pub type InstanceRef = Arc<Instance>;
|
||||
@@ -102,7 +103,7 @@ impl Instance {
|
||||
meta_client: Option<Arc<MetaClient>>,
|
||||
compaction_scheduler: CompactionSchedulerRef<RaftEngineLogStore>,
|
||||
) -> Result<Self> {
|
||||
let object_store = new_object_store(&opts.storage).await?;
|
||||
let object_store = new_object_store(&opts.storage.store).await?;
|
||||
let log_store = Arc::new(create_log_store(&opts.wal).await?);
|
||||
|
||||
let table_engine = Arc::new(DefaultEngine::new(
|
||||
@@ -166,8 +167,6 @@ impl Instance {
|
||||
|
||||
let factory = QueryEngineFactory::new(catalog_manager.clone());
|
||||
let query_engine = factory.query_engine();
|
||||
let script_executor =
|
||||
ScriptExecutor::new(catalog_manager.clone(), query_engine.clone()).await?;
|
||||
|
||||
let heartbeat_task = match opts.mode {
|
||||
Mode::Standalone => None,
|
||||
@@ -181,6 +180,7 @@ impl Instance {
|
||||
};
|
||||
|
||||
let procedure_manager = create_procedure_manager(&opts.procedure).await?;
|
||||
// Register all procedures.
|
||||
if let Some(procedure_manager) = &procedure_manager {
|
||||
table_engine.register_procedure_loaders(&**procedure_manager);
|
||||
table_procedure::register_procedure_loaders(
|
||||
@@ -189,12 +189,6 @@ impl Instance {
|
||||
table_engine.clone(),
|
||||
&**procedure_manager,
|
||||
);
|
||||
|
||||
// Recover procedures.
|
||||
procedure_manager
|
||||
.recover()
|
||||
.await
|
||||
.context(RecoverProcedureSnafu)?;
|
||||
}
|
||||
|
||||
Ok(Self {
|
||||
@@ -202,14 +196,13 @@ impl Instance {
|
||||
sql_handler: SqlHandler::new(
|
||||
table_engine.clone(),
|
||||
catalog_manager.clone(),
|
||||
query_engine.clone(),
|
||||
table_engine,
|
||||
procedure_manager,
|
||||
procedure_manager.clone(),
|
||||
),
|
||||
catalog_manager,
|
||||
script_executor,
|
||||
heartbeat_task,
|
||||
table_id_provider,
|
||||
procedure_manager,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -221,6 +214,15 @@ impl Instance {
|
||||
if let Some(task) = &self.heartbeat_task {
|
||||
task.start().await?;
|
||||
}
|
||||
|
||||
// Recover procedures after the catalog manager is started, so we can
|
||||
// ensure we can access all tables from the catalog manager.
|
||||
if let Some(procedure_manager) = &self.procedure_manager {
|
||||
procedure_manager
|
||||
.recover()
|
||||
.await
|
||||
.context(RecoverProcedureSnafu)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -233,6 +235,8 @@ impl Instance {
|
||||
.context(ShutdownInstanceSnafu)?;
|
||||
}
|
||||
|
||||
self.flush_tables().await?;
|
||||
|
||||
self.sql_handler
|
||||
.close()
|
||||
.await
|
||||
@@ -240,6 +244,43 @@ impl Instance {
|
||||
.context(ShutdownInstanceSnafu)
|
||||
}
|
||||
|
||||
pub async fn flush_tables(&self) -> Result<()> {
|
||||
info!("going to flush all schemas");
|
||||
let schema_list = self
|
||||
.catalog_manager
|
||||
.catalog(DEFAULT_CATALOG_NAME)
|
||||
.map_err(BoxedError::new)
|
||||
.context(ShutdownInstanceSnafu)?
|
||||
.expect("Default schema not found")
|
||||
.schema_names()
|
||||
.map_err(BoxedError::new)
|
||||
.context(ShutdownInstanceSnafu)?;
|
||||
let flush_requests = schema_list
|
||||
.into_iter()
|
||||
.map(|schema_name| {
|
||||
SqlRequest::FlushTable(FlushTableRequest {
|
||||
catalog_name: DEFAULT_CATALOG_NAME.to_string(),
|
||||
schema_name,
|
||||
table_name: None,
|
||||
region_number: None,
|
||||
wait: Some(true),
|
||||
})
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
let flush_result = futures::future::try_join_all(
|
||||
flush_requests
|
||||
.into_iter()
|
||||
.map(|request| self.sql_handler.execute(request, QueryContext::arc())),
|
||||
)
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(ShutdownInstanceSnafu);
|
||||
info!("Flushed all tables result: {}", flush_result.is_ok());
|
||||
flush_result?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn sql_handler(&self) -> &SqlHandler {
|
||||
&self.sql_handler
|
||||
}
|
||||
@@ -268,11 +309,22 @@ pub(crate) async fn new_object_store(store_config: &ObjectStoreConfig) -> Result
|
||||
ObjectStoreConfig::Oss { .. } => new_oss_object_store(store_config).await,
|
||||
};
|
||||
|
||||
// Don't enable retry layer when using local file backend.
|
||||
let object_store = if !matches!(store_config, ObjectStoreConfig::File(..)) {
|
||||
object_store.map(|object_store| object_store.layer(RetryLayer::new().with_jitter()))
|
||||
} else {
|
||||
object_store
|
||||
};
|
||||
|
||||
object_store.map(|object_store| {
|
||||
object_store
|
||||
.layer(RetryLayer::new().with_jitter())
|
||||
.layer(MetricsLayer)
|
||||
.layer(LoggingLayer::default())
|
||||
.layer(
|
||||
LoggingLayer::default()
|
||||
// Print the expected error only in DEBUG level.
|
||||
// See https://docs.rs/opendal/latest/opendal/layers/struct.LoggingLayer.html#method.with_error_level
|
||||
.with_error_level(Some(log::Level::Debug)),
|
||||
)
|
||||
.layer(TracingLayer)
|
||||
})
|
||||
}
|
||||
@@ -290,18 +342,20 @@ pub(crate) async fn new_oss_object_store(store_config: &ObjectStoreConfig) -> Re
|
||||
);
|
||||
|
||||
let mut builder = OSSBuilder::default();
|
||||
let builder = builder
|
||||
builder
|
||||
.root(&root)
|
||||
.bucket(&oss_config.bucket)
|
||||
.endpoint(&oss_config.endpoint)
|
||||
.access_key_id(&oss_config.access_key_id)
|
||||
.access_key_secret(&oss_config.access_key_secret);
|
||||
|
||||
let accessor = builder.build().with_context(|_| error::InitBackendSnafu {
|
||||
config: store_config.clone(),
|
||||
})?;
|
||||
let object_store = ObjectStore::new(builder)
|
||||
.with_context(|_| error::InitBackendSnafu {
|
||||
config: store_config.clone(),
|
||||
})?
|
||||
.finish();
|
||||
|
||||
create_object_store_with_cache(ObjectStore::new(accessor).finish(), store_config)
|
||||
create_object_store_with_cache(object_store, store_config)
|
||||
}
|
||||
|
||||
fn create_object_store_with_cache(
|
||||
@@ -354,24 +408,27 @@ pub(crate) async fn new_s3_object_store(store_config: &ObjectStoreConfig) -> Res
|
||||
);
|
||||
|
||||
let mut builder = S3Builder::default();
|
||||
let mut builder = builder
|
||||
builder
|
||||
.root(&root)
|
||||
.bucket(&s3_config.bucket)
|
||||
.access_key_id(&s3_config.access_key_id)
|
||||
.secret_access_key(&s3_config.secret_access_key);
|
||||
|
||||
if s3_config.endpoint.is_some() {
|
||||
builder = builder.endpoint(s3_config.endpoint.as_ref().unwrap());
|
||||
builder.endpoint(s3_config.endpoint.as_ref().unwrap());
|
||||
}
|
||||
if s3_config.region.is_some() {
|
||||
builder = builder.region(s3_config.region.as_ref().unwrap());
|
||||
builder.region(s3_config.region.as_ref().unwrap());
|
||||
}
|
||||
|
||||
let accessor = builder.build().with_context(|_| error::InitBackendSnafu {
|
||||
config: store_config.clone(),
|
||||
})?;
|
||||
|
||||
create_object_store_with_cache(ObjectStore::new(accessor).finish(), store_config)
|
||||
create_object_store_with_cache(
|
||||
ObjectStore::new(builder)
|
||||
.with_context(|_| error::InitBackendSnafu {
|
||||
config: store_config.clone(),
|
||||
})?
|
||||
.finish(),
|
||||
store_config,
|
||||
)
|
||||
}
|
||||
|
||||
pub(crate) async fn new_fs_object_store(store_config: &ObjectStoreConfig) -> Result<ObjectStore> {
|
||||
@@ -385,16 +442,27 @@ pub(crate) async fn new_fs_object_store(store_config: &ObjectStoreConfig) -> Res
|
||||
info!("The file storage directory is: {}", &data_dir);
|
||||
|
||||
let atomic_write_dir = format!("{data_dir}/.tmp/");
|
||||
if path::Path::new(&atomic_write_dir).exists() {
|
||||
info!(
|
||||
"Begin to clean temp storage directory: {}",
|
||||
&atomic_write_dir
|
||||
);
|
||||
fs::remove_dir_all(&atomic_write_dir).context(error::RemoveDirSnafu {
|
||||
dir: &atomic_write_dir,
|
||||
})?;
|
||||
info!("Cleaned temp storage directory: {}", &atomic_write_dir);
|
||||
}
|
||||
|
||||
let accessor = FsBuilder::default()
|
||||
.root(&data_dir)
|
||||
.atomic_write_dir(&atomic_write_dir)
|
||||
.build()
|
||||
let mut builder = FsBuilder::default();
|
||||
builder.root(&data_dir).atomic_write_dir(&atomic_write_dir);
|
||||
|
||||
let object_store = ObjectStore::new(builder)
|
||||
.context(error::InitBackendSnafu {
|
||||
config: store_config.clone(),
|
||||
})?;
|
||||
})?
|
||||
.finish();
|
||||
|
||||
Ok(ObjectStore::new(accessor).finish())
|
||||
Ok(object_store)
|
||||
}
|
||||
|
||||
/// Create metasrv client instance and spawn heartbeat loop.
|
||||
@@ -460,11 +528,15 @@ pub(crate) async fn create_procedure_manager(
|
||||
);
|
||||
|
||||
let object_store = new_object_store(&procedure_config.store).await?;
|
||||
let state_store = Arc::new(ObjectStateStore::new(object_store));
|
||||
|
||||
let manager_config = ManagerConfig {
|
||||
object_store,
|
||||
max_retry_times: procedure_config.max_retry_times,
|
||||
retry_delay: procedure_config.retry_delay,
|
||||
};
|
||||
|
||||
Ok(Some(Arc::new(LocalManager::new(manager_config))))
|
||||
Ok(Some(Arc::new(LocalManager::new(
|
||||
manager_config,
|
||||
state_store,
|
||||
))))
|
||||
}
|
||||
|
||||
@@ -21,7 +21,7 @@ use common_query::Output;
|
||||
use query::parser::{PromQuery, QueryLanguageParser, QueryStatement};
|
||||
use query::plan::LogicalPlan;
|
||||
use servers::query_handler::grpc::GrpcQueryHandler;
|
||||
use session::context::QueryContextRef;
|
||||
use session::context::{QueryContext, QueryContextRef};
|
||||
use snafu::prelude::*;
|
||||
use sql::statements::statement::Statement;
|
||||
use substrait::{DFLogicalSubstraitConvertor, SubstraitPlan};
|
||||
@@ -53,7 +53,7 @@ impl Instance {
|
||||
.context(DecodeLogicalPlanSnafu)?;
|
||||
|
||||
self.query_engine
|
||||
.execute(&LogicalPlan::DfPlan(logical_plan))
|
||||
.execute(LogicalPlan::DfPlan(logical_plan), QueryContext::arc())
|
||||
.await
|
||||
.context(ExecuteLogicalPlanSnafu)
|
||||
}
|
||||
@@ -69,11 +69,11 @@ impl Instance {
|
||||
let plan = self
|
||||
.query_engine
|
||||
.planner()
|
||||
.plan(stmt, ctx)
|
||||
.plan(stmt, ctx.clone())
|
||||
.await
|
||||
.context(PlanStatementSnafu)?;
|
||||
self.query_engine
|
||||
.execute(&plan)
|
||||
.execute(plan, ctx)
|
||||
.await
|
||||
.context(ExecuteLogicalPlanSnafu)
|
||||
}
|
||||
@@ -175,7 +175,7 @@ mod test {
|
||||
.plan(stmt, QueryContext::arc())
|
||||
.await
|
||||
.unwrap();
|
||||
engine.execute(&plan).await.unwrap()
|
||||
engine.execute(plan, QueryContext::arc()).await.unwrap()
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread")]
|
||||
|
||||
@@ -19,7 +19,6 @@ use common_error::prelude::BoxedError;
|
||||
use common_query::Output;
|
||||
use common_telemetry::logging::info;
|
||||
use common_telemetry::timer;
|
||||
use futures::StreamExt;
|
||||
use query::error::QueryExecutionSnafu;
|
||||
use query::parser::{PromQuery, QueryLanguageParser, QueryStatement};
|
||||
use query::query_engine::StatementHandler;
|
||||
@@ -28,21 +27,18 @@ use servers::prom::PromHandler;
|
||||
use session::context::{QueryContext, QueryContextRef};
|
||||
use snafu::prelude::*;
|
||||
use sql::ast::ObjectName;
|
||||
use sql::statements::copy::CopyTable;
|
||||
use sql::statements::copy::{CopyTable, CopyTableArgument};
|
||||
use sql::statements::statement::Statement;
|
||||
use table::engine::TableReference;
|
||||
use table::requests::{
|
||||
CopyTableFromRequest, CopyTableRequest, CreateDatabaseRequest, DropTableRequest,
|
||||
};
|
||||
use table::requests::{CopyDirection, CopyTableRequest, CreateDatabaseRequest, DropTableRequest};
|
||||
|
||||
use crate::error::{
|
||||
self, BumpTableIdSnafu, ExecuteSqlSnafu, ExecuteStatementSnafu, PlanStatementSnafu, Result,
|
||||
TableIdProviderNotFoundSnafu,
|
||||
};
|
||||
use crate::instance::Instance;
|
||||
use crate::metric;
|
||||
use crate::sql::insert::InsertRequests;
|
||||
use crate::sql::SqlRequest;
|
||||
use crate::metrics;
|
||||
use crate::sql::{SqlHandler, SqlRequest};
|
||||
|
||||
impl Instance {
|
||||
pub async fn execute_stmt(
|
||||
@@ -52,37 +48,10 @@ impl Instance {
|
||||
) -> Result<Output> {
|
||||
match stmt {
|
||||
QueryStatement::Sql(Statement::Insert(insert)) => {
|
||||
let requests = self
|
||||
.sql_handler
|
||||
.insert_to_requests(self.catalog_manager.clone(), *insert, query_ctx.clone())
|
||||
.await?;
|
||||
|
||||
match requests {
|
||||
InsertRequests::Request(request) => {
|
||||
self.sql_handler.execute(request, query_ctx.clone()).await
|
||||
}
|
||||
|
||||
InsertRequests::Stream(mut s) => {
|
||||
let mut rows = 0;
|
||||
while let Some(request) = s.next().await {
|
||||
match self
|
||||
.sql_handler
|
||||
.execute(request?, query_ctx.clone())
|
||||
.await?
|
||||
{
|
||||
Output::AffectedRows(n) => {
|
||||
rows += n;
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
}
|
||||
Ok(Output::AffectedRows(rows))
|
||||
}
|
||||
}
|
||||
}
|
||||
QueryStatement::Sql(Statement::Delete(delete)) => {
|
||||
let request = SqlRequest::Delete(*delete);
|
||||
self.sql_handler.execute(request, query_ctx).await
|
||||
let request =
|
||||
SqlHandler::insert_to_request(self.catalog_manager.clone(), *insert, query_ctx)
|
||||
.await?;
|
||||
self.sql_handler.insert(request).await
|
||||
}
|
||||
QueryStatement::Sql(Statement::CreateDatabase(create_database)) => {
|
||||
let request = CreateDatabaseRequest {
|
||||
@@ -121,6 +90,9 @@ impl Instance {
|
||||
.execute(SqlRequest::CreateTable(request), query_ctx)
|
||||
.await
|
||||
}
|
||||
QueryStatement::Sql(Statement::CreateExternalTable(_create_external_table)) => {
|
||||
unimplemented!()
|
||||
}
|
||||
QueryStatement::Sql(Statement::Alter(alter_table)) => {
|
||||
let name = alter_table.table_name().clone();
|
||||
let (catalog, schema, table) = table_idents_to_full_name(&name, query_ctx.clone())?;
|
||||
@@ -152,51 +124,63 @@ impl Instance {
|
||||
.execute(SqlRequest::ShowTables(show_tables), query_ctx)
|
||||
.await
|
||||
}
|
||||
QueryStatement::Sql(Statement::DescribeTable(describe_table)) => {
|
||||
self.sql_handler
|
||||
.execute(SqlRequest::DescribeTable(describe_table), query_ctx)
|
||||
.await
|
||||
}
|
||||
QueryStatement::Sql(Statement::ShowCreateTable(_show_create_table)) => {
|
||||
unimplemented!("SHOW CREATE TABLE is unimplemented yet");
|
||||
}
|
||||
QueryStatement::Sql(Statement::Copy(copy_table)) => match copy_table {
|
||||
CopyTable::To(copy_table) => {
|
||||
let (catalog_name, schema_name, table_name) =
|
||||
table_idents_to_full_name(©_table.table_name, query_ctx.clone())?;
|
||||
let file_name = copy_table.file_name;
|
||||
let req = CopyTableRequest {
|
||||
catalog_name,
|
||||
schema_name,
|
||||
table_name,
|
||||
file_name,
|
||||
connection: copy_table.connection,
|
||||
};
|
||||
QueryStatement::Sql(Statement::Copy(copy_table)) => {
|
||||
let req = match copy_table {
|
||||
CopyTable::To(copy_table) => {
|
||||
let CopyTableArgument {
|
||||
location,
|
||||
connection,
|
||||
pattern,
|
||||
table_name,
|
||||
..
|
||||
} = copy_table;
|
||||
let (catalog_name, schema_name, table_name) =
|
||||
table_idents_to_full_name(&table_name, query_ctx.clone())?;
|
||||
CopyTableRequest {
|
||||
catalog_name,
|
||||
schema_name,
|
||||
table_name,
|
||||
location,
|
||||
connection,
|
||||
pattern,
|
||||
direction: CopyDirection::Export,
|
||||
}
|
||||
}
|
||||
CopyTable::From(copy_table) => {
|
||||
let CopyTableArgument {
|
||||
location,
|
||||
connection,
|
||||
pattern,
|
||||
table_name,
|
||||
..
|
||||
} = copy_table;
|
||||
let (catalog_name, schema_name, table_name) =
|
||||
table_idents_to_full_name(&table_name, query_ctx.clone())?;
|
||||
CopyTableRequest {
|
||||
catalog_name,
|
||||
schema_name,
|
||||
table_name,
|
||||
location,
|
||||
connection,
|
||||
pattern,
|
||||
direction: CopyDirection::Import,
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
self.sql_handler
|
||||
.execute(SqlRequest::CopyTable(req), query_ctx)
|
||||
.await
|
||||
}
|
||||
CopyTable::From(copy_table) => {
|
||||
let (catalog_name, schema_name, table_name) =
|
||||
table_idents_to_full_name(©_table.table_name, query_ctx.clone())?;
|
||||
let req = CopyTableFromRequest {
|
||||
catalog_name,
|
||||
schema_name,
|
||||
table_name,
|
||||
connection: copy_table.connection,
|
||||
pattern: copy_table.pattern,
|
||||
from: copy_table.from,
|
||||
};
|
||||
self.sql_handler
|
||||
.execute(SqlRequest::CopyTableFrom(req), query_ctx)
|
||||
.await
|
||||
}
|
||||
},
|
||||
self.sql_handler
|
||||
.execute(SqlRequest::CopyTable(req), query_ctx)
|
||||
.await
|
||||
}
|
||||
QueryStatement::Sql(Statement::Query(_))
|
||||
| QueryStatement::Sql(Statement::Explain(_))
|
||||
| QueryStatement::Sql(Statement::Use(_))
|
||||
| QueryStatement::Sql(Statement::Tql(_))
|
||||
| QueryStatement::Sql(Statement::Delete(_))
|
||||
| QueryStatement::Sql(Statement::DescribeTable(_))
|
||||
| QueryStatement::Promql(_) => unreachable!(),
|
||||
}
|
||||
}
|
||||
@@ -206,17 +190,20 @@ impl Instance {
|
||||
promql: &PromQuery,
|
||||
query_ctx: QueryContextRef,
|
||||
) -> Result<Output> {
|
||||
let _timer = timer!(metric::METRIC_HANDLE_PROMQL_ELAPSED);
|
||||
let _timer = timer!(metrics::METRIC_HANDLE_PROMQL_ELAPSED);
|
||||
|
||||
let stmt = QueryLanguageParser::parse_promql(promql).context(ExecuteSqlSnafu)?;
|
||||
|
||||
let engine = self.query_engine();
|
||||
let plan = engine
|
||||
.planner()
|
||||
.plan(stmt, query_ctx)
|
||||
.plan(stmt, query_ctx.clone())
|
||||
.await
|
||||
.context(PlanStatementSnafu)?;
|
||||
engine.execute(&plan).await.context(ExecuteStatementSnafu)
|
||||
engine
|
||||
.execute(plan, query_ctx)
|
||||
.await
|
||||
.context(ExecuteStatementSnafu)
|
||||
}
|
||||
|
||||
// TODO(ruihang): merge this and `execute_promql` after #951 landed
|
||||
@@ -249,10 +236,13 @@ impl Instance {
|
||||
let engine = self.query_engine();
|
||||
let plan = engine
|
||||
.planner()
|
||||
.plan(stmt, query_ctx)
|
||||
.plan(stmt, query_ctx.clone())
|
||||
.await
|
||||
.context(PlanStatementSnafu)?;
|
||||
engine.execute(&plan).await.context(ExecuteStatementSnafu)
|
||||
engine
|
||||
.execute(plan, query_ctx)
|
||||
.await
|
||||
.context(ExecuteStatementSnafu)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -304,7 +294,7 @@ impl StatementHandler for Instance {
|
||||
#[async_trait]
|
||||
impl PromHandler for Instance {
|
||||
async fn do_query(&self, query: &PromQuery) -> server_error::Result<Output> {
|
||||
let _timer = timer!(metric::METRIC_HANDLE_PROMQL_ELAPSED);
|
||||
let _timer = timer!(metrics::METRIC_HANDLE_PROMQL_ELAPSED);
|
||||
|
||||
self.execute_promql(query, QueryContext::arc())
|
||||
.await
|
||||
|
||||
@@ -19,9 +19,8 @@ pub mod datanode;
|
||||
pub mod error;
|
||||
mod heartbeat;
|
||||
pub mod instance;
|
||||
pub mod metric;
|
||||
pub mod metrics;
|
||||
mod mock;
|
||||
mod script;
|
||||
pub mod server;
|
||||
pub mod sql;
|
||||
#[cfg(test)]
|
||||
|
||||
@@ -15,6 +15,4 @@
|
||||
//! datanode metrics
|
||||
|
||||
pub const METRIC_HANDLE_SQL_ELAPSED: &str = "datanode.handle_sql_elapsed";
|
||||
pub const METRIC_HANDLE_SCRIPTS_ELAPSED: &str = "datanode.handle_scripts_elapsed";
|
||||
pub const METRIC_RUN_SCRIPT_ELAPSED: &str = "datanode.run_script_elapsed";
|
||||
pub const METRIC_HANDLE_PROMQL_ELAPSED: &str = "datanode.handle_promql_elapsed";
|
||||
@@ -18,9 +18,12 @@ use std::sync::Arc;
|
||||
|
||||
use common_runtime::Builder as RuntimeBuilder;
|
||||
use servers::grpc::GrpcServer;
|
||||
use servers::http::{HttpServer, HttpServerBuilder};
|
||||
use servers::metrics_handler::MetricsHandler;
|
||||
use servers::query_handler::grpc::ServerGrpcQueryHandlerAdaptor;
|
||||
use servers::server::Server;
|
||||
use snafu::ResultExt;
|
||||
use tokio::select;
|
||||
|
||||
use crate::datanode::DatanodeOptions;
|
||||
use crate::error::{
|
||||
@@ -33,6 +36,7 @@ pub mod grpc;
|
||||
/// All rpc services.
|
||||
pub struct Services {
|
||||
grpc_server: GrpcServer,
|
||||
http_server: HttpServer,
|
||||
}
|
||||
|
||||
impl Services {
|
||||
@@ -51,6 +55,9 @@ impl Services {
|
||||
None,
|
||||
grpc_runtime,
|
||||
),
|
||||
http_server: HttpServerBuilder::new(opts.http_opts.clone())
|
||||
.with_metrics_handler(MetricsHandler)
|
||||
.build(),
|
||||
})
|
||||
}
|
||||
|
||||
@@ -58,10 +65,15 @@ impl Services {
|
||||
let grpc_addr: SocketAddr = opts.rpc_addr.parse().context(ParseAddrSnafu {
|
||||
addr: &opts.rpc_addr,
|
||||
})?;
|
||||
self.grpc_server
|
||||
.start(grpc_addr)
|
||||
.await
|
||||
.context(StartServerSnafu)?;
|
||||
let http_addr = opts.http_opts.addr.parse().context(ParseAddrSnafu {
|
||||
addr: &opts.http_opts.addr,
|
||||
})?;
|
||||
let grpc = self.grpc_server.start(grpc_addr);
|
||||
let http = self.http_server.start(http_addr);
|
||||
select!(
|
||||
v = grpc => v.context(StartServerSnafu)?,
|
||||
v = http => v.context(StartServerSnafu)?,
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -69,6 +81,11 @@ impl Services {
|
||||
self.grpc_server
|
||||
.shutdown()
|
||||
.await
|
||||
.context(ShutdownServerSnafu)
|
||||
.context(ShutdownServerSnafu)?;
|
||||
self.http_server
|
||||
.shutdown()
|
||||
.await
|
||||
.context(ShutdownServerSnafu)?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -95,6 +95,7 @@ impl Instance {
|
||||
schema_name: expr.schema_name,
|
||||
table_name,
|
||||
region_number: expr.region_id,
|
||||
wait: None,
|
||||
};
|
||||
self.sql_handler()
|
||||
.execute(SqlRequest::FlushTable(req), QueryContext::arc())
|
||||
|
||||
@@ -17,34 +17,29 @@ use common_error::prelude::BoxedError;
|
||||
use common_procedure::ProcedureManagerRef;
|
||||
use common_query::Output;
|
||||
use common_telemetry::error;
|
||||
use query::query_engine::QueryEngineRef;
|
||||
use query::sql::{describe_table, show_databases, show_tables};
|
||||
use query::sql::{show_databases, show_tables};
|
||||
use session::context::QueryContextRef;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use sql::statements::delete::Delete;
|
||||
use sql::statements::describe::DescribeTable;
|
||||
use sql::statements::show::{ShowDatabases, ShowTables};
|
||||
use table::engine::{EngineContext, TableEngineProcedureRef, TableEngineRef, TableReference};
|
||||
use table::requests::*;
|
||||
use table::TableRef;
|
||||
|
||||
use crate::error::{
|
||||
self, CloseTableEngineSnafu, ExecuteSqlSnafu, GetTableSnafu, Result, TableNotFoundSnafu,
|
||||
CloseTableEngineSnafu, ExecuteSqlSnafu, GetTableSnafu, Result, TableNotFoundSnafu,
|
||||
};
|
||||
use crate::instance::sql::table_idents_to_full_name;
|
||||
|
||||
mod alter;
|
||||
mod copy_table;
|
||||
mod copy_table_from;
|
||||
mod copy_table_to;
|
||||
mod create;
|
||||
mod delete;
|
||||
mod drop_table;
|
||||
mod flush_table;
|
||||
pub(crate) mod insert;
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum SqlRequest {
|
||||
Insert(InsertRequest),
|
||||
CreateTable(CreateTableRequest),
|
||||
CreateDatabase(CreateDatabaseRequest),
|
||||
Alter(AlterTableRequest),
|
||||
@@ -52,17 +47,14 @@ pub enum SqlRequest {
|
||||
FlushTable(FlushTableRequest),
|
||||
ShowDatabases(ShowDatabases),
|
||||
ShowTables(ShowTables),
|
||||
DescribeTable(DescribeTable),
|
||||
Delete(Delete),
|
||||
CopyTable(CopyTableRequest),
|
||||
CopyTableFrom(CopyTableFromRequest),
|
||||
}
|
||||
|
||||
// Handler to execute SQL except query
|
||||
#[derive(Clone)]
|
||||
pub struct SqlHandler {
|
||||
table_engine: TableEngineRef,
|
||||
catalog_manager: CatalogManagerRef,
|
||||
query_engine: QueryEngineRef,
|
||||
engine_procedure: TableEngineProcedureRef,
|
||||
procedure_manager: Option<ProcedureManagerRef>,
|
||||
}
|
||||
@@ -71,14 +63,12 @@ impl SqlHandler {
|
||||
pub fn new(
|
||||
table_engine: TableEngineRef,
|
||||
catalog_manager: CatalogManagerRef,
|
||||
query_engine: QueryEngineRef,
|
||||
engine_procedure: TableEngineProcedureRef,
|
||||
procedure_manager: Option<ProcedureManagerRef>,
|
||||
) -> Self {
|
||||
Self {
|
||||
table_engine,
|
||||
catalog_manager,
|
||||
query_engine,
|
||||
engine_procedure,
|
||||
procedure_manager,
|
||||
}
|
||||
@@ -90,14 +80,14 @@ impl SqlHandler {
|
||||
// there, instead of executing here in a "static" fashion.
|
||||
pub async fn execute(&self, request: SqlRequest, query_ctx: QueryContextRef) -> Result<Output> {
|
||||
let result = match request {
|
||||
SqlRequest::Insert(req) => self.insert(req).await,
|
||||
SqlRequest::CreateTable(req) => self.create_table(req).await,
|
||||
SqlRequest::CreateDatabase(req) => self.create_database(req, query_ctx.clone()).await,
|
||||
SqlRequest::Alter(req) => self.alter(req).await,
|
||||
SqlRequest::DropTable(req) => self.drop_table(req).await,
|
||||
SqlRequest::Delete(req) => self.delete(query_ctx.clone(), req).await,
|
||||
SqlRequest::CopyTable(req) => self.copy_table(req).await,
|
||||
SqlRequest::CopyTableFrom(req) => self.copy_table_from(req).await,
|
||||
SqlRequest::CopyTable(req) => match req.direction {
|
||||
CopyDirection::Export => self.copy_table_to(req).await,
|
||||
CopyDirection::Import => self.copy_table_from(req).await,
|
||||
},
|
||||
SqlRequest::ShowDatabases(req) => {
|
||||
show_databases(req, self.catalog_manager.clone()).context(ExecuteSqlSnafu)
|
||||
}
|
||||
@@ -105,19 +95,6 @@ impl SqlHandler {
|
||||
show_tables(req, self.catalog_manager.clone(), query_ctx.clone())
|
||||
.context(ExecuteSqlSnafu)
|
||||
}
|
||||
SqlRequest::DescribeTable(req) => {
|
||||
let (catalog, schema, table) =
|
||||
table_idents_to_full_name(req.name(), query_ctx.clone())?;
|
||||
let table = self
|
||||
.catalog_manager
|
||||
.table(&catalog, &schema, &table)
|
||||
.await
|
||||
.context(error::CatalogSnafu)?
|
||||
.with_context(|| TableNotFoundSnafu {
|
||||
table_name: req.name().to_string(),
|
||||
})?;
|
||||
describe_table(table).context(ExecuteSqlSnafu)
|
||||
}
|
||||
SqlRequest::FlushTable(req) => self.flush_table(req).await,
|
||||
};
|
||||
if let Err(e) = &result {
|
||||
@@ -149,239 +126,3 @@ impl SqlHandler {
|
||||
.context(CloseTableEngineSnafu)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::any::Any;
|
||||
use std::sync::Arc;
|
||||
|
||||
use catalog::{CatalogManager, RegisterTableRequest};
|
||||
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use common_query::logical_plan::Expr;
|
||||
use common_query::physical_plan::PhysicalPlanRef;
|
||||
use common_test_util::temp_dir::create_temp_dir;
|
||||
use common_time::timestamp::Timestamp;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::schema::{ColumnSchema, SchemaBuilder, SchemaRef};
|
||||
use datatypes::value::Value;
|
||||
use futures::StreamExt;
|
||||
use log_store::NoopLogStore;
|
||||
use mito::config::EngineConfig as TableEngineConfig;
|
||||
use mito::engine::MitoEngine;
|
||||
use object_store::services::Fs as Builder;
|
||||
use object_store::{ObjectStore, ObjectStoreBuilder};
|
||||
use query::parser::{QueryLanguageParser, QueryStatement};
|
||||
use query::QueryEngineFactory;
|
||||
use session::context::QueryContext;
|
||||
use sql::statements::statement::Statement;
|
||||
use storage::compaction::noop::NoopCompactionScheduler;
|
||||
use storage::config::EngineConfig as StorageEngineConfig;
|
||||
use storage::EngineImpl;
|
||||
use table::error::Result as TableResult;
|
||||
use table::metadata::TableInfoRef;
|
||||
use table::Table;
|
||||
|
||||
use super::*;
|
||||
use crate::error::Error;
|
||||
use crate::sql::insert::InsertRequests;
|
||||
|
||||
struct DemoTable;
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl Table for DemoTable {
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
|
||||
fn schema(&self) -> SchemaRef {
|
||||
let column_schemas = vec![
|
||||
ColumnSchema::new("host", ConcreteDataType::string_datatype(), false),
|
||||
ColumnSchema::new("cpu", ConcreteDataType::float64_datatype(), true),
|
||||
ColumnSchema::new("memory", ConcreteDataType::float64_datatype(), true),
|
||||
ColumnSchema::new(
|
||||
"ts",
|
||||
ConcreteDataType::timestamp_millisecond_datatype(),
|
||||
true,
|
||||
)
|
||||
.with_time_index(true),
|
||||
];
|
||||
|
||||
Arc::new(
|
||||
SchemaBuilder::try_from(column_schemas)
|
||||
.unwrap()
|
||||
.build()
|
||||
.unwrap(),
|
||||
)
|
||||
}
|
||||
|
||||
fn table_info(&self) -> TableInfoRef {
|
||||
unimplemented!()
|
||||
}
|
||||
|
||||
async fn scan(
|
||||
&self,
|
||||
_projection: Option<&Vec<usize>>,
|
||||
_filters: &[Expr],
|
||||
_limit: Option<usize>,
|
||||
) -> TableResult<PhysicalPlanRef> {
|
||||
unimplemented!();
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_statement_to_request() {
|
||||
let dir = create_temp_dir("setup_test_engine_and_table");
|
||||
let store_dir = dir.path().to_string_lossy();
|
||||
let accessor = Builder::default().root(&store_dir).build().unwrap();
|
||||
let object_store = ObjectStore::new(accessor).finish();
|
||||
let compaction_scheduler = Arc::new(NoopCompactionScheduler::default());
|
||||
let sql = r#"insert into demo(host, cpu, memory, ts) values
|
||||
('host1', 66.6, 1024, 1655276557000),
|
||||
('host2', 88.8, 333.3, 1655276558000)
|
||||
"#;
|
||||
|
||||
let table_engine = Arc::new(MitoEngine::<EngineImpl<NoopLogStore>>::new(
|
||||
TableEngineConfig::default(),
|
||||
EngineImpl::new(
|
||||
StorageEngineConfig::default(),
|
||||
Arc::new(NoopLogStore::default()),
|
||||
object_store.clone(),
|
||||
compaction_scheduler,
|
||||
),
|
||||
object_store,
|
||||
));
|
||||
|
||||
let catalog_list = Arc::new(
|
||||
catalog::local::LocalCatalogManager::try_new(table_engine.clone())
|
||||
.await
|
||||
.unwrap(),
|
||||
);
|
||||
catalog_list.start().await.unwrap();
|
||||
assert!(catalog_list
|
||||
.register_table(RegisterTableRequest {
|
||||
catalog: DEFAULT_CATALOG_NAME.to_string(),
|
||||
schema: DEFAULT_SCHEMA_NAME.to_string(),
|
||||
table_name: "demo".to_string(),
|
||||
table_id: 1,
|
||||
table: Arc::new(DemoTable),
|
||||
})
|
||||
.await
|
||||
.unwrap());
|
||||
|
||||
let factory = QueryEngineFactory::new(catalog_list.clone());
|
||||
let query_engine = factory.query_engine();
|
||||
let sql_handler = SqlHandler::new(
|
||||
table_engine.clone(),
|
||||
catalog_list.clone(),
|
||||
query_engine.clone(),
|
||||
table_engine,
|
||||
None,
|
||||
);
|
||||
|
||||
let stmt = match QueryLanguageParser::parse_sql(sql).unwrap() {
|
||||
QueryStatement::Sql(Statement::Insert(i)) => i,
|
||||
_ => {
|
||||
unreachable!()
|
||||
}
|
||||
};
|
||||
let request = sql_handler
|
||||
.insert_to_requests(catalog_list.clone(), *stmt, QueryContext::arc())
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
match request {
|
||||
InsertRequests::Request(SqlRequest::Insert(req)) => {
|
||||
assert_eq!(req.table_name, "demo");
|
||||
let columns_values = req.columns_values;
|
||||
assert_eq!(4, columns_values.len());
|
||||
|
||||
let hosts = &columns_values["host"];
|
||||
assert_eq!(2, hosts.len());
|
||||
assert_eq!(Value::from("host1"), hosts.get(0));
|
||||
assert_eq!(Value::from("host2"), hosts.get(1));
|
||||
|
||||
let cpus = &columns_values["cpu"];
|
||||
assert_eq!(2, cpus.len());
|
||||
assert_eq!(Value::from(66.6f64), cpus.get(0));
|
||||
assert_eq!(Value::from(88.8f64), cpus.get(1));
|
||||
|
||||
let memories = &columns_values["memory"];
|
||||
assert_eq!(2, memories.len());
|
||||
assert_eq!(Value::from(1024f64), memories.get(0));
|
||||
assert_eq!(Value::from(333.3f64), memories.get(1));
|
||||
|
||||
let ts = &columns_values["ts"];
|
||||
assert_eq!(2, ts.len());
|
||||
assert_eq!(
|
||||
Value::from(Timestamp::new_millisecond(1655276557000i64)),
|
||||
ts.get(0)
|
||||
);
|
||||
assert_eq!(
|
||||
Value::from(Timestamp::new_millisecond(1655276558000i64)),
|
||||
ts.get(1)
|
||||
);
|
||||
}
|
||||
_ => {
|
||||
panic!("Not supposed to reach here")
|
||||
}
|
||||
}
|
||||
|
||||
// test inert into select
|
||||
|
||||
// type mismatch
|
||||
let sql = "insert into demo(ts) select number from numbers limit 3";
|
||||
|
||||
let stmt = match QueryLanguageParser::parse_sql(sql).unwrap() {
|
||||
QueryStatement::Sql(Statement::Insert(i)) => i,
|
||||
_ => {
|
||||
unreachable!()
|
||||
}
|
||||
};
|
||||
let request = sql_handler
|
||||
.insert_to_requests(catalog_list.clone(), *stmt, QueryContext::arc())
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
match request {
|
||||
InsertRequests::Stream(mut stream) => {
|
||||
assert!(matches!(
|
||||
stream.next().await.unwrap().unwrap_err(),
|
||||
Error::ColumnTypeMismatch { .. }
|
||||
));
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
|
||||
let sql = "insert into demo(cpu) select cast(number as double) from numbers limit 3";
|
||||
let stmt = match QueryLanguageParser::parse_sql(sql).unwrap() {
|
||||
QueryStatement::Sql(Statement::Insert(i)) => i,
|
||||
_ => {
|
||||
unreachable!()
|
||||
}
|
||||
};
|
||||
let request = sql_handler
|
||||
.insert_to_requests(catalog_list.clone(), *stmt, QueryContext::arc())
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
match request {
|
||||
InsertRequests::Stream(mut stream) => {
|
||||
let mut times = 0;
|
||||
while let Some(Ok(SqlRequest::Insert(req))) = stream.next().await {
|
||||
times += 1;
|
||||
assert_eq!(req.table_name, "demo");
|
||||
let columns_values = req.columns_values;
|
||||
assert_eq!(1, columns_values.len());
|
||||
|
||||
let memories = &columns_values["cpu"];
|
||||
assert_eq!(3, memories.len());
|
||||
assert_eq!(Value::from(0.0f64), memories.get(0));
|
||||
assert_eq!(Value::from(1.0f64), memories.get(1));
|
||||
assert_eq!(Value::from(2.0f64), memories.get(2));
|
||||
}
|
||||
assert_eq!(1, times);
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,185 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::pin::Pin;
|
||||
|
||||
use common_query::physical_plan::SessionContext;
|
||||
use common_query::Output;
|
||||
use common_recordbatch::adapter::DfRecordBatchStreamAdapter;
|
||||
use datafusion::parquet::arrow::ArrowWriter;
|
||||
use datafusion::parquet::basic::{Compression, Encoding};
|
||||
use datafusion::parquet::file::properties::WriterProperties;
|
||||
use datafusion::physical_plan::RecordBatchStream;
|
||||
use futures::TryStreamExt;
|
||||
use object_store::ObjectStore;
|
||||
use snafu::ResultExt;
|
||||
use table::engine::TableReference;
|
||||
use table::requests::CopyTableRequest;
|
||||
use url::{ParseError, Url};
|
||||
|
||||
use super::copy_table_from::{build_fs_backend, build_s3_backend, S3_SCHEMA};
|
||||
use crate::error::{self, Result};
|
||||
use crate::sql::SqlHandler;
|
||||
|
||||
impl SqlHandler {
|
||||
fn build_backend(
|
||||
&self,
|
||||
url: &str,
|
||||
connection: HashMap<String, String>,
|
||||
) -> Result<(ObjectStore, String)> {
|
||||
let result = Url::parse(url);
|
||||
|
||||
match result {
|
||||
Ok(url) => {
|
||||
let host = url.host_str();
|
||||
|
||||
let schema = url.scheme();
|
||||
|
||||
let path = url.path();
|
||||
|
||||
match schema.to_uppercase().as_str() {
|
||||
S3_SCHEMA => {
|
||||
let object_store = build_s3_backend(host, "/", connection)?;
|
||||
Ok((object_store, path.to_string()))
|
||||
}
|
||||
|
||||
_ => error::UnsupportedBackendProtocolSnafu {
|
||||
protocol: schema.to_string(),
|
||||
}
|
||||
.fail(),
|
||||
}
|
||||
}
|
||||
Err(ParseError::RelativeUrlWithoutBase) => {
|
||||
let object_store = build_fs_backend("/")?;
|
||||
Ok((object_store, url.to_string()))
|
||||
}
|
||||
Err(err) => Err(error::Error::InvalidUrl {
|
||||
url: url.to_string(),
|
||||
source: err,
|
||||
}),
|
||||
}
|
||||
}
|
||||
pub(crate) async fn copy_table(&self, req: CopyTableRequest) -> Result<Output> {
|
||||
let table_ref = TableReference {
|
||||
catalog: &req.catalog_name,
|
||||
schema: &req.schema_name,
|
||||
table: &req.table_name,
|
||||
};
|
||||
let table = self.get_table(&table_ref)?;
|
||||
|
||||
let stream = table
|
||||
.scan(None, &[], None)
|
||||
.await
|
||||
.with_context(|_| error::CopyTableSnafu {
|
||||
table_name: table_ref.to_string(),
|
||||
})?;
|
||||
|
||||
let stream = stream
|
||||
.execute(0, SessionContext::default().task_ctx())
|
||||
.context(error::TableScanExecSnafu)?;
|
||||
let stream = Box::pin(DfRecordBatchStreamAdapter::new(stream));
|
||||
|
||||
let (object_store, file_name) = self.build_backend(&req.file_name, req.connection)?;
|
||||
|
||||
let mut parquet_writer = ParquetWriter::new(file_name, stream, object_store);
|
||||
// TODO(jiachun):
|
||||
// For now, COPY is implemented synchronously.
|
||||
// When copying a large table, it may block for a long time.
// Maybe we should make "copy" run in the background?
|
||||
// Like PG: https://www.postgresql.org/docs/current/sql-copy.html
|
||||
let rows = parquet_writer.flush().await?;
|
||||
|
||||
Ok(Output::AffectedRows(rows))
|
||||
}
|
||||
}
|
||||
|
||||
type DfRecordBatchStream = Pin<Box<DfRecordBatchStreamAdapter>>;
|
||||
|
||||
struct ParquetWriter {
|
||||
file_name: String,
|
||||
stream: DfRecordBatchStream,
|
||||
object_store: ObjectStore,
|
||||
max_row_group_size: usize,
|
||||
max_rows_in_segment: usize,
|
||||
}
|
||||
|
||||
impl ParquetWriter {
|
||||
pub fn new(file_name: String, stream: DfRecordBatchStream, object_store: ObjectStore) -> Self {
|
||||
Self {
|
||||
file_name,
|
||||
stream,
|
||||
object_store,
|
||||
// TODO(jiachun): make these configurable: WITH (max_row_group_size=xxx, max_rows_in_segment=xxx)
|
||||
max_row_group_size: 4096,
|
||||
max_rows_in_segment: 5000000, // default 5M rows per segment
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn flush(&mut self) -> Result<usize> {
|
||||
let schema = self.stream.as_ref().schema();
|
||||
let writer_props = WriterProperties::builder()
|
||||
.set_compression(Compression::ZSTD)
|
||||
.set_encoding(Encoding::PLAIN)
|
||||
.set_max_row_group_size(self.max_row_group_size)
|
||||
.build();
|
||||
let mut total_rows = 0;
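// Each iteration buffers at most `max_rows_in_segment` rows into an in-memory parquet file,
// then uploads it to the object store as a separate segment object.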
|
||||
loop {
|
||||
let mut buf = vec![];
|
||||
let mut arrow_writer =
|
||||
ArrowWriter::try_new(&mut buf, schema.clone(), Some(writer_props.clone()))
|
||||
.context(error::WriteParquetSnafu)?;
|
||||
|
||||
let mut rows = 0;
|
||||
let mut end_loop = true;
|
||||
// TODO(hl & jiachun): Since OpenDAL's writer is async and ArrowWriter requires a `std::io::Write`,
|
||||
// here we use a Vec<u8> to buffer all parquet bytes in memory and write them to the object store
// in one go. Maybe we should find a better way to bridge ArrowWriter and OpenDAL's Object.
|
||||
while let Some(batch) = self
|
||||
.stream
|
||||
.try_next()
|
||||
.await
|
||||
.context(error::PollStreamSnafu)?
|
||||
{
|
||||
arrow_writer
|
||||
.write(&batch)
|
||||
.context(error::WriteParquetSnafu)?;
|
||||
rows += batch.num_rows();
|
||||
if rows >= self.max_rows_in_segment {
|
||||
end_loop = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
let start_row_num = total_rows + 1;
|
||||
total_rows += rows;
|
||||
arrow_writer.close().context(error::WriteParquetSnafu)?;
|
||||
|
||||
// if rows == 0, we just end up with an empty file.
|
||||
//
|
||||
// file_name like:
|
||||
// "file_name_1_1000000" (row num: 1 ~ 1000000),
|
||||
// "file_name_1000001_xxx" (row num: 1000001 ~ xxx)
|
||||
let file_name = format!("{}_{}_{}", self.file_name, start_row_num, total_rows);
|
||||
let object = self.object_store.object(&file_name);
|
||||
object.write(buf).await.context(error::WriteObjectSnafu {
|
||||
path: object.path(),
|
||||
})?;
|
||||
|
||||
if end_loop {
|
||||
return Ok(total_rows);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -15,35 +15,26 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use async_compat::CompatExt;
|
||||
use common_datasource::lister::{Lister, Source};
|
||||
use common_datasource::object_store::{build_backend, parse_url};
|
||||
use common_datasource::util::find_dir_and_filename;
|
||||
use common_query::Output;
|
||||
use common_recordbatch::error::DataTypesSnafu;
|
||||
use datafusion::parquet::arrow::ParquetRecordBatchStreamBuilder;
|
||||
use datatypes::arrow::record_batch::RecordBatch;
|
||||
use datatypes::vectors::{Helper, VectorRef};
|
||||
use futures::future;
|
||||
use futures_util::TryStreamExt;
|
||||
use object_store::services::{Fs, S3};
|
||||
use object_store::{Object, ObjectStore, ObjectStoreBuilder};
|
||||
use regex::Regex;
|
||||
use snafu::{ensure, ResultExt};
|
||||
use table::engine::TableReference;
|
||||
use table::requests::{CopyTableFromRequest, InsertRequest};
|
||||
use table::requests::{CopyTableRequest, InsertRequest};
|
||||
use tokio::io::BufReader;
|
||||
use url::{ParseError, Url};
|
||||
|
||||
use crate::error::{self, Result};
|
||||
use crate::sql::SqlHandler;
|
||||
|
||||
pub const S3_SCHEMA: &str = "S3";
|
||||
const ENDPOINT_URL: &str = "ENDPOINT_URL";
|
||||
const ACCESS_KEY_ID: &str = "ACCESS_KEY_ID";
|
||||
const SECRET_ACCESS_KEY: &str = "SECRET_ACCESS_KEY";
|
||||
const SESSION_TOKEN: &str = "SESSION_TOKEN";
|
||||
const REGION: &str = "REGION";
|
||||
const ENABLE_VIRTUAL_HOST_STYLE: &str = "ENABLE_VIRTUAL_HOST_STYLE";
|
||||
|
||||
impl SqlHandler {
|
||||
pub(crate) async fn copy_table_from(&self, req: CopyTableFromRequest) -> Result<Output> {
|
||||
pub(crate) async fn copy_table_from(&self, req: CopyTableRequest) -> Result<Output> {
|
||||
let table_ref = TableReference {
|
||||
catalog: &req.catalog_name,
|
||||
schema: &req.schema_name,
|
||||
@@ -51,16 +42,37 @@ impl SqlHandler {
|
||||
};
|
||||
let table = self.get_table(&table_ref)?;
|
||||
|
||||
let datasource = DataSource::new(&req.from, req.pattern, req.connection)?;
|
||||
let (_schema, _host, path) = parse_url(&req.location).context(error::ParseUrlSnafu)?;
|
||||
|
||||
let objects = datasource.list().await?;
|
||||
let object_store =
|
||||
build_backend(&req.location, req.connection).context(error::BuildBackendSnafu)?;
|
||||
|
||||
let (dir, filename) = find_dir_and_filename(&path);
|
||||
let regex = req
|
||||
.pattern
|
||||
.as_ref()
|
||||
.map(|x| Regex::new(x))
|
||||
.transpose()
|
||||
.context(error::BuildRegexSnafu)?;
|
||||
|
||||
let source = if let Some(filename) = filename {
|
||||
Source::Filename(filename)
|
||||
} else {
|
||||
Source::Dir
|
||||
};
|
||||
|
||||
let lister = Lister::new(object_store.clone(), source, dir, regex);
|
||||
|
||||
let entries = lister.list().await.context(error::ListObjectsSnafu)?;
|
||||
|
||||
let mut buf: Vec<RecordBatch> = Vec::new();
|
||||
|
||||
for obj in objects.iter() {
|
||||
let reader = obj.reader().await.context(error::ReadObjectSnafu {
|
||||
path: &obj.path().to_string(),
|
||||
})?;
|
||||
for entry in entries.iter() {
|
||||
let path = entry.path();
|
||||
let reader = object_store
|
||||
.reader(path)
|
||||
.await
|
||||
.context(error::ReadObjectSnafu { path })?;
|
||||
|
||||
let buf_reader = BufReader::new(reader.compat());
|
||||
|
||||
@@ -131,321 +143,3 @@ impl SqlHandler {
|
||||
Ok(Output::AffectedRows(result.iter().sum()))
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
enum Source {
|
||||
Filename(String),
|
||||
Dir,
|
||||
}
|
||||
|
||||
struct DataSource {
|
||||
object_store: ObjectStore,
|
||||
source: Source,
|
||||
path: String,
|
||||
regex: Option<Regex>,
|
||||
}
|
||||
|
||||
impl DataSource {
|
||||
fn from_path(url: &str, regex: Option<Regex>) -> Result<DataSource> {
|
||||
let result = if url.ends_with('/') {
|
||||
Url::from_directory_path(url)
|
||||
} else {
|
||||
Url::from_file_path(url)
|
||||
};
|
||||
|
||||
match result {
|
||||
Ok(url) => {
|
||||
let path = url.path();
|
||||
|
||||
let (path, filename) = DataSource::find_dir_and_filename(path);
|
||||
|
||||
let source = if let Some(filename) = filename {
|
||||
Source::Filename(filename)
|
||||
} else {
|
||||
Source::Dir
|
||||
};
|
||||
|
||||
let object_store = build_fs_backend(&path)?;
|
||||
|
||||
Ok(DataSource {
|
||||
object_store,
|
||||
source,
|
||||
path,
|
||||
regex,
|
||||
})
|
||||
}
|
||||
Err(()) => error::InvalidPathSnafu {
|
||||
path: url.to_string(),
|
||||
}
|
||||
.fail(),
|
||||
}
|
||||
}
|
||||
|
||||
fn from_url(
|
||||
url: Url,
|
||||
regex: Option<Regex>,
|
||||
connection: HashMap<String, String>,
|
||||
) -> Result<DataSource> {
|
||||
let host = url.host_str();
|
||||
|
||||
let path = url.path();
|
||||
|
||||
let schema = url.scheme();
|
||||
|
||||
let (dir, filename) = DataSource::find_dir_and_filename(path);
|
||||
|
||||
let source = if let Some(filename) = filename {
|
||||
Source::Filename(filename)
|
||||
} else {
|
||||
Source::Dir
|
||||
};
|
||||
|
||||
let object_store = match schema.to_uppercase().as_str() {
|
||||
S3_SCHEMA => build_s3_backend(host, &dir, connection)?,
|
||||
_ => {
|
||||
return error::UnsupportedBackendProtocolSnafu {
|
||||
protocol: schema.to_string(),
|
||||
}
|
||||
.fail()
|
||||
}
|
||||
};
|
||||
|
||||
Ok(DataSource {
|
||||
object_store,
|
||||
source,
|
||||
path: dir,
|
||||
regex,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn new(
|
||||
url: &str,
|
||||
pattern: Option<String>,
|
||||
connection: HashMap<String, String>,
|
||||
) -> Result<DataSource> {
|
||||
let regex = if let Some(pattern) = pattern {
|
||||
let regex = Regex::new(&pattern).context(error::BuildRegexSnafu)?;
|
||||
Some(regex)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
let result = Url::parse(url);
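// A plain path has no scheme, so `Url::parse` fails with `RelativeUrlWithoutBase`;
// in that case we fall back to treating the input as a local filesystem path.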
|
||||
|
||||
match result {
|
||||
Ok(url) => DataSource::from_url(url, regex, connection),
|
||||
Err(err) => {
|
||||
if ParseError::RelativeUrlWithoutBase == err {
|
||||
DataSource::from_path(url, regex)
|
||||
} else {
|
||||
Err(error::Error::InvalidUrl {
|
||||
url: url.to_string(),
|
||||
source: err,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn list(&self) -> Result<Vec<Object>> {
|
||||
match &self.source {
|
||||
Source::Dir => {
|
||||
let streamer = self
|
||||
.object_store
|
||||
.object("/")
|
||||
.list()
|
||||
.await
|
||||
.context(error::ListObjectsSnafu { path: &self.path })?;
|
||||
streamer
|
||||
.try_filter(|f| {
|
||||
let res = if let Some(regex) = &self.regex {
|
||||
regex.is_match(f.name())
|
||||
} else {
|
||||
true
|
||||
};
|
||||
future::ready(res)
|
||||
})
|
||||
.try_collect::<Vec<_>>()
|
||||
.await
|
||||
.context(error::ListObjectsSnafu { path: &self.path })
|
||||
}
|
||||
Source::Filename(filename) => {
|
||||
let obj = self.object_store.object(filename);
|
||||
|
||||
Ok(vec![obj])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn find_dir_and_filename(path: &str) -> (String, Option<String>) {
|
||||
if path.is_empty() {
|
||||
("/".to_string(), None)
|
||||
} else if path.ends_with('/') {
|
||||
(path.to_string(), None)
|
||||
} else if let Some(idx) = path.rfind('/') {
|
||||
(
|
||||
path[..idx + 1].to_string(),
|
||||
Some(path[idx + 1..].to_string()),
|
||||
)
|
||||
} else {
|
||||
("/".to_string(), Some(path.to_string()))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn build_s3_backend(
|
||||
host: Option<&str>,
|
||||
path: &str,
|
||||
connection: HashMap<String, String>,
|
||||
) -> Result<ObjectStore> {
|
||||
let mut builder = S3::default();
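// Populate the S3 builder from the connection options; options that are not
// provided keep the builder defaults.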
|
||||
|
||||
builder.root(path);
|
||||
|
||||
if let Some(bucket) = host {
|
||||
builder.bucket(bucket);
|
||||
}
|
||||
|
||||
if let Some(endpoint) = connection.get(ENDPOINT_URL) {
|
||||
builder.endpoint(endpoint);
|
||||
}
|
||||
|
||||
if let Some(region) = connection.get(REGION) {
|
||||
builder.region(region);
|
||||
}
|
||||
|
||||
if let Some(key_id) = connection.get(ACCESS_KEY_ID) {
|
||||
builder.access_key_id(key_id);
|
||||
}
|
||||
|
||||
if let Some(key) = connection.get(SECRET_ACCESS_KEY) {
|
||||
builder.secret_access_key(key);
|
||||
}
|
||||
|
||||
if let Some(session_token) = connection.get(SESSION_TOKEN) {
|
||||
builder.security_token(session_token);
|
||||
}
|
||||
|
||||
if let Some(enable_str) = connection.get(ENABLE_VIRTUAL_HOST_STYLE) {
|
||||
let enable = enable_str.as_str().parse::<bool>().map_err(|e| {
|
||||
error::InvalidConnectionSnafu {
|
||||
msg: format!(
|
||||
"failed to parse the option {}={}, {}",
|
||||
ENABLE_VIRTUAL_HOST_STYLE, enable_str, e
|
||||
),
|
||||
}
|
||||
.build()
|
||||
})?;
|
||||
if enable {
|
||||
builder.enable_virtual_host_style();
|
||||
}
|
||||
}
|
||||
|
||||
let accessor = builder.build().context(error::BuildBackendSnafu)?;
|
||||
|
||||
Ok(ObjectStore::new(accessor).finish())
|
||||
}
|
||||
|
||||
pub fn build_fs_backend(root: &str) -> Result<ObjectStore> {
|
||||
let accessor = Fs::default()
|
||||
.root(root)
|
||||
.build()
|
||||
.context(error::BuildBackendSnafu)?;
|
||||
|
||||
Ok(ObjectStore::new(accessor).finish())
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
use url::Url;
|
||||
|
||||
use super::*;
|
||||
#[test]
|
||||
fn test_parse_uri() {
|
||||
struct Test<'a> {
|
||||
uri: &'a str,
|
||||
expected_path: &'a str,
|
||||
expected_schema: &'a str,
|
||||
}
|
||||
|
||||
let tests = [
|
||||
Test {
|
||||
uri: "s3://bucket/to/path/",
|
||||
expected_path: "/to/path/",
|
||||
expected_schema: "s3",
|
||||
},
|
||||
Test {
|
||||
uri: "fs:///to/path/",
|
||||
expected_path: "/to/path/",
|
||||
expected_schema: "fs",
|
||||
},
|
||||
Test {
|
||||
uri: "fs:///to/path/file",
|
||||
expected_path: "/to/path/file",
|
||||
expected_schema: "fs",
|
||||
},
|
||||
];
|
||||
for test in tests {
|
||||
let parsed_uri = Url::parse(test.uri).unwrap();
|
||||
assert_eq!(parsed_uri.path(), test.expected_path);
|
||||
assert_eq!(parsed_uri.scheme(), test.expected_schema);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_path_and_dir() {
|
||||
let parsed = Url::from_file_path("/to/path/file").unwrap();
|
||||
assert_eq!(parsed.path(), "/to/path/file");
|
||||
|
||||
let parsed = Url::from_directory_path("/to/path/").unwrap();
|
||||
assert_eq!(parsed.path(), "/to/path/");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_find_dir_and_filename() {
|
||||
struct Test<'a> {
|
||||
path: &'a str,
|
||||
expected_dir: &'a str,
|
||||
expected_filename: Option<String>,
|
||||
}
|
||||
|
||||
let tests = [
|
||||
Test {
|
||||
path: "to/path/",
|
||||
expected_dir: "to/path/",
|
||||
expected_filename: None,
|
||||
},
|
||||
Test {
|
||||
path: "to/path/filename",
|
||||
expected_dir: "to/path/",
|
||||
expected_filename: Some("filename".into()),
|
||||
},
|
||||
Test {
|
||||
path: "/to/path/filename",
|
||||
expected_dir: "/to/path/",
|
||||
expected_filename: Some("filename".into()),
|
||||
},
|
||||
Test {
|
||||
path: "/",
|
||||
expected_dir: "/",
|
||||
expected_filename: None,
|
||||
},
|
||||
Test {
|
||||
path: "filename",
|
||||
expected_dir: "/",
|
||||
expected_filename: Some("filename".into()),
|
||||
},
|
||||
Test {
|
||||
path: "",
|
||||
expected_dir: "/",
|
||||
expected_filename: None,
|
||||
},
|
||||
];
|
||||
|
||||
for test in tests {
|
||||
let (path, filename) = DataSource::find_dir_and_filename(test.path);
|
||||
assert_eq!(test.expected_dir, path);
|
||||
assert_eq!(test.expected_filename, filename)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
63
src/datanode/src/sql/copy_table_to.rs
Normal file
@@ -0,0 +1,63 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use common_datasource;
|
||||
use common_datasource::object_store::{build_backend, parse_url};
|
||||
use common_query::physical_plan::SessionContext;
|
||||
use common_query::Output;
|
||||
use snafu::ResultExt;
|
||||
use storage::sst::SstInfo;
|
||||
use storage::{ParquetWriter, Source};
|
||||
use table::engine::TableReference;
|
||||
use table::requests::CopyTableRequest;
|
||||
|
||||
use crate::error::{self, Result, WriteParquetSnafu};
|
||||
use crate::sql::SqlHandler;
|
||||
|
||||
impl SqlHandler {
|
||||
pub(crate) async fn copy_table_to(&self, req: CopyTableRequest) -> Result<Output> {
|
||||
let table_ref = TableReference {
|
||||
catalog: &req.catalog_name,
|
||||
schema: &req.schema_name,
|
||||
table: &req.table_name,
|
||||
};
|
||||
let table = self.get_table(&table_ref)?;
|
||||
|
||||
let stream = table
|
||||
.scan(None, &[], None)
|
||||
.await
|
||||
.with_context(|_| error::CopyTableSnafu {
|
||||
table_name: table_ref.to_string(),
|
||||
})?;
|
||||
|
||||
let stream = stream
|
||||
.execute(0, SessionContext::default().task_ctx())
|
||||
.context(error::TableScanExecSnafu)?;
|
||||
|
||||
let (_schema, _host, path) = parse_url(&req.location).context(error::ParseUrlSnafu)?;
|
||||
let object_store =
|
||||
build_backend(&req.location, req.connection).context(error::BuildBackendSnafu)?;
|
||||
|
||||
let writer = ParquetWriter::new(&path, Source::Stream(stream), object_store);
|
||||
|
||||
let rows_copied = writer
|
||||
.write_sst(&storage::sst::WriteOptions::default())
|
||||
.await
|
||||
.context(WriteParquetSnafu)?
|
||||
.map(|SstInfo { num_rows, .. }| num_rows)
|
||||
.unwrap_or(0);
|
||||
|
||||
Ok(Output::AffectedRows(rows_copied))
|
||||
}
|
||||
}
|
||||
@@ -313,13 +313,15 @@ mod tests {
|
||||
use common_base::readable_size::ReadableSize;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::schema::Schema;
|
||||
use query::parser::QueryLanguageParser;
|
||||
use session::context::QueryContext;
|
||||
use sql::dialect::GenericDialect;
|
||||
use sql::parser::ParserContext;
|
||||
use sql::statements::statement::Statement;
|
||||
|
||||
use super::*;
|
||||
use crate::error::Error;
|
||||
use crate::tests::test_util::create_mock_sql_handler;
|
||||
use crate::tests::test_util::{create_mock_sql_handler, MockInstance};
|
||||
|
||||
fn sql_to_statement(sql: &str) -> CreateTable {
|
||||
let mut res = ParserContext::create_with_dialect(sql, &GenericDialect {}).unwrap();
|
||||
@@ -522,4 +524,42 @@ mod tests {
|
||||
schema.column_schema_by_name("memory").unwrap().data_type
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread")]
|
||||
async fn create_table_by_procedure() {
|
||||
let instance = MockInstance::with_procedure_enabled("create_table_by_procedure").await;
|
||||
|
||||
let sql = r#"create table test_table(
|
||||
host string,
|
||||
ts timestamp,
|
||||
cpu double default 0,
|
||||
memory double,
|
||||
TIME INDEX (ts),
|
||||
PRIMARY KEY(host)
|
||||
) engine=mito with(regions=1);"#;
|
||||
let stmt = QueryLanguageParser::parse_sql(sql).unwrap();
|
||||
let output = instance
|
||||
.inner()
|
||||
.execute_stmt(stmt, QueryContext::arc())
|
||||
.await
|
||||
.unwrap();
|
||||
assert!(matches!(output, Output::AffectedRows(0)));
|
||||
|
||||
// create if not exists
|
||||
let sql = r#"create table if not exists test_table(
|
||||
host string,
|
||||
ts timestamp,
|
||||
cpu double default 0,
|
||||
memory double,
|
||||
TIME INDEX (ts),
|
||||
PRIMARY KEY(host)
|
||||
) engine=mito with(regions=1);"#;
|
||||
let stmt = QueryLanguageParser::parse_sql(sql).unwrap();
|
||||
let output = instance
|
||||
.inner()
|
||||
.execute_stmt(stmt, QueryContext::arc())
|
||||
.await
|
||||
.unwrap();
|
||||
assert!(matches!(output, Output::AffectedRows(0)));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,142 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_query::Output;
|
||||
use datatypes::data_type::DataType;
|
||||
use datatypes::prelude::VectorRef;
|
||||
use datatypes::vectors::StringVector;
|
||||
use session::context::QueryContextRef;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use sql::ast::{BinaryOperator, Expr, Value};
|
||||
use sql::statements::delete::Delete;
|
||||
use sql::statements::sql_value_to_value;
|
||||
use table::engine::TableReference;
|
||||
use table::requests::DeleteRequest;
|
||||
use table::TableRef;
|
||||
|
||||
use crate::error::{ColumnNotFoundSnafu, DeleteSnafu, InvalidSqlSnafu, NotSupportSqlSnafu, Result};
|
||||
use crate::instance::sql::table_idents_to_full_name;
|
||||
use crate::sql::SqlHandler;
|
||||
|
||||
impl SqlHandler {
|
||||
pub(crate) async fn delete(&self, query_ctx: QueryContextRef, stmt: Delete) -> Result<Output> {
|
||||
let (catalog_name, schema_name, table_name) =
|
||||
table_idents_to_full_name(stmt.table_name(), query_ctx)?;
|
||||
let table_ref = TableReference {
|
||||
catalog: &catalog_name.to_string(),
|
||||
schema: &schema_name.to_string(),
|
||||
table: &table_name.to_string(),
|
||||
};
|
||||
|
||||
let table = self.get_table(&table_ref)?;
|
||||
|
||||
let req = DeleteRequest {
|
||||
key_column_values: parse_selection(stmt.selection(), &table)?,
|
||||
};
|
||||
|
||||
let affected_rows = table.delete(req).await.with_context(|_| DeleteSnafu {
|
||||
table_name: table_ref.to_string(),
|
||||
})?;
|
||||
|
||||
Ok(Output::AffectedRows(affected_rows))
|
||||
}
|
||||
}
|
||||
|
||||
/// Parses the selection; the currently supported format is `tagkey1 = 'tagvalue1' and 'ts' = 'value'`
/// (only `=` and `and` are supported in the where clause, and all columns of the key must be provided).
|
||||
fn parse_selection(
|
||||
selection: &Option<Expr>,
|
||||
table: &TableRef,
|
||||
) -> Result<HashMap<String, VectorRef>> {
|
||||
let mut key_column_values = HashMap::new();
|
||||
if let Some(expr) = selection {
|
||||
parse_expr(expr, &mut key_column_values, table)?;
|
||||
}
|
||||
Ok(key_column_values)
|
||||
}
|
||||
|
||||
fn parse_expr(
|
||||
expr: &Expr,
|
||||
key_column_values: &mut HashMap<String, VectorRef>,
|
||||
table: &TableRef,
|
||||
) -> Result<()> {
|
||||
// match BinaryOp
|
||||
if let Expr::BinaryOp { left, op, right } = expr {
|
||||
match (&**left, op, &**right) {
|
||||
// match And operator
|
||||
(Expr::BinaryOp { .. }, BinaryOperator::And, Expr::BinaryOp { .. }) => {
|
||||
parse_expr(left, key_column_values, table)?;
|
||||
parse_expr(right, key_column_values, table)?;
|
||||
return Ok(());
|
||||
}
|
||||
// match Eq operator
|
||||
(Expr::Identifier(column_name), BinaryOperator::Eq, Expr::Value(value)) => {
|
||||
key_column_values.insert(
|
||||
column_name.to_string(),
|
||||
value_to_vector(&column_name.to_string(), value, table)?,
|
||||
);
|
||||
return Ok(());
|
||||
}
|
||||
(Expr::Identifier(column_name), BinaryOperator::Eq, Expr::Identifier(value)) => {
|
||||
key_column_values.insert(
|
||||
column_name.to_string(),
|
||||
Arc::new(StringVector::from(vec![value.to_string()])),
|
||||
);
|
||||
return Ok(());
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
NotSupportSqlSnafu {
|
||||
msg: format!(
|
||||
"Not support sql expr:{expr},correct format is tagkey1 = tagvalue1 and ts = value"
|
||||
),
|
||||
}
|
||||
.fail()
|
||||
}
|
||||
|
||||
/// parse value to vector
|
||||
fn value_to_vector(column_name: &String, sql_value: &Value, table: &TableRef) -> Result<VectorRef> {
|
||||
let schema = table.schema();
|
||||
let column_schema =
|
||||
schema
|
||||
.column_schema_by_name(column_name)
|
||||
.with_context(|| ColumnNotFoundSnafu {
|
||||
table_name: table.table_info().name.clone(),
|
||||
column_name: column_name.to_string(),
|
||||
})?;
|
||||
let data_type = &column_schema.data_type;
|
||||
let value = sql_value_to_value(column_name, data_type, sql_value);
|
||||
match value {
|
||||
Ok(value) => {
|
||||
let mut vec = data_type.create_mutable_vector(1);
|
||||
if vec.try_push_value_ref(value.as_value_ref()).is_err() {
|
||||
return InvalidSqlSnafu {
|
||||
msg: format!(
|
||||
"invalid sql, column name is {column_name}, value is {sql_value}",
|
||||
),
|
||||
}
|
||||
.fail();
|
||||
}
|
||||
Ok(vec.to_vector())
|
||||
}
|
||||
_ => InvalidSqlSnafu {
|
||||
msg: format!("invalid sql, column name is {column_name}, value is {sql_value}",),
|
||||
}
|
||||
.fail(),
|
||||
}
|
||||
}
|
||||
@@ -12,9 +12,9 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use catalog::SchemaProviderRef;
|
||||
use common_query::Output;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use table::engine::TableReference;
|
||||
use table::requests::FlushTableRequest;
|
||||
|
||||
use crate::error::{self, CatalogSnafu, DatabaseNotFoundSnafu, Result};
|
||||
@@ -22,32 +22,22 @@ use crate::sql::SqlHandler;
|
||||
|
||||
impl SqlHandler {
|
||||
pub(crate) async fn flush_table(&self, req: FlushTableRequest) -> Result<Output> {
|
||||
if let Some(table) = &req.table_name {
|
||||
self.flush_table_inner(
|
||||
&req.catalog_name,
|
||||
&req.schema_name,
|
||||
table,
|
||||
req.region_number,
|
||||
)
|
||||
.await?;
|
||||
} else {
|
||||
let schema = self
|
||||
.catalog_manager
|
||||
.schema(&req.catalog_name, &req.schema_name)
|
||||
.context(CatalogSnafu)?
|
||||
.context(DatabaseNotFoundSnafu {
|
||||
catalog: &req.catalog_name,
|
||||
schema: &req.schema_name,
|
||||
})?;
|
||||
let schema = self
|
||||
.catalog_manager
|
||||
.schema(&req.catalog_name, &req.schema_name)
|
||||
.context(CatalogSnafu)?
|
||||
.context(DatabaseNotFoundSnafu {
|
||||
catalog: &req.catalog_name,
|
||||
schema: &req.schema_name,
|
||||
})?;
|
||||
|
||||
if let Some(table) = &req.table_name {
|
||||
self.flush_table_inner(schema, table, req.region_number, req.wait)
|
||||
.await?;
|
||||
} else {
|
||||
let all_table_names = schema.table_names().context(CatalogSnafu)?;
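// No table name was given: flush every table in the schema concurrently.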
|
||||
futures::future::join_all(all_table_names.iter().map(|table| {
|
||||
self.flush_table_inner(
|
||||
&req.catalog_name,
|
||||
&req.schema_name,
|
||||
table,
|
||||
req.region_number,
|
||||
)
|
||||
self.flush_table_inner(schema.clone(), table, req.region_number, req.wait)
|
||||
}))
|
||||
.await
|
||||
.into_iter()
|
||||
@@ -58,21 +48,18 @@ impl SqlHandler {
|
||||
|
||||
async fn flush_table_inner(
|
||||
&self,
|
||||
catalog: &str,
|
||||
schema: &str,
|
||||
table: &str,
|
||||
schema: SchemaProviderRef,
|
||||
table_name: &str,
|
||||
region: Option<u32>,
|
||||
wait: Option<bool>,
|
||||
) -> Result<()> {
|
||||
let table_ref = TableReference {
|
||||
catalog,
|
||||
schema,
|
||||
table,
|
||||
};
|
||||
|
||||
let full_table_name = table_ref.to_string();
|
||||
let table = self.get_table(&table_ref)?;
|
||||
table.flush(region).await.context(error::FlushTableSnafu {
|
||||
table_name: full_table_name,
|
||||
})
|
||||
schema
|
||||
.table(table_name)
|
||||
.await
|
||||
.context(error::FindTableSnafu { table_name })?
|
||||
.context(error::TableNotFoundSnafu { table_name })?
|
||||
.flush(region, wait)
|
||||
.await
|
||||
.context(error::FlushTableSnafu { table_name })
|
||||
}
|
||||
}
|
||||
|
||||
@@ -11,49 +11,31 @@
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
use std::collections::HashMap;
|
||||
use std::pin::Pin;
|
||||
|
||||
use catalog::CatalogManagerRef;
|
||||
use common_catalog::format_full_table_name;
|
||||
use common_query::Output;
|
||||
use common_recordbatch::RecordBatch;
|
||||
use datafusion_expr::type_coercion::binary::coerce_types;
|
||||
use datafusion_expr::Operator;
|
||||
use datatypes::data_type::DataType;
|
||||
use datatypes::schema::ColumnSchema;
|
||||
use datatypes::vectors::MutableVector;
|
||||
use futures::stream::{self, StreamExt};
|
||||
use futures::Stream;
|
||||
use query::parser::QueryStatement;
|
||||
use session::context::QueryContextRef;
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
use sql::ast::Value as SqlValue;
|
||||
use sql::statements::insert::Insert;
|
||||
use sql::statements::statement::Statement;
|
||||
use sql::statements::{self};
|
||||
use table::engine::TableReference;
|
||||
use table::requests::*;
|
||||
use table::TableRef;
|
||||
|
||||
use crate::error::{
|
||||
CatalogSnafu, CollectRecordsSnafu, ColumnDefaultValueSnafu, ColumnNoneDefaultValueSnafu,
|
||||
ColumnNotFoundSnafu, ColumnTypeMismatchSnafu, ColumnValuesNumberMismatchSnafu, Error,
|
||||
ExecuteLogicalPlanSnafu, InsertSnafu, MissingInsertBodySnafu, ParseSqlSnafu,
|
||||
ParseSqlValueSnafu, PlanStatementSnafu, Result, TableNotFoundSnafu,
|
||||
CatalogSnafu, ColumnDefaultValueSnafu, ColumnNoneDefaultValueSnafu, ColumnNotFoundSnafu,
|
||||
ColumnValuesNumberMismatchSnafu, InsertSnafu, MissingInsertBodySnafu, ParseSqlSnafu,
|
||||
ParseSqlValueSnafu, Result, TableNotFoundSnafu,
|
||||
};
|
||||
use crate::sql::{table_idents_to_full_name, SqlHandler, SqlRequest};
|
||||
use crate::sql::{table_idents_to_full_name, SqlHandler};
|
||||
|
||||
const DEFAULT_PLACEHOLDER_VALUE: &str = "default";
|
||||
|
||||
type InsertRequestStream = Pin<Box<dyn Stream<Item = Result<SqlRequest>> + Send>>;
|
||||
pub(crate) enum InsertRequests {
|
||||
// Single request
|
||||
Request(SqlRequest),
|
||||
// Streaming requests
|
||||
Stream(InsertRequestStream),
|
||||
}
|
||||
|
||||
impl SqlHandler {
|
||||
pub(crate) async fn insert(&self, req: InsertRequest) -> Result<Output> {
|
||||
// FIXME(dennis): table_ref is used in InsertSnafu and the req is consumed
|
||||
@@ -77,7 +59,7 @@ impl SqlHandler {
|
||||
table_ref: TableReference,
|
||||
table: &TableRef,
|
||||
stmt: Insert,
|
||||
) -> Result<SqlRequest> {
|
||||
) -> Result<InsertRequest> {
|
||||
let values = stmt
|
||||
.values_body()
|
||||
.context(ParseSqlValueSnafu)?
|
||||
@@ -129,7 +111,7 @@ impl SqlHandler {
|
||||
}
|
||||
}
|
||||
|
||||
Ok(SqlRequest::Insert(InsertRequest {
|
||||
Ok(InsertRequest {
|
||||
catalog_name: table_ref.catalog.to_string(),
|
||||
schema_name: table_ref.schema.to_string(),
|
||||
table_name: table_ref.table.to_string(),
|
||||
@@ -138,150 +120,14 @@ impl SqlHandler {
|
||||
.map(|(cs, mut b)| (cs.name.to_string(), b.to_vector()))
|
||||
.collect(),
|
||||
region_number: 0,
|
||||
}))
|
||||
})
|
||||
}
|
||||
|
||||
fn build_request_from_batch(
|
||||
stmt: Insert,
|
||||
table: TableRef,
|
||||
batch: RecordBatch,
|
||||
query_ctx: QueryContextRef,
|
||||
) -> Result<SqlRequest> {
|
||||
let (catalog_name, schema_name, table_name) =
|
||||
table_idents_to_full_name(stmt.table_name(), query_ctx)?;
|
||||
|
||||
let schema = table.schema();
|
||||
let columns: Vec<_> = if stmt.columns().is_empty() {
|
||||
schema
|
||||
.column_schemas()
|
||||
.iter()
|
||||
.map(|c| c.name.to_string())
|
||||
.collect()
|
||||
} else {
|
||||
stmt.columns().iter().map(|c| (*c).clone()).collect()
|
||||
};
|
||||
let columns_num = columns.len();
|
||||
|
||||
ensure!(
|
||||
batch.num_columns() == columns_num,
|
||||
ColumnValuesNumberMismatchSnafu {
|
||||
columns: columns_num,
|
||||
values: batch.num_columns(),
|
||||
}
|
||||
);
|
||||
|
||||
let batch_schema = &batch.schema;
|
||||
let batch_columns = batch_schema.column_schemas();
|
||||
assert_eq!(batch_columns.len(), columns_num);
|
||||
let mut columns_values = HashMap::with_capacity(columns_num);
|
||||
|
||||
for (i, column_name) in columns.into_iter().enumerate() {
|
||||
let column_schema = schema
|
||||
.column_schema_by_name(&column_name)
|
||||
.with_context(|| ColumnNotFoundSnafu {
|
||||
table_name: &table_name,
|
||||
column_name: &column_name,
|
||||
})?;
|
||||
let expect_datatype = column_schema.data_type.as_arrow_type();
|
||||
// It's safe to retrieve the column schema by index; we already
// checked above that the column numbers are the same.
|
||||
let batch_datatype = batch_columns[i].data_type.as_arrow_type();
|
||||
let coerced_type = coerce_types(&expect_datatype, &Operator::Eq, &batch_datatype)
|
||||
.map_err(|_| Error::ColumnTypeMismatch {
|
||||
column: column_name.clone(),
|
||||
expected: column_schema.data_type.clone(),
|
||||
actual: batch_columns[i].data_type.clone(),
|
||||
})?;
|
||||
|
||||
ensure!(
|
||||
expect_datatype == coerced_type,
|
||||
ColumnTypeMismatchSnafu {
|
||||
column: column_name,
|
||||
expected: column_schema.data_type.clone(),
|
||||
actual: batch_columns[i].data_type.clone(),
|
||||
}
|
||||
);
|
||||
let vector = batch
|
||||
.column(i)
|
||||
.cast(&column_schema.data_type)
|
||||
.map_err(|_| Error::ColumnTypeMismatch {
|
||||
column: column_name.clone(),
|
||||
expected: column_schema.data_type.clone(),
|
||||
actual: batch_columns[i].data_type.clone(),
|
||||
})?;
|
||||
|
||||
columns_values.insert(column_name, vector);
|
||||
}
|
||||
|
||||
Ok(SqlRequest::Insert(InsertRequest {
|
||||
catalog_name,
|
||||
schema_name,
|
||||
table_name,
|
||||
columns_values,
|
||||
region_number: 0,
|
||||
}))
|
||||
}
|
||||
|
||||
// FIXME(dennis): move it to frontend when refactor is done.
|
||||
async fn build_stream_from_query(
|
||||
&self,
|
||||
table: TableRef,
|
||||
stmt: Insert,
|
||||
query_ctx: QueryContextRef,
|
||||
) -> Result<InsertRequestStream> {
|
||||
let query = stmt
|
||||
.query_body()
|
||||
.context(ParseSqlValueSnafu)?
|
||||
.context(MissingInsertBodySnafu)?;
|
||||
|
||||
let logical_plan = self
|
||||
.query_engine
|
||||
.planner()
|
||||
.plan(
|
||||
QueryStatement::Sql(Statement::Query(Box::new(query))),
|
||||
query_ctx.clone(),
|
||||
)
|
||||
.await
|
||||
.context(PlanStatementSnafu)?;
|
||||
|
||||
let output = self
|
||||
.query_engine
|
||||
.execute(&logical_plan)
|
||||
.await
|
||||
.context(ExecuteLogicalPlanSnafu)?;
|
||||
|
||||
let stream: InsertRequestStream = match output {
|
||||
Output::RecordBatches(batches) => {
|
||||
Box::pin(stream::iter(batches.take()).map(move |batch| {
|
||||
Self::build_request_from_batch(
|
||||
stmt.clone(),
|
||||
table.clone(),
|
||||
batch,
|
||||
query_ctx.clone(),
|
||||
)
|
||||
}))
|
||||
}
|
||||
|
||||
Output::Stream(stream) => Box::pin(stream.map(move |batch| {
|
||||
Self::build_request_from_batch(
|
||||
stmt.clone(),
|
||||
table.clone(),
|
||||
batch.context(CollectRecordsSnafu)?,
|
||||
query_ctx.clone(),
|
||||
)
|
||||
})),
|
||||
_ => unreachable!(),
|
||||
};
|
||||
|
||||
Ok(stream)
|
||||
}
|
||||
|
||||
pub(crate) async fn insert_to_requests(
|
||||
&self,
|
||||
pub async fn insert_to_request(
|
||||
catalog_manager: CatalogManagerRef,
|
||||
stmt: Insert,
|
||||
query_ctx: QueryContextRef,
|
||||
) -> Result<InsertRequests> {
|
||||
) -> Result<InsertRequest> {
|
||||
let (catalog_name, schema_name, table_name) =
|
||||
table_idents_to_full_name(stmt.table_name(), query_ctx.clone())?;
|
||||
|
||||
@@ -293,16 +139,8 @@ impl SqlHandler {
|
||||
table_name: format_full_table_name(&catalog_name, &schema_name, &table_name),
|
||||
})?;
|
||||
|
||||
if stmt.is_insert_select() {
|
||||
Ok(InsertRequests::Stream(
|
||||
self.build_stream_from_query(table, stmt, query_ctx).await?,
|
||||
))
|
||||
} else {
|
||||
let table_ref = TableReference::full(&catalog_name, &schema_name, &table_name);
|
||||
Ok(InsertRequests::Request(Self::build_request_from_values(
|
||||
table_ref, &table, stmt,
|
||||
)?))
|
||||
}
|
||||
let table_ref = TableReference::full(&catalog_name, &schema_name, &table_name);
|
||||
Self::build_request_from_values(table_ref, &table, stmt)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -13,6 +13,5 @@
|
||||
// limitations under the License.
|
||||
|
||||
// TODO(LFC): These tests should be moved to frontend crate. They are actually standalone instance tests.
|
||||
mod instance_test;
|
||||
mod promql_test;
|
||||
pub(crate) mod test_util;
|
||||
|
||||
@@ -24,7 +24,6 @@ use datatypes::schema::{ColumnSchema, RawSchema};
|
||||
use mito::config::EngineConfig;
|
||||
use mito::table::test_util::{new_test_object_store, MockEngine, MockMitoEngine};
|
||||
use query::parser::{PromQuery, QueryLanguageParser, QueryStatement};
|
||||
use query::QueryEngineFactory;
|
||||
use servers::Mode;
|
||||
use session::context::QueryContext;
|
||||
use snafu::ResultExt;
|
||||
@@ -33,7 +32,9 @@ use sql::statements::tql::Tql;
|
||||
use table::engine::{EngineContext, TableEngineRef};
|
||||
use table::requests::{CreateTableRequest, TableOptions};
|
||||
|
||||
use crate::datanode::{DatanodeOptions, FileConfig, ObjectStoreConfig, ProcedureConfig, WalConfig};
|
||||
use crate::datanode::{
|
||||
DatanodeOptions, FileConfig, ObjectStoreConfig, ProcedureConfig, StorageConfig, WalConfig,
|
||||
};
|
||||
use crate::error::{CreateTableSnafu, Result};
|
||||
use crate::instance::Instance;
|
||||
use crate::sql::SqlHandler;
|
||||
@@ -87,7 +88,7 @@ impl MockInstance {
|
||||
match stmt {
|
||||
QueryStatement::Sql(Statement::Query(_)) => {
|
||||
let plan = planner.plan(stmt, QueryContext::arc()).await.unwrap();
|
||||
engine.execute(&plan).await.unwrap()
|
||||
engine.execute(plan, QueryContext::arc()).await.unwrap()
|
||||
}
|
||||
QueryStatement::Sql(Statement::Tql(tql)) => {
|
||||
let plan = match tql {
|
||||
@@ -103,7 +104,7 @@ impl MockInstance {
|
||||
}
|
||||
Tql::Explain(_) => unimplemented!(),
|
||||
};
|
||||
engine.execute(&plan).await.unwrap()
|
||||
engine.execute(plan, QueryContext::arc()).await.unwrap()
|
||||
}
|
||||
_ => self
|
||||
.inner()
|
||||
@@ -116,10 +117,6 @@ impl MockInstance {
|
||||
pub(crate) fn inner(&self) -> &Instance {
|
||||
&self.instance
|
||||
}
|
||||
|
||||
pub(crate) fn data_tmp_dir(&self) -> &TempDir {
|
||||
&self._guard._data_tmp_dir
|
||||
}
|
||||
}
|
||||
|
||||
struct TestGuard {
|
||||
@@ -135,9 +132,12 @@ fn create_tmp_dir_and_datanode_opts(name: &str) -> (DatanodeOptions, TestGuard)
|
||||
dir: wal_tmp_dir.path().to_str().unwrap().to_string(),
|
||||
..Default::default()
|
||||
},
|
||||
storage: ObjectStoreConfig::File(FileConfig {
|
||||
data_dir: data_tmp_dir.path().to_str().unwrap().to_string(),
|
||||
}),
|
||||
storage: StorageConfig {
|
||||
store: ObjectStoreConfig::File(FileConfig {
|
||||
data_dir: data_tmp_dir.path().to_str().unwrap().to_string(),
|
||||
}),
|
||||
..Default::default()
|
||||
},
|
||||
mode: Mode::Standalone,
|
||||
..Default::default()
|
||||
};
|
||||
@@ -205,33 +205,13 @@ pub async fn create_mock_sql_handler() -> SqlHandler {
|
||||
.await
|
||||
.unwrap(),
|
||||
);
|
||||
|
||||
let catalog_list = catalog::local::new_memory_catalog_list().unwrap();
|
||||
let factory = QueryEngineFactory::new(catalog_list);
|
||||
|
||||
SqlHandler::new(
|
||||
mock_engine.clone(),
|
||||
catalog_manager,
|
||||
factory.query_engine(),
|
||||
mock_engine,
|
||||
None,
|
||||
)
|
||||
SqlHandler::new(mock_engine.clone(), catalog_manager, mock_engine, None)
|
||||
}
|
||||
|
||||
pub(crate) async fn setup_test_instance(test_name: &str) -> MockInstance {
|
||||
MockInstance::new(test_name).await
|
||||
}
|
||||
|
||||
pub async fn check_output_stream(output: Output, expected: String) {
|
||||
let recordbatches = match output {
|
||||
Output::Stream(stream) => util::collect_batches(stream).await.unwrap(),
|
||||
Output::RecordBatches(recordbatches) => recordbatches,
|
||||
_ => unreachable!(),
|
||||
};
|
||||
let pretty_print = recordbatches.pretty_print().unwrap();
|
||||
assert_eq!(pretty_print, expected, "{}", pretty_print);
|
||||
}
|
||||
|
||||
pub async fn check_unordered_output_stream(output: Output, expected: String) {
|
||||
let sort_table = |table: String| -> String {
|
||||
let replaced = table.replace("\\n", "\n");
|
||||
|
||||
@@ -13,13 +13,13 @@
|
||||
// limitations under the License.
|
||||
|
||||
use arrow::array::ArrayData;
|
||||
use arrow::bitmap::Bitmap;
|
||||
use arrow::buffer::NullBuffer;
|
||||
|
||||
#[derive(Debug, PartialEq)]
|
||||
enum ValidityKind<'a> {
|
||||
/// Whether the array slot is valid or not (null).
|
||||
Slots {
|
||||
bitmap: &'a Bitmap,
|
||||
bitmap: &'a NullBuffer,
|
||||
len: usize,
|
||||
null_count: usize,
|
||||
},
|
||||
@@ -38,7 +38,7 @@ pub struct Validity<'a> {
|
||||
impl<'a> Validity<'a> {
|
||||
/// Creates a `Validity` from [`ArrayData`].
|
||||
pub fn from_array_data(data: &'a ArrayData) -> Validity<'a> {
|
||||
match data.null_bitmap() {
|
||||
match data.nulls() {
|
||||
Some(bitmap) => Validity {
|
||||
kind: ValidityKind::Slots {
|
||||
bitmap,
|
||||
@@ -67,7 +67,7 @@ impl<'a> Validity<'a> {
|
||||
/// Returns whether `i-th` bit is set.
|
||||
pub fn is_set(&self, i: usize) -> bool {
|
||||
match self.kind {
|
||||
ValidityKind::Slots { bitmap, .. } => bitmap.is_set(i),
|
||||
ValidityKind::Slots { bitmap, .. } => bitmap.is_valid(i),
|
||||
ValidityKind::AllValid { len } => i < len,
|
||||
ValidityKind::AllNull { .. } => false,
|
||||
}
|
||||
|
||||
@@ -4,6 +4,10 @@ version.workspace = true
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[features]
|
||||
default = ["python"]
|
||||
python = ["dep:script"]
|
||||
|
||||
[dependencies]
|
||||
api = { path = "../api" }
|
||||
async-stream.workspace = true
|
||||
@@ -30,12 +34,14 @@ futures = "0.3"
|
||||
futures-util.workspace = true
|
||||
itertools = "0.10"
|
||||
meta-client = { path = "../meta-client" }
|
||||
mito = { path = "../mito", features = ["test"] }
|
||||
moka = { version = "0.9", features = ["future"] }
|
||||
openmetrics-parser = "0.4"
|
||||
partition = { path = "../partition" }
|
||||
prost.workspace = true
|
||||
query = { path = "../query" }
|
||||
rustls = "0.20"
|
||||
script = { path = "../script", features = ["python"], optional = true }
|
||||
serde = "1.0"
|
||||
serde_json = "1.0"
|
||||
servers = { path = "../servers" }
|
||||
@@ -56,3 +62,4 @@ meta-srv = { path = "../meta-srv", features = ["mock"] }
|
||||
strfmt = "0.2"
|
||||
toml = "0.5"
|
||||
tower = "0.4"
|
||||
uuid.workspace = true
|
||||
|
||||
@@ -16,8 +16,12 @@ use std::any::Any;
|
||||
use std::collections::HashSet;
|
||||
use std::sync::Arc;
|
||||
|
||||
use api::v1::CreateTableExpr;
|
||||
use async_trait::async_trait;
|
||||
use catalog::error::{self as catalog_err, InvalidCatalogValueSnafu, Result as CatalogResult};
|
||||
use catalog::error::{
|
||||
self as catalog_err, InternalSnafu, InvalidCatalogValueSnafu, InvalidSystemTableDefSnafu,
|
||||
Result as CatalogResult, UnimplementedSnafu,
|
||||
};
|
||||
use catalog::helper::{
|
||||
build_catalog_prefix, build_schema_prefix, build_table_global_prefix, CatalogKey, SchemaKey,
|
||||
TableGlobalKey, TableGlobalValue,
|
||||
@@ -28,6 +32,7 @@ use catalog::{
|
||||
RegisterSchemaRequest, RegisterSystemTableRequest, RegisterTableRequest, RenameTableRequest,
|
||||
SchemaProvider, SchemaProviderRef,
|
||||
};
|
||||
use common_error::prelude::BoxedError;
|
||||
use common_telemetry::error;
|
||||
use futures::StreamExt;
|
||||
use meta_client::rpc::TableName;
|
||||
@@ -36,6 +41,8 @@ use snafu::prelude::*;
|
||||
use table::TableRef;
|
||||
|
||||
use crate::datanode::DatanodeClients;
|
||||
use crate::expr_factory;
|
||||
use crate::instance::distributed::DistInstance;
|
||||
use crate::table::DistTable;
|
||||
|
||||
#[derive(Clone)]
|
||||
@@ -43,6 +50,12 @@ pub struct FrontendCatalogManager {
|
||||
backend: KvBackendRef,
|
||||
partition_manager: PartitionRuleManagerRef,
|
||||
datanode_clients: Arc<DatanodeClients>,
|
||||
|
||||
// TODO(LFC): Remove this field.
|
||||
// DistInstance in FrontendCatalogManager is only used for creating the distributed script table now.
// Once we have a standalone distributed table creator (like a "create distributed table" procedure),
// we should use that instead.
|
||||
dist_instance: Option<Arc<DistInstance>>,
|
||||
}
|
||||
|
||||
impl FrontendCatalogManager {
|
||||
@@ -55,9 +68,14 @@ impl FrontendCatalogManager {
|
||||
backend,
|
||||
partition_manager,
|
||||
datanode_clients,
|
||||
dist_instance: None,
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn set_dist_instance(&mut self, dist_instance: Arc<DistInstance>) {
|
||||
self.dist_instance = Some(dist_instance)
|
||||
}
|
||||
|
||||
pub(crate) fn backend(&self) -> KvBackendRef {
|
||||
self.backend.clone()
|
||||
}
|
||||
@@ -106,9 +124,93 @@ impl CatalogManager for FrontendCatalogManager {
|
||||
|
||||
async fn register_system_table(
|
||||
&self,
|
||||
_request: RegisterSystemTableRequest,
|
||||
request: RegisterSystemTableRequest,
|
||||
) -> catalog::error::Result<()> {
|
||||
unimplemented!()
|
||||
if let Some(dist_instance) = &self.dist_instance {
|
||||
let open_hook = request.open_hook;
|
||||
let request = request.create_table_request;
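// If the system table already exists, just invoke the open hook on it; otherwise
// translate the request into a CreateTableExpr and create the table through DistInstance.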
|
||||
|
||||
if let Some(table) = self
|
||||
.table(
|
||||
&request.catalog_name,
|
||||
&request.schema_name,
|
||||
&request.table_name,
|
||||
)
|
||||
.await?
|
||||
{
|
||||
if let Some(hook) = open_hook {
|
||||
(hook)(table)?;
|
||||
}
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let time_index = request
|
||||
.schema
|
||||
.column_schemas
|
||||
.iter()
|
||||
.find_map(|x| {
|
||||
if x.is_time_index() {
|
||||
Some(x.name.clone())
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
.context(InvalidSystemTableDefSnafu {
|
||||
err_msg: "Time index is not defined.",
|
||||
})?;
|
||||
|
||||
let primary_keys = request
|
||||
.schema
|
||||
.column_schemas
|
||||
.iter()
|
||||
.enumerate()
|
||||
.filter_map(|(i, x)| {
|
||||
if request.primary_key_indices.contains(&i) {
|
||||
Some(x.name.clone())
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
|
||||
let column_defs = expr_factory::column_schemas_to_defs(request.schema.column_schemas)
|
||||
.map_err(|e| {
|
||||
InvalidSystemTableDefSnafu {
|
||||
err_msg: e.to_string(),
|
||||
}
|
||||
.build()
|
||||
})?;
|
||||
|
||||
let mut create_table = CreateTableExpr {
|
||||
catalog_name: request.catalog_name,
|
||||
schema_name: request.schema_name,
|
||||
table_name: request.table_name,
|
||||
desc: request.desc.unwrap_or("".to_string()),
|
||||
column_defs,
|
||||
time_index,
|
||||
primary_keys,
|
||||
create_if_not_exists: request.create_if_not_exists,
|
||||
table_options: (&request.table_options).into(),
|
||||
table_id: None, // Should and will be assigned by Meta.
|
||||
region_ids: vec![0],
|
||||
};
|
||||
|
||||
let table = dist_instance
|
||||
.create_table(&mut create_table, None)
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(InternalSnafu)?;
|
||||
|
||||
if let Some(hook) = open_hook {
|
||||
(hook)(table)?;
|
||||
}
|
||||
Ok(())
|
||||
} else {
|
||||
UnimplementedSnafu {
|
||||
operation: "register system table",
|
||||
}
|
||||
.fail()
|
||||
}
|
||||
}
|
||||
|
||||
fn schema(
|
||||
@@ -330,3 +432,70 @@ impl SchemaProvider for FrontendSchemaProvider {
|
||||
Ok(self.table_names()?.contains(&name.to_string()))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use script::table::{build_scripts_schema, SCRIPTS_TABLE_NAME};
|
||||
use table::requests::{CreateTableRequest, TableOptions};
|
||||
|
||||
use super::*;
|
||||
|
||||
#[tokio::test(flavor = "multi_thread")]
|
||||
async fn test_register_system_table() {
|
||||
let instance =
|
||||
crate::tests::create_distributed_instance("test_register_system_table").await;
|
||||
|
||||
let catalog_name = DEFAULT_CATALOG_NAME;
|
||||
let schema_name = DEFAULT_SCHEMA_NAME;
|
||||
let table_name = SCRIPTS_TABLE_NAME;
|
||||
let request = CreateTableRequest {
|
||||
id: 1,
|
||||
catalog_name: catalog_name.to_string(),
|
||||
schema_name: schema_name.to_string(),
|
||||
table_name: table_name.to_string(),
|
||||
desc: Some("Scripts table".to_string()),
|
||||
schema: build_scripts_schema(),
|
||||
region_numbers: vec![0],
|
||||
primary_key_indices: vec![0, 1],
|
||||
create_if_not_exists: true,
|
||||
table_options: TableOptions::default(),
|
||||
};
|
||||
|
||||
let result = instance
|
||||
.catalog_manager
|
||||
.register_system_table(RegisterSystemTableRequest {
|
||||
create_table_request: request,
|
||||
open_hook: None,
|
||||
})
|
||||
.await;
|
||||
assert!(result.is_ok());
|
||||
|
||||
assert!(
|
||||
instance
|
||||
.catalog_manager
|
||||
.table(catalog_name, schema_name, table_name)
|
||||
.await
|
||||
.unwrap()
|
||||
.is_some(),
|
||||
"the registered system table cannot be found in catalog"
|
||||
);
|
||||
|
||||
let mut actually_created_table_in_datanode = 0;
|
||||
for datanode in instance.datanodes.values() {
|
||||
if datanode
|
||||
.catalog_manager()
|
||||
.table(catalog_name, schema_name, table_name)
|
||||
.await
|
||||
.unwrap()
|
||||
.is_some()
|
||||
{
|
||||
actually_created_table_in_datanode += 1;
|
||||
}
|
||||
}
|
||||
assert_eq!(
|
||||
actually_created_table_in_datanode, 1,
|
||||
"system table should be actually created at one and only one datanode"
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -378,6 +378,12 @@ pub enum Error {
|
||||
#[snafu(backtrace)]
|
||||
source: table::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to start script manager, source: {}", source))]
|
||||
StartScriptManager {
|
||||
#[snafu(backtrace)]
|
||||
source: script::error::Error,
|
||||
},
|
||||
}
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
@@ -462,6 +468,8 @@ impl ErrorExt for Error {
|
||||
source.status_code()
|
||||
}
|
||||
Error::UnrecognizedTableOption { .. } => StatusCode::InvalidArguments,
|
||||
|
||||
Error::StartScriptManager { source } => source.status_code(),
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -187,7 +187,12 @@ fn columns_to_expr(
|
||||
.iter()
|
||||
.map(|c| column_def_to_schema(c, c.name.to_string() == time_index).context(ParseSqlSnafu))
|
||||
.collect::<Result<Vec<ColumnSchema>>>()?;
|
||||
column_schemas_to_defs(column_schemas)
|
||||
}
|
||||
|
||||
pub(crate) fn column_schemas_to_defs(
|
||||
column_schemas: Vec<ColumnSchema>,
|
||||
) -> Result<Vec<api::v1::ColumnDef>> {
|
||||
let column_datatypes = column_schemas
|
||||
.iter()
|
||||
.map(|c| {
|
||||
|
||||
@@ -17,6 +17,7 @@ mod grpc;
|
||||
mod influxdb;
|
||||
mod opentsdb;
|
||||
mod prometheus;
|
||||
mod script;
|
||||
mod standalone;
|
||||
|
||||
use std::collections::HashMap;
|
||||
@@ -40,7 +41,6 @@ use common_telemetry::timer;
|
||||
use datafusion::sql::sqlparser::ast::ObjectName;
|
||||
use datanode::instance::sql::table_idents_to_full_name;
|
||||
use datanode::instance::InstanceRef as DnInstanceRef;
|
||||
use datanode::metric;
|
||||
use datatypes::schema::Schema;
|
||||
use distributed::DistInstance;
|
||||
use meta_client::client::{MetaClient, MetaClientBuilder};
|
||||
@@ -58,26 +58,29 @@ use servers::query_handler::grpc::{GrpcQueryHandler, GrpcQueryHandlerRef};
|
||||
use servers::query_handler::sql::SqlQueryHandler;
|
||||
use servers::query_handler::{
|
||||
InfluxdbLineProtocolHandler, OpentsdbProtocolHandler, PrometheusProtocolHandler, ScriptHandler,
|
||||
ScriptHandlerRef,
|
||||
};
|
||||
use session::context::QueryContextRef;
|
||||
use snafu::prelude::*;
|
||||
use sql::dialect::GenericDialect;
|
||||
use sql::parser::ParserContext;
|
||||
use sql::statements::copy::CopyTable;
|
||||
use sql::statements::describe::DescribeTable;
|
||||
use sql::statements::statement::Statement;
|
||||
use sql::statements::tql::Tql;
|
||||
|
||||
use crate::catalog::FrontendCatalogManager;
|
||||
use crate::datanode::DatanodeClients;
|
||||
use crate::error::{
|
||||
self, Error, ExecLogicalPlanSnafu, ExecutePromqlSnafu, ExecuteStatementSnafu, ExternalSnafu,
|
||||
InvalidInsertRequestSnafu, MissingMetasrvOptsSnafu, NotSupportedSnafu, ParseQuerySnafu,
|
||||
ParseSqlSnafu, PlanStatementSnafu, Result, SqlExecInterceptedSnafu,
|
||||
self, CatalogSnafu, DescribeStatementSnafu, Error, ExecLogicalPlanSnafu, ExecutePromqlSnafu,
|
||||
ExecuteStatementSnafu, ExternalSnafu, InvalidInsertRequestSnafu, MissingMetasrvOptsSnafu,
|
||||
NotSupportedSnafu, ParseQuerySnafu, ParseSqlSnafu, PlanStatementSnafu, Result,
|
||||
SqlExecInterceptedSnafu, TableNotFoundSnafu,
|
||||
};
|
||||
use crate::expr_factory::{CreateExprFactoryRef, DefaultCreateExprFactory};
|
||||
use crate::frontend::FrontendOptions;
|
||||
use crate::instance::standalone::StandaloneGrpcQueryHandler;
|
||||
use crate::metric;
|
||||
use crate::script::ScriptExecutor;
|
||||
use crate::server::{start_server, ServerHandlers, Services};
|
||||
|
||||
#[async_trait]
@@ -101,9 +104,7 @@ pub type FrontendInstanceRef = Arc<dyn FrontendInstance>;
#[derive(Clone)]
pub struct Instance {
    catalog_manager: CatalogManagerRef,

    /// Script handler is None in distributed mode, only works on standalone mode.
    script_handler: Option<ScriptHandlerRef>,
    script_executor: Arc<ScriptExecutor>,
    statement_handler: StatementHandlerRef,
    query_engine: QueryEngineRef,
    grpc_query_handler: GrpcQueryHandlerRef<Error>,
@@ -132,23 +133,29 @@ impl Instance {
        let partition_manager = Arc::new(PartitionRuleManager::new(table_routes));
        let datanode_clients = Arc::new(DatanodeClients::default());

        let catalog_manager = Arc::new(FrontendCatalogManager::new(
            meta_backend,
            partition_manager,
            datanode_clients.clone(),
        ));
        let mut catalog_manager =
            FrontendCatalogManager::new(meta_backend, partition_manager, datanode_clients.clone());

        let dist_instance =
            DistInstance::new(meta_client, catalog_manager.clone(), datanode_clients);
        let dist_instance = DistInstance::new(
            meta_client,
            Arc::new(catalog_manager.clone()),
            datanode_clients,
        );
        let dist_instance = Arc::new(dist_instance);

        catalog_manager.set_dist_instance(dist_instance.clone());
        let catalog_manager = Arc::new(catalog_manager);

        let query_engine =
            QueryEngineFactory::new_with_plugins(catalog_manager.clone(), plugins.clone())
                .query_engine();

        let script_executor =
            Arc::new(ScriptExecutor::new(catalog_manager.clone(), query_engine.clone()).await?);

        Ok(Instance {
            catalog_manager,
            script_handler: None,
            script_executor,
            create_expr_factory: Arc::new(DefaultCreateExprFactory),
            statement_handler: dist_instance.clone(),
            query_engine,
@@ -189,18 +196,22 @@ impl Instance {
        Ok(Arc::new(meta_client))
    }

    pub fn new_standalone(dn_instance: DnInstanceRef) -> Self {
        Instance {
            catalog_manager: dn_instance.catalog_manager().clone(),
            script_handler: None,
    pub async fn try_new_standalone(dn_instance: DnInstanceRef) -> Result<Self> {
        let catalog_manager = dn_instance.catalog_manager();
        let query_engine = dn_instance.query_engine();
        let script_executor =
            Arc::new(ScriptExecutor::new(catalog_manager.clone(), query_engine.clone()).await?);
        Ok(Instance {
            catalog_manager: catalog_manager.clone(),
            script_executor,
            create_expr_factory: Arc::new(DefaultCreateExprFactory),
            statement_handler: dn_instance.clone(),
            query_engine: dn_instance.query_engine(),
            query_engine,
            grpc_query_handler: StandaloneGrpcQueryHandler::arc(dn_instance.clone()),
            promql_handler: Some(dn_instance.clone()),
            plugins: Default::default(),
            servers: Arc::new(HashMap::new()),
        }
        })
    }

    pub async fn build_servers(
@@ -215,12 +226,19 @@ impl Instance {
    }

    #[cfg(test)]
    pub(crate) fn new_distributed(dist_instance: Arc<DistInstance>) -> Self {
        let catalog_manager = dist_instance.catalog_manager();
    pub(crate) async fn new_distributed(
        catalog_manager: CatalogManagerRef,
        dist_instance: Arc<DistInstance>,
    ) -> Self {
        let query_engine = QueryEngineFactory::new(catalog_manager.clone()).query_engine();
        let script_executor = Arc::new(
            ScriptExecutor::new(catalog_manager.clone(), query_engine.clone())
                .await
                .unwrap(),
        );
        Instance {
            catalog_manager,
            script_handler: None,
            script_executor,
            statement_handler: dist_instance.clone(),
            query_engine,
            create_expr_factory: Arc::new(DefaultCreateExprFactory),
@@ -235,14 +253,6 @@ impl Instance {
        &self.catalog_manager
    }

    pub fn set_script_handler(&mut self, handler: ScriptHandlerRef) {
        debug_assert!(
            self.script_handler.is_none(),
            "Script handler can be set only once!"
        );
        self.script_handler = Some(handler);
    }

    /// Handle batch inserts
    pub async fn handle_inserts(
        &self,
@@ -428,52 +438,87 @@ fn parse_stmt(sql: &str) -> Result<Vec<Statement>> {
}

impl Instance {
    async fn plan_exec(&self, stmt: Statement, query_ctx: QueryContextRef) -> Result<Output> {
        let planner = self.query_engine.planner();
        let plan = planner
            .plan(QueryStatement::Sql(stmt), query_ctx.clone())
            .await
            .context(PlanStatementSnafu)?;
        self.query_engine
            .execute(plan, query_ctx)
            .await
            .context(ExecLogicalPlanSnafu)
    }

    async fn execute_tql(&self, tql: Tql, query_ctx: QueryContextRef) -> Result<Output> {
        let plan = match tql {
            Tql::Eval(eval) => {
                let promql = PromQuery {
                    start: eval.start,
                    end: eval.end,
                    step: eval.step,
                    query: eval.query,
                };
                let stmt = QueryLanguageParser::parse_promql(&promql).context(ParseQuerySnafu)?;
                self.query_engine
                    .planner()
                    .plan(stmt, query_ctx.clone())
                    .await
                    .context(PlanStatementSnafu)?
            }
            Tql::Explain(_) => unimplemented!(),
        };
        self.query_engine
            .execute(plan, query_ctx)
            .await
            .context(ExecLogicalPlanSnafu)
    }

    async fn describe_table(
        &self,
        stmt: DescribeTable,
        query_ctx: QueryContextRef,
    ) -> Result<Output> {
        let (catalog, schema, table) = table_idents_to_full_name(stmt.name(), query_ctx)
            .map_err(BoxedError::new)
            .context(ExternalSnafu)?;

        let table = self
            .catalog_manager
            .table(&catalog, &schema, &table)
            .await
            .context(CatalogSnafu)?
            .with_context(|| TableNotFoundSnafu {
                table_name: stmt.name().to_string(),
            })?;

        query::sql::describe_table(table).context(DescribeStatementSnafu)
    }

    async fn query_statement(&self, stmt: Statement, query_ctx: QueryContextRef) -> Result<Output> {
        check_permission(self.plugins.clone(), &stmt, &query_ctx)?;

        let planner = self.query_engine.planner();

        match stmt {
            Statement::Query(_) | Statement::Explain(_) => {
                let plan = planner
                    .plan(QueryStatement::Sql(stmt), query_ctx)
                    .await
                    .context(PlanStatementSnafu)?;
                self.query_engine
                    .execute(&plan)
                    .await
                    .context(ExecLogicalPlanSnafu)
            Statement::Query(_) | Statement::Explain(_) | Statement::Delete(_) => {
                self.plan_exec(stmt, query_ctx).await
            }
            Statement::Tql(tql) => {
                let plan = match tql {
                    Tql::Eval(eval) => {
                        let promql = PromQuery {
                            start: eval.start,
                            end: eval.end,
                            step: eval.step,
                            query: eval.query,
                        };
                        let stmt =
                            QueryLanguageParser::parse_promql(&promql).context(ParseQuerySnafu)?;
                        planner
                            .plan(stmt, query_ctx)
                            .await
                            .context(PlanStatementSnafu)?
                    }
                    Tql::Explain(_) => unimplemented!(),
                };
                self.query_engine
                    .execute(&plan)
                    .await
                    .context(ExecLogicalPlanSnafu)

            // For performance consideration, only "insert with select" is executed by query engine.
            // Plain insert ("insert with values") is still executed directly in statement.
            Statement::Insert(ref insert) if insert.is_insert_select() => {
                self.plan_exec(stmt, query_ctx).await
            }

            Statement::Tql(tql) => self.execute_tql(tql, query_ctx).await,

            Statement::DescribeTable(stmt) => self.describe_table(stmt, query_ctx).await,

            Statement::CreateDatabase(_)
            | Statement::CreateExternalTable(_)
            | Statement::ShowDatabases(_)
            | Statement::CreateTable(_)
            | Statement::ShowTables(_)
            | Statement::DescribeTable(_)
            | Statement::Insert(_)
            | Statement::Delete(_)
            | Statement::Alter(_)
            | Statement::DropTable(_)
            | Statement::Copy(_) => self
@@ -583,41 +628,6 @@ impl SqlQueryHandler for Instance {
    }
}

#[async_trait]
impl ScriptHandler for Instance {
    async fn insert_script(
        &self,
        schema: &str,
        name: &str,
        script: &str,
    ) -> server_error::Result<()> {
        if let Some(handler) = &self.script_handler {
            handler.insert_script(schema, name, script).await
        } else {
            server_error::NotSupportedSnafu {
                feat: "Script execution in Frontend",
            }
            .fail()
        }
    }

    async fn execute_script(
        &self,
        schema: &str,
        script: &str,
        params: HashMap<String, String>,
    ) -> server_error::Result<Output> {
        if let Some(handler) = &self.script_handler {
            handler.execute_script(schema, script, params).await
        } else {
            server_error::NotSupportedSnafu {
                feat: "Script execution in Frontend",
            }
            .fail()
        }
    }
}

#[async_trait]
impl PromHandler for Instance {
    async fn do_query(&self, query: &PromQuery) -> server_error::Result<Output> {
@@ -647,12 +657,13 @@ pub fn check_permission(
    }

    match stmt {
        // query,explain and tql will be checked in QueryEngineState
        Statement::Query(_) | Statement::Explain(_) | Statement::Tql(_) => {}
        // These are executed by query engine, and will be checked there.
        Statement::Query(_) | Statement::Explain(_) | Statement::Tql(_) | Statement::Delete(_) => {}
        // database ops won't be checked
        Statement::CreateDatabase(_) | Statement::ShowDatabases(_) | Statement::Use(_) => {}
        // show create table and alter are not supported yet
        Statement::ShowCreateTable(_) | Statement::Alter(_) => {}
        Statement::ShowCreateTable(_) | Statement::CreateExternalTable(_) | Statement::Alter(_) => {
        }

        Statement::Insert(insert) => {
            validate_param(insert.table_name(), query_ctx)?;
@@ -673,9 +684,6 @@ pub fn check_permission(
        Statement::DescribeTable(stmt) => {
            validate_param(stmt.name(), query_ctx)?;
        }
        Statement::Delete(delete) => {
            validate_param(delete.table_name(), query_ctx)?;
        }
        Statement::Copy(stmd) => match stmd {
            CopyTable::To(copy_table_to) => validate_param(&copy_table_to.table_name, query_ctx)?,
            CopyTable::From(copy_table_from) => {
@@ -1086,7 +1094,7 @@ mod tests {
            .plan(stmt.clone(), QueryContext::arc())
            .await
            .unwrap();
        let output = engine.execute(&plan).await.unwrap();
        let output = engine.execute(plan, QueryContext::arc()).await.unwrap();
        let Output::Stream(stream) = output else { unreachable!() };
        let recordbatches = RecordBatches::try_collect(stream).await.unwrap();
        let actual = recordbatches.pretty_print().unwrap();

@@ -33,6 +33,7 @@ use common_error::prelude::BoxedError;
use common_query::Output;
use common_telemetry::{debug, info};
use datanode::instance::sql::table_idents_to_full_name;
use datanode::sql::SqlHandler;
use datatypes::prelude::ConcreteDataType;
use datatypes::schema::RawSchema;
use meta_client::client::MetaClient;
@@ -45,7 +46,7 @@ use partition::partition::{PartitionBound, PartitionDef};
use query::error::QueryExecutionSnafu;
use query::parser::QueryStatement;
use query::query_engine::StatementHandler;
use query::sql::{describe_table, show_databases, show_tables};
use query::sql::{show_databases, show_tables};
use session::context::QueryContextRef;
use snafu::{ensure, OptionExt, ResultExt};
use sql::ast::Value as SqlValue;
@@ -55,18 +56,18 @@ use sql::statements::statement::Statement;
use table::metadata::{RawTableInfo, RawTableMeta, TableIdent, TableType};
use table::requests::TableOptions;
use table::table::AlterContext;
use table::TableRef;

use crate::catalog::FrontendCatalogManager;
use crate::datanode::DatanodeClients;
use crate::error::{
    self, AlterExprToRequestSnafu, CatalogEntrySerdeSnafu, CatalogSnafu, ColumnDataTypeSnafu,
    DeserializePartitionSnafu, NotSupportedSnafu, ParseSqlSnafu, PrimaryKeyNotFoundSnafu,
    RequestDatanodeSnafu, RequestMetaSnafu, Result, SchemaExistsSnafu, StartMetaClientSnafu,
    TableAlreadyExistSnafu, TableNotFoundSnafu, TableSnafu, ToTableInsertRequestSnafu,
    UnrecognizedTableOptionSnafu,
    DeserializePartitionSnafu, InvokeDatanodeSnafu, NotSupportedSnafu, ParseSqlSnafu,
    PrimaryKeyNotFoundSnafu, RequestDatanodeSnafu, RequestMetaSnafu, Result, SchemaExistsSnafu,
    StartMetaClientSnafu, TableAlreadyExistSnafu, TableNotFoundSnafu, TableSnafu,
    ToTableInsertRequestSnafu, UnrecognizedTableOptionSnafu,
};
use crate::expr_factory;
use crate::sql::insert_to_request;
use crate::table::DistTable;

#[derive(Clone)]
@@ -93,14 +94,14 @@ impl DistInstance {
        &self,
        create_table: &mut CreateTableExpr,
        partitions: Option<Partitions>,
    ) -> Result<Output> {
    ) -> Result<TableRef> {
        let table_name = TableName::new(
            &create_table.catalog_name,
            &create_table.schema_name,
            &create_table.table_name,
        );

        if self
        if let Some(table) = self
            .catalog_manager
            .table(
                &table_name.catalog_name,
@@ -109,10 +110,9 @@ impl DistInstance {
            )
            .await
            .context(CatalogSnafu)?
            .is_some()
        {
            return if create_table.create_if_not_exists {
                Ok(Output::AffectedRows(0))
                Ok(table)
            } else {
                TableAlreadyExistSnafu {
                    table: table_name.to_string(),
@@ -153,20 +153,20 @@ impl DistInstance {

        create_table.table_id = Some(TableId { id: table_id });

        let table = DistTable::new(
        let table = Arc::new(DistTable::new(
            table_name.clone(),
            table_info,
            self.catalog_manager.partition_manager(),
            self.catalog_manager.datanode_clients(),
            self.catalog_manager.backend(),
        );
        ));

        let request = RegisterTableRequest {
            catalog: table_name.catalog_name.clone(),
            schema: table_name.schema_name.clone(),
            table_name: table_name.table_name.clone(),
            table_id,
            table: Arc::new(table),
            table: table.clone(),
        };
        ensure!(
            self.catalog_manager
@@ -196,9 +196,7 @@ impl DistInstance {
            .await
            .context(RequestDatanodeSnafu)?;
        }

        // Checked in real MySQL, it truly returns "0 rows affected".
        Ok(Output::AffectedRows(0))
        Ok(table)
    }

    async fn drop_table(&self, table_name: TableName) -> Result<Output> {
@@ -329,7 +327,8 @@ impl DistInstance {
            }
            Statement::CreateTable(stmt) => {
                let create_expr = &mut expr_factory::create_to_expr(&stmt, query_ctx)?;
                Ok(self.create_table(create_expr, stmt.partitions).await?)
                let _ = self.create_table(create_expr, stmt.partitions).await?;
                Ok(Output::AffectedRows(0))
            }
            Statement::Alter(alter_table) => {
                let expr = grpc::to_alter_expr(alter_table, query_ctx)?;
@@ -347,20 +346,6 @@ impl DistInstance {
            Statement::ShowTables(stmt) => {
                show_tables(stmt, self.catalog_manager.clone(), query_ctx)
            }
            Statement::DescribeTable(stmt) => {
                let (catalog, schema, table) = table_idents_to_full_name(stmt.name(), query_ctx)
                    .map_err(BoxedError::new)
                    .context(error::ExternalSnafu)?;
                let table = self
                    .catalog_manager
                    .table(&catalog, &schema, &table)
                    .await
                    .context(CatalogSnafu)?
                    .with_context(|| TableNotFoundSnafu {
                        table_name: stmt.name().to_string(),
                    })?;
                describe_table(table)
            }
            Statement::Insert(insert) => {
                let (catalog, schema, table) =
                    table_idents_to_full_name(insert.table_name(), query_ctx.clone())
@@ -374,7 +359,10 @@ impl DistInstance {
                    .context(CatalogSnafu)?
                    .context(TableNotFoundSnafu { table_name: table })?;

                let insert_request = insert_to_request(&table, *insert, query_ctx)?;
                let insert_request =
                    SqlHandler::insert_to_request(self.catalog_manager.clone(), *insert, query_ctx)
                        .await
                        .context(InvokeDatanodeSnafu)?;

                return Ok(Output::AffectedRows(
                    table.insert(insert_request).await.context(TableSnafu)?,

@@ -49,7 +49,8 @@ impl GrpcQueryHandler for DistInstance {
            DdlExpr::CreateTable(mut expr) => {
                // TODO(LFC): Support creating distributed table through GRPC interface.
                // Currently only SQL supports it; how to design the fields in CreateTableExpr?
                self.create_table(&mut expr, None).await
                let _ = self.create_table(&mut expr, None).await;
                Ok(Output::AffectedRows(0))
            }
            DdlExpr::Alter(expr) => self.handle_alter_table(expr).await,
            DdlExpr::DropTable(expr) => {

@@ -383,8 +383,6 @@ CREATE TABLE {table_name} (
        // Wait for previous task finished
        flush_table(frontend, "greptime", "public", table_name, None).await;

        let table_id = 1024;

        let table = instance
            .frontend
            .catalog_manager()
@@ -394,7 +392,7 @@ CREATE TABLE {table_name} (
            .unwrap();
        let table = table.as_any().downcast_ref::<DistTable>().unwrap();

        let TableGlobalValue { regions_id_map, .. } = table
        let tgv = table
            .table_global_value(&TableGlobalKey {
                catalog_name: "greptime".to_string(),
                schema_name: "public".to_string(),
@@ -403,7 +401,10 @@ CREATE TABLE {table_name} (
            .await
            .unwrap()
            .unwrap();
        let region_to_dn_map = regions_id_map
        let table_id = tgv.table_id();

        let region_to_dn_map = tgv
            .regions_id_map
            .iter()
            .map(|(k, v)| (v[0], *k))
            .collect::<HashMap<u32, u64>>();
@@ -590,7 +591,7 @@ CREATE TABLE {table_name} (
            .plan(stmt, QueryContext::arc())
            .await
            .unwrap();
        let output = engine.execute(&plan).await.unwrap();
        let output = engine.execute(plan, QueryContext::arc()).await.unwrap();
        let Output::Stream(stream) = output else { unreachable!() };
        let recordbatches = RecordBatches::try_collect(stream).await.unwrap();
        let actual = recordbatches.pretty_print().unwrap();

@@ -13,6 +13,7 @@
// limitations under the License.

#![feature(assert_matches)]
#![feature(trait_upcasting)]

pub mod catalog;
pub mod datanode;
@@ -22,13 +23,14 @@ pub mod frontend;
pub mod grpc;
pub mod influxdb;
pub mod instance;
pub(crate) mod metric;
pub mod mysql;
pub mod opentsdb;
pub mod postgres;
pub mod prom;
pub mod prometheus;
mod script;
mod server;
mod sql;
mod table;
#[cfg(test)]
mod tests;

17
src/frontend/src/metric.rs
Normal file
@@ -0,0 +1,17 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

pub(crate) const METRIC_HANDLE_SQL_ELAPSED: &str = "frontend.handle_sql_elapsed";
pub(crate) const METRIC_HANDLE_SCRIPTS_ELAPSED: &str = "frontend.handle_scripts_elapsed";
pub(crate) const METRIC_RUN_SCRIPT_ELAPSED: &str = "frontend.run_script_elapsed";
@@ -22,7 +22,7 @@ use common_telemetry::info;
use servers::auth::UserProviderRef;
use servers::error::Error::InternalIo;
use servers::grpc::GrpcServer;
use servers::http::HttpServer;
use servers::http::HttpServerBuilder;
use servers::mysql::server::{MysqlServer, MysqlSpawnConfig, MysqlSpawnRef};
use servers::opentsdb::OpentsdbServer;
use servers::postgres::PostgresServer;
@@ -150,33 +150,33 @@ impl Services {
        if let Some(http_options) = &opts.http_options {
            let http_addr = parse_addr(&http_options.addr)?;

            let mut http_server = HttpServer::new(
                ServerSqlQueryHandlerAdaptor::arc(instance.clone()),
                ServerGrpcQueryHandlerAdaptor::arc(instance.clone()),
                http_options.clone(),
            );
            let mut http_server_builder = HttpServerBuilder::new(http_options.clone());
            http_server_builder
                .with_sql_handler(ServerSqlQueryHandlerAdaptor::arc(instance.clone()))
                .with_grpc_handler(ServerGrpcQueryHandlerAdaptor::arc(instance.clone()));

            if let Some(user_provider) = user_provider.clone() {
                http_server.set_user_provider(user_provider);
                http_server_builder.with_user_provider(user_provider);
            }

            if set_opentsdb_handler {
                http_server.set_opentsdb_handler(instance.clone());
                http_server_builder.with_opentsdb_handler(instance.clone());
            }
            if matches!(
                opts.influxdb_options,
                Some(InfluxdbOptions { enable: true })
            ) {
                http_server.set_influxdb_handler(instance.clone());
                http_server_builder.with_influxdb_handler(instance.clone());
            }

            if matches!(
                opts.prometheus_options,
                Some(PrometheusOptions { enable: true })
            ) {
                http_server.set_prom_handler(instance.clone());
                http_server_builder.with_prom_handler(instance.clone());
            }
            http_server.set_script_handler(instance.clone());

            http_server_builder.with_script_handler(instance.clone());
            let http_server = http_server_builder.build();
            result.push((Box::new(http_server), http_addr));
        }

Some files were not shown because too many files have changed in this diff.