Mirror of https://github.com/GreptimeTeam/greptimedb.git (synced 2025-12-27 00:19:58 +00:00)

Compare commits: 46 commits, v0.1.2-alp ... v0.2.0-nig
| Author | SHA1 | Date |
|---|---|---|
|  | 509d07b798 |  |
|  | e72ce5eaa9 |  |
|  | f491a040f5 |  |
|  | 47179a7812 |  |
|  | 995a28a27d |  |
|  | ed1cb73ffc |  |
|  | 0ffa628c22 |  |
|  | 5edd2a3dbe |  |
|  | e63b28bff1 |  |
|  | 8140d4e3e5 |  |
|  | 6825459c75 |  |
|  | 7eb4d81929 |  |
|  | 8ba0741c81 |  |
|  | 0eeb5b460c |  |
|  | 65ea6fd85f |  |
|  | 4f15b26b28 |  |
|  | 15ee4ac729 |  |
|  | b4fc8c5b78 |  |
|  | 6f81717866 |  |
|  | 77f9383daf |  |
|  | c788b7fc26 |  |
|  | 0f160a73be |  |
|  | 92963b9614 |  |
|  | f1139fba59 |  |
|  | 4e552245b1 |  |
|  | 3126bbc1c7 |  |
|  | b77b561bc8 |  |
|  | 501faad8ab |  |
|  | 5397a9bbe6 |  |
|  | f351ee7042 |  |
|  | e0493e0b8f |  |
|  | b2a09c888a |  |
|  | af101480b3 |  |
|  | b8f7f603cf |  |
|  | 8fb97ea1d8 |  |
|  | 21ce9c1163 |  |
|  | 0a22375ac1 |  |
|  | 0596d20a3b |  |
|  | e19c8fa2b6 |  |
|  | ad886f5b3e |  |
|  | f6669a8201 |  |
|  | ad5c47185d |  |
|  | 64441616db |  |
|  | 09491d6aee |  |
|  | 7cfa30b2ab |  |
|  | a7676d8860 |  |
91  .github/workflows/release.yml  (vendored)
@@ -32,21 +32,37 @@ jobs:
os: ubuntu-2004-16-cores
file: greptime-linux-amd64
continue-on-error: false
opts: "-F pyo3_backend"
- arch: aarch64-unknown-linux-gnu
os: ubuntu-2004-16-cores
file: greptime-linux-arm64
continue-on-error: false
opts: "-F pyo3_backend"
- arch: aarch64-apple-darwin
os: macos-latest
file: greptime-darwin-arm64
continue-on-error: false
opts: "-F pyo3_backend"
- arch: x86_64-apple-darwin
os: macos-latest
file: greptime-darwin-amd64
continue-on-error: false
- arch: x86_64-unknown-linux-gnu
os: ubuntu-2004-16-cores
file: greptime-linux-amd64-pyo3
continue-on-error: false
opts: "-F pyo3_backend"
- arch: aarch64-unknown-linux-gnu
os: ubuntu-2004-16-cores
file: greptime-linux-arm64-pyo3
continue-on-error: false
opts: "-F pyo3_backend"
- arch: aarch64-apple-darwin
os: macos-latest
file: greptime-darwin-arm64-pyo3
continue-on-error: false
opts: "-F pyo3_backend"
- arch: x86_64-apple-darwin
os: macos-latest
file: greptime-darwin-amd64-pyo3
continue-on-error: false
opts: "-F pyo3_backend"
runs-on: ${{ matrix.os }}
continue-on-error: ${{ matrix.continue-on-error }}

@@ -105,11 +121,12 @@ jobs:
sudo apt-get -y update
sudo apt-get -y install libssl-dev pkg-config g++-aarch64-linux-gnu gcc-aarch64-linux-gnu binutils-aarch64-linux-gnu wget

- name: Compile Python 3.10.10 from source for Aarch64
if: contains(matrix.arch, 'aarch64-unknown-linux-gnu')
# FIXME(zyy17): Should we specify the version of python when building binary for darwin?
- name: Compile Python 3.10.10 from source for linux
if: contains(matrix.arch, 'linux') && contains(matrix.opts, 'pyo3_backend')
run: |
sudo chmod +x ./docker/aarch64/compile-python.sh
sudo ./docker/aarch64/compile-python.sh
sudo ./docker/aarch64/compile-python.sh ${{ matrix.arch }}

- name: Install rust toolchain
uses: dtolnay/rust-toolchain@master

@@ -124,17 +141,51 @@ jobs:
if: env.DISABLE_RUN_TESTS == 'false'
run: make unit-test integration-test sqlness-test

- name: Run cargo build for aarch64-linux
if: contains(matrix.arch, 'aarch64-unknown-linux-gnu')
- name: Run cargo build with pyo3 for aarch64-linux
if: contains(matrix.arch, 'aarch64-unknown-linux-gnu') && contains(matrix.opts, 'pyo3_backend')
run: |
# TODO(zyy17): We should make PYO3_CROSS_LIB_DIR configurable.
export PYO3_CROSS_LIB_DIR=$(pwd)/python_arm64_build/lib
export PYTHON_INSTALL_PATH_AMD64=${PWD}/python-3.10.10/amd64
export LD_LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LD_LIBRARY_PATH
export LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LIBRARY_PATH
export PATH=$PYTHON_INSTALL_PATH_AMD64/bin:$PATH

export PYO3_CROSS_LIB_DIR=${PWD}/python-3.10.10/aarch64
echo "PYO3_CROSS_LIB_DIR: $PYO3_CROSS_LIB_DIR"
alias python=python3
alias python=$PYTHON_INSTALL_PATH_AMD64/bin/python3
alias pip=$PYTHON_INSTALL_PATH_AMD64/bin/python3-pip

cargo build --profile ${{ env.CARGO_PROFILE }} --locked --target ${{ matrix.arch }} ${{ matrix.opts }}

- name: Run cargo build with pyo3 for amd64-linux
if: contains(matrix.arch, 'x86_64-unknown-linux-gnu') && contains(matrix.opts, 'pyo3_backend')
run: |
export PYTHON_INSTALL_PATH_AMD64=${PWD}/python-3.10.10/amd64
export LD_LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LD_LIBRARY_PATH
export LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LIBRARY_PATH
export PATH=$PYTHON_INSTALL_PATH_AMD64/bin:$PATH

echo "implementation=CPython" >> pyo3.config
echo "version=3.10" >> pyo3.config
echo "implementation=CPython" >> pyo3.config
echo "shared=true" >> pyo3.config
echo "abi3=true" >> pyo3.config
echo "lib_name=python3.10" >> pyo3.config
echo "lib_dir=$PYTHON_INSTALL_PATH_AMD64/lib" >> pyo3.config
echo "executable=$PYTHON_INSTALL_PATH_AMD64/bin/python3" >> pyo3.config
echo "pointer_width=64" >> pyo3.config
echo "build_flags=" >> pyo3.config
echo "suppress_build_script_link_lines=false" >> pyo3.config

cat pyo3.config
export PYO3_CONFIG_FILE=${PWD}/pyo3.config
alias python=$PYTHON_INSTALL_PATH_AMD64/bin/python3
alias pip=$PYTHON_INSTALL_PATH_AMD64/bin/python3-pip

cargo build --profile ${{ env.CARGO_PROFILE }} --locked --target ${{ matrix.arch }} ${{ matrix.opts }}

- name: Run cargo build
if: contains(matrix.arch, 'aarch64-unknown-linux-gnu') == false
if: contains(matrix.arch, 'darwin') || contains(matrix.opts, 'pyo3_backend') == false
run: cargo build --profile ${{ env.CARGO_PROFILE }} --locked --target ${{ matrix.arch }} ${{ matrix.opts }}

- name: Calculate checksum and rename binary

@@ -196,35 +247,33 @@ jobs:
- name: Download amd64 binary
uses: actions/download-artifact@v3
with:
name: greptime-linux-amd64
name: greptime-linux-amd64-pyo3
path: amd64

- name: Unzip the amd64 artifacts
run: |
cd amd64
tar xvf greptime-linux-amd64.tgz
rm greptime-linux-amd64.tgz
tar xvf amd64/greptime-linux-amd64-pyo3.tgz -C amd64/ && rm amd64/greptime-linux-amd64-pyo3.tgz
cp -r amd64 docker/ci

- name: Download arm64 binary
id: download-arm64
uses: actions/download-artifact@v3
with:
name: greptime-linux-arm64
name: greptime-linux-arm64-pyo3
path: arm64

- name: Unzip the arm64 artifacts
id: unzip-arm64
if: success() || steps.download-arm64.conclusion == 'success'
run: |
cd arm64
tar xvf greptime-linux-arm64.tgz
rm greptime-linux-arm64.tgz
tar xvf arm64/greptime-linux-arm64-pyo3.tgz -C arm64/ && rm arm64/greptime-linux-arm64-pyo3.tgz
cp -r arm64 docker/ci

- name: Build and push all
uses: docker/build-push-action@v3
if: success() || steps.unzip-arm64.conclusion == 'success' # Build and push all platform if unzip-arm64 succeeds
with:
context: .
context: ./docker/ci/
file: ./docker/ci/Dockerfile
push: true
platforms: linux/amd64,linux/arm64

@@ -236,7 +285,7 @@ jobs:
uses: docker/build-push-action@v3
if: success() || steps.download-arm64.conclusion == 'failure' # Only build and push amd64 platform if download-arm64 fails
with:
context: .
context: ./docker/ci/
file: ./docker/ci/Dockerfile
push: true
platforms: linux/amd64
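The two docker/build-push-action steps above reduce to a multi-platform buildx build of the ./docker/ci context. For reference, a rough local equivalent; the builder setup and the image tag are placeholders, not what the workflow actually pushes:

```bash
# Create (once) and select a builder that can target multiple platforms.
docker buildx create --use

# Build the CI image for both architectures from the docker/ci context,
# using the same context/file/platforms the workflow passes to build-push-action.
docker buildx build \
  --platform linux/amd64,linux/arm64 \
  -f docker/ci/Dockerfile \
  -t example.registry/greptimedb:dev \
  --push \
  ./docker/ci/
```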
4  .gitignore  (vendored)

@@ -35,3 +35,7 @@ benchmarks/data

# dotenv
.env

# dashboard files
!/src/servers/dashboard/VERSION
/src/servers/dashboard/*
556  Cargo.lock  (generated; file diff suppressed because it is too large)

@@ -7,6 +7,7 @@ members = [
"src/cmd",
"src/common/base",
"src/common/catalog",
"src/common/datasource",
"src/common/error",
"src/common/function",
"src/common/function-macro",

@@ -68,6 +69,7 @@ futures-util = "0.3"
parquet = "34.0"
paste = "1.0"
prost = "0.11"
rand = "0.8"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
snafu = { version = "0.7", features = ["backtraces"] }
19  README.md

@@ -1,8 +1,8 @@
<p align="center">
<picture>
<source media="(prefers-color-scheme: light)" srcset="/docs/logo-text-padding.png">
<source media="(prefers-color-scheme: dark)" srcset="/docs/logo-text-padding-dark.png">
<img alt="GreptimeDB Logo" src="/docs/logo-text-padding.png" width="400px">
<source media="(prefers-color-scheme: light)" srcset="https://cdn.jsdelivr.net/gh/GreptimeTeam/greptimedb@develop/docs/logo-text-padding.png">
<source media="(prefers-color-scheme: dark)" srcset="https://cdn.jsdelivr.net/gh/GreptimeTeam/greptimedb@develop/docs/logo-text-padding-dark.png">
<img alt="GreptimeDB Logo" src="https://cdn.jsdelivr.net/gh/GreptimeTeam/greptimedb@develop/docs/logo-text-padding.png" width="400px">
</picture>
</p>
@@ -61,12 +61,12 @@ To compile GreptimeDB from source, you'll need:
find installation instructions [here](https://grpc.io/docs/protoc-installation/).
**Note that the `protoc` version needs to be >= 3.15** because we have used the `optional`
keyword. You can check it with `protoc --version`.
- python3-dev or python3-devel (optional, only needed if you want to run scripts
in CPython): this installs a Python shared library required for running the Python
- python3-dev or python3-devel (optional feature, only needed if you want to run scripts
in CPython; you also need to enable the `pyo3_backend` feature when compiling, either with `cargo run -F pyo3_backend` or by adding `pyo3_backend` to `src/script/Cargo.toml`'s `features.default`, e.g. `default = ["python", "pyo3_backend"]`): this installs a Python shared library required for running the Python
scripting engine (in CPython mode). This is available as `python3-dev` on
Ubuntu, where you can install it with `sudo apt install python3-dev`, or as
`python3-devel` on RPM-based distributions (e.g. Fedora, Red Hat, SuSE). macOS's
`Python3` package should have this shared library by default.
`Python3` package should have this shared library by default. More details on compiling with PyO3 can be found in [PyO3](https://pyo3.rs/v0.18.1/building_and_distribution#configuring-the-python-version)'s documentation.
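For example, building from source with the CPython scripting backend enabled would look roughly like this; it is a sketch only, and the `standalone start` subcommand is shown for illustration:

```bash
# Compile with the optional CPython (PyO3) backend on top of the default features.
cargo build --release -F pyo3_backend

# Or run a standalone instance directly with the feature enabled.
cargo run --release -F pyo3_backend -- standalone start
```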
#### Build with Docker
@@ -147,9 +147,9 @@ You can always clean up the test database by removing `/tmp/greptimedb`.
### Installation

- [Pre-built Binaries](https://github.com/GreptimeTeam/greptimedb/releases):
downloadable pre-built binaries for Linux and MacOS
- [Docker Images](https://hub.docker.com/r/greptime/greptimedb): pre-built
Docker images
For Linux and macOS, you can easily download pre-built binaries that are ready to use. In most cases, downloading the version without PyO3 is sufficient. However, if you plan to run scripts in CPython (and use Python packages like NumPy and Pandas), you will need to download the version with PyO3 and install a Python interpreter whose version matches the one the PyO3 build was linked against. We recommend using virtualenv to manage multiple Python versions during installation.
- [Docker Images](https://hub.docker.com/r/greptime/greptimedb) (**recommended**): pre-built
Docker images; this is the easiest way to try GreptimeDB. By default it runs CPython scripts with `pyo3_backend` enabled.
- [`gtctl`](https://github.com/GreptimeTeam/gtctl): the command-line tool for
Kubernetes deployment
@@ -158,6 +158,7 @@ You can always clean up the test database by removing `/tmp/greptimedb`.
- GreptimeDB [User Guide](https://docs.greptime.com/user-guide/concepts.html)
- GreptimeDB [Developer
Guide](https://docs.greptime.com/developer-guide/overview.html)
- GreptimeDB [internal code document](https://greptimedb.rs)

### Dashboard
- [The dashboard UI for GreptimeDB](https://github.com/GreptimeTeam/dashboard)
@@ -21,12 +21,12 @@ use std::collections::HashMap;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::time::Instant;
|
||||
|
||||
use arrow::array::{ArrayRef, PrimitiveArray, StringArray, TimestampNanosecondArray};
|
||||
use arrow::array::{ArrayRef, PrimitiveArray, StringArray, TimestampMicrosecondArray};
|
||||
use arrow::datatypes::{DataType, Float64Type, Int64Type};
|
||||
use arrow::record_batch::RecordBatch;
|
||||
use clap::Parser;
|
||||
use client::api::v1::column::Values;
|
||||
use client::api::v1::{Column, ColumnDataType, ColumnDef, CreateTableExpr, InsertRequest, TableId};
|
||||
use client::api::v1::{Column, ColumnDataType, ColumnDef, CreateTableExpr, InsertRequest};
|
||||
use client::{Client, Database, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use indicatif::{MultiProgress, ProgressBar, ProgressStyle};
|
||||
use parquet::arrow::arrow_reader::ParquetRecordBatchReaderBuilder;
|
||||
@@ -61,7 +61,7 @@ struct Args {
|
||||
#[arg(long = "skip-read")]
|
||||
skip_read: bool,
|
||||
|
||||
#[arg(short, long, default_value_t = String::from("127.0.0.1:3001"))]
|
||||
#[arg(short, long, default_value_t = String::from("127.0.0.1:4001"))]
|
||||
endpoint: String,
|
||||
}
|
||||
|
||||
@@ -97,6 +97,9 @@ async fn write_data(
|
||||
|
||||
for record_batch in record_batch_reader {
|
||||
let record_batch = record_batch.unwrap();
|
||||
if !is_record_batch_full(&record_batch) {
|
||||
continue;
|
||||
}
|
||||
let (columns, row_count) = convert_record_batch(record_batch);
|
||||
let request = InsertRequest {
|
||||
table_name: TABLE_NAME.to_string(),
|
||||
@@ -122,11 +125,16 @@ fn convert_record_batch(record_batch: RecordBatch) -> (Vec<Column>, u32) {
|
||||
let mut columns = vec![];
|
||||
|
||||
for (array, field) in record_batch.columns().iter().zip(fields.iter()) {
|
||||
let values = build_values(array);
|
||||
let (values, datatype) = build_values(array);
|
||||
let column = Column {
|
||||
column_name: field.name().to_owned(),
|
||||
values: Some(values),
|
||||
null_mask: vec![],
|
||||
null_mask: array
|
||||
.data()
|
||||
.null_bitmap()
|
||||
.map(|bitmap| bitmap.buffer().as_slice().to_vec())
|
||||
.unwrap_or_default(),
|
||||
datatype: datatype.into(),
|
||||
// datatype and semantic_type are set to default
|
||||
..Default::default()
|
||||
};
|
||||
@@ -136,7 +144,7 @@ fn convert_record_batch(record_batch: RecordBatch) -> (Vec<Column>, u32) {
|
||||
(columns, row_count as _)
|
||||
}
|
||||
|
||||
fn build_values(column: &ArrayRef) -> Values {
|
||||
fn build_values(column: &ArrayRef) -> (Values, ColumnDataType) {
|
||||
match column.data_type() {
|
||||
DataType::Int64 => {
|
||||
let array = column
|
||||
@@ -144,10 +152,13 @@ fn build_values(column: &ArrayRef) -> Values {
|
||||
.downcast_ref::<PrimitiveArray<Int64Type>>()
|
||||
.unwrap();
|
||||
let values = array.values();
|
||||
Values {
|
||||
i64_values: values.to_vec(),
|
||||
..Default::default()
|
||||
}
|
||||
(
|
||||
Values {
|
||||
i64_values: values.to_vec(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDataType::Int64,
|
||||
)
|
||||
}
|
||||
DataType::Float64 => {
|
||||
let array = column
|
||||
@@ -155,29 +166,38 @@ fn build_values(column: &ArrayRef) -> Values {
|
||||
.downcast_ref::<PrimitiveArray<Float64Type>>()
|
||||
.unwrap();
|
||||
let values = array.values();
|
||||
Values {
|
||||
f64_values: values.to_vec(),
|
||||
..Default::default()
|
||||
}
|
||||
(
|
||||
Values {
|
||||
f64_values: values.to_vec(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDataType::Float64,
|
||||
)
|
||||
}
|
||||
DataType::Timestamp(_, _) => {
|
||||
let array = column
|
||||
.as_any()
|
||||
.downcast_ref::<TimestampNanosecondArray>()
|
||||
.downcast_ref::<TimestampMicrosecondArray>()
|
||||
.unwrap();
|
||||
let values = array.values();
|
||||
Values {
|
||||
i64_values: values.to_vec(),
|
||||
..Default::default()
|
||||
}
|
||||
(
|
||||
Values {
|
||||
i64_values: values.to_vec(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDataType::Int64,
|
||||
)
|
||||
}
|
||||
DataType::Utf8 => {
|
||||
let array = column.as_any().downcast_ref::<StringArray>().unwrap();
|
||||
let values = array.iter().filter_map(|s| s.map(String::from)).collect();
|
||||
Values {
|
||||
string_values: values,
|
||||
..Default::default()
|
||||
}
|
||||
(
|
||||
Values {
|
||||
string_values: values,
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDataType::String,
|
||||
)
|
||||
}
|
||||
DataType::Null
|
||||
| DataType::Boolean
|
||||
@@ -213,6 +233,10 @@ fn build_values(column: &ArrayRef) -> Values {
|
||||
}
|
||||
}
|
||||
|
||||
fn is_record_batch_full(batch: &RecordBatch) -> bool {
|
||||
batch.columns().iter().all(|col| col.null_count() == 0)
|
||||
}
|
||||
|
||||
fn create_table_expr() -> CreateTableExpr {
|
||||
CreateTableExpr {
|
||||
catalog_name: CATALOG_NAME.to_string(),
|
||||
@@ -340,7 +364,7 @@ fn create_table_expr() -> CreateTableExpr {
|
||||
create_if_not_exists: false,
|
||||
table_options: Default::default(),
|
||||
region_ids: vec![0],
|
||||
table_id: Some(TableId { id: 0 }),
|
||||
table_id: None,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
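Condensed, the conversion pattern the benchmark now uses for each Arrow column looks like the sketch below. It mirrors the calls visible in this diff (arrow's `data()/null_bitmap()` and the prost-generated `Column`/`Values` types); it is an illustration, not an excerpt from the file.

```rust
use arrow::array::{Array, Int64Array};
use client::api::v1::column::Values;
use client::api::v1::{Column, ColumnDataType};

/// One Arrow Int64 column becomes a gRPC `Column`: the raw values, the
/// inferred datatype, and a null mask copied from Arrow's validity bitmap
/// (empty when the column has no nulls).
fn int64_column(name: &str, array: &Int64Array) -> Column {
    let values = Values {
        i64_values: array.values().to_vec(),
        ..Default::default()
    };
    let null_mask = array
        .data()
        .null_bitmap()
        .map(|bitmap| bitmap.buffer().as_slice().to_vec())
        .unwrap_or_default();
    Column {
        column_name: name.to_string(),
        values: Some(values),
        null_mask,
        datatype: ColumnDataType::Int64.into(),
        // semantic_type is left at its default, as in the benchmark.
        ..Default::default()
    }
}
```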
@@ -1,9 +1,36 @@
#!/usr/bin/env bash

set -e

# This script will download the Python source code, compile it, and install it to /usr/local/lib,
# then use this Python to build a cross-compiled Python for aarch64.
ARCH=$1
PYTHON_VERSION=3.10.10
PYTHON_SOURCE_DIR=Python-${PYTHON_VERSION}
PYTHON_INSTALL_PATH_AMD64=${PWD}/python-${PYTHON_VERSION}/amd64
PYTHON_INSTALL_PATH_AARCH64=${PWD}/python-${PYTHON_VERSION}/aarch64

function download_python_source_code() {
    wget https://www.python.org/ftp/python/$PYTHON_VERSION/Python-$PYTHON_VERSION.tgz
    tar -xvf Python-$PYTHON_VERSION.tgz
}

function compile_for_amd64_platform() {
    mkdir -p "$PYTHON_INSTALL_PATH_AMD64"

    echo "Compiling for amd64 platform..."

    ./configure \
        --prefix="$PYTHON_INSTALL_PATH_AMD64" \
        --enable-shared \
        ac_cv_pthread_is_default=no ac_cv_pthread=yes ac_cv_cxx_thread=yes \
        ac_cv_have_long_long_format=yes \
        --disable-ipv6 ac_cv_file__dev_ptmx=no ac_cv_file__dev_ptc=no

    make
    make install
}

wget https://www.python.org/ftp/python/3.10.10/Python-3.10.10.tgz
tar -xvf Python-3.10.10.tgz
cd Python-3.10.10
# Explain the Python compile options here a bit:
# --enable-shared: enable building a shared Python library (default is no); we need it for calling from Rust
# CC, CXX, AR, LD, RANLIB: set the compiler, archiver, linker, and ranlib programs to use

@@ -14,33 +41,47 @@ cd Python-3.10.10
# ac_cv_have_long_long_format=yes: target platform supports the long long type
# disable-ipv6: disable IPv6 support, we don't need it here
# ac_cv_file__dev_ptmx=no ac_cv_file__dev_ptc=no: disable pty support, we don't need it here
function compile_for_aarch64_platform() {
    export LD_LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LD_LIBRARY_PATH
    export LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LIBRARY_PATH
    export PATH=$PYTHON_INSTALL_PATH_AMD64/bin:$PATH

    mkdir -p "$PYTHON_INSTALL_PATH_AARCH64"

    echo "Compiling for aarch64 platform..."
    echo "LD_LIBRARY_PATH: $LD_LIBRARY_PATH"
    echo "LIBRARY_PATH: $LIBRARY_PATH"
    echo "PATH: $PATH"

    ./configure --build=x86_64-linux-gnu --host=aarch64-linux-gnu \
        --prefix="$PYTHON_INSTALL_PATH_AARCH64" --enable-optimizations \
        CC=aarch64-linux-gnu-gcc \
        CXX=aarch64-linux-gnu-g++ \
        AR=aarch64-linux-gnu-ar \
        LD=aarch64-linux-gnu-ld \
        RANLIB=aarch64-linux-gnu-ranlib \
        --enable-shared \
        ac_cv_pthread_is_default=no ac_cv_pthread=yes ac_cv_cxx_thread=yes \
        ac_cv_have_long_long_format=yes \
        --disable-ipv6 ac_cv_file__dev_ptmx=no ac_cv_file__dev_ptc=no

    make
    make altinstall
}

# Main script starts here.
download_python_source_code

# Enter the python source code directory.
cd $PYTHON_SOURCE_DIR || exit 1

# Build local python first, then build cross-compiled python.
./configure \
    --enable-shared \
    ac_cv_pthread_is_default=no ac_cv_pthread=yes ac_cv_cxx_thread=yes \
    ac_cv_have_long_long_format=yes \
    --disable-ipv6 ac_cv_file__dev_ptmx=no ac_cv_file__dev_ptc=no && \
make
make install
cd ..
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/lib/
export LIBRARY_PATH=$LIBRARY_PATH:/usr/local/lib/
export PY_INSTALL_PATH=$(pwd)/python_arm64_build
cd Python-3.10.10 && \
make clean && \
make distclean && \
alias python=python3 && \
./configure --build=x86_64-linux-gnu --host=aarch64-linux-gnu \
    --prefix=$PY_INSTALL_PATH --enable-optimizations \
    CC=aarch64-linux-gnu-gcc \
    CXX=aarch64-linux-gnu-g++ \
    AR=aarch64-linux-gnu-ar \
    LD=aarch64-linux-gnu-ld \
    RANLIB=aarch64-linux-gnu-ranlib \
    --enable-shared \
    ac_cv_pthread_is_default=no ac_cv_pthread=yes ac_cv_cxx_thread=yes \
    ac_cv_have_long_long_format=yes \
    --disable-ipv6 ac_cv_file__dev_ptmx=no ac_cv_file__dev_ptc=no && \
make && make altinstall && \
cd ..
compile_for_amd64_platform

# Clean the build directory.
make clean && make distclean

# Cross compile python for aarch64.
if [ "$ARCH" = "aarch64-unknown-linux-gnu" ]; then
    compile_for_aarch64_platform
fi
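Once both interpreters are built, the cross build consumes them through environment variables, exactly as the release workflow above does. In short (paths assume the script ran from the repository root):

```bash
# Host Python used while building; the cross-compiled libpython is used for linking.
export PYTHON_INSTALL_PATH_AMD64=${PWD}/python-3.10.10/amd64
export LD_LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LD_LIBRARY_PATH
export PATH=$PYTHON_INSTALL_PATH_AMD64/bin:$PATH

# Point PyO3 at the aarch64 libpython produced by compile_for_aarch64_platform.
export PYO3_CROSS_LIB_DIR=${PWD}/python-3.10.10/aarch64

cargo build --locked --target aarch64-unknown-linux-gnu -F pyo3_backend
```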
@@ -1,6 +1,14 @@
FROM ubuntu:22.04

RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get -y install ca-certificates
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
    ca-certificates \
    python3.10 \
    python3.10-dev \
    python3-pip

COPY requirements.txt /etc/greptime/requirements.txt

RUN python3 -m pip install -r /etc/greptime/requirements.txt

ARG TARGETARCH
5  docker/ci/requirements.txt  (new file)

@@ -0,0 +1,5 @@
numpy>=1.24.2
pandas>=1.5.3
pyarrow>=11.0.0
requests>=2.28.2
scipy>=1.10.1
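These are the Python packages the CI image exposes to CPython scripts. To mirror that environment locally (the virtualenv path is just an example):

```bash
python3 -m venv .venv                 # any location works
source .venv/bin/activate
python3 -m pip install -r docker/ci/requirements.txt
```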
39  scripts/fetch-dashboard-assets.sh  (new executable file)

@@ -0,0 +1,39 @@
#!/usr/bin/env bash

# This script is used to download built dashboard assets from the "GreptimeTeam/dashboard" repository.

set -e

declare -r SCRIPT_DIR=$(cd $(dirname ${0}) >/dev/null 2>&1 && pwd)
declare -r ROOT_DIR=$(dirname ${SCRIPT_DIR})
declare -r STATIC_DIR="$ROOT_DIR/src/servers/dashboard"

RELEASE_VERSION="$(cat $STATIC_DIR/VERSION)"

# Download the SHA256 checksum attached to the release. To verify the integrity
# of the download, this checksum will be used to check the downloaded tar file
# containing the built dashboard assets.
curl -Ls https://github.com/GreptimeTeam/dashboard/releases/download/$RELEASE_VERSION/sha256.txt --output sha256.txt

# Download the tar file containing the built dashboard assets.
curl -L https://github.com/GreptimeTeam/dashboard/releases/download/$RELEASE_VERSION/build.tar.gz --output build.tar.gz

# Verify the checksums match; exit if they don't.
case "$(uname -s)" in
    FreeBSD | Darwin)
        echo "$(cat sha256.txt)" | shasum --algorithm 256 --check \
            || { echo "Checksums did not match for downloaded dashboard assets!"; exit 1; } ;;
    Linux)
        echo "$(cat sha256.txt)" | sha256sum --check -- \
            || { echo "Checksums did not match for downloaded dashboard assets!"; exit 1; } ;;
    *)
        echo "The '$(uname -s)' operating system is not supported as a build host for the dashboard" >&2
        exit 1
esac

# Extract the assets and clean up.
tar -xzf build.tar.gz -C "$STATIC_DIR"
rm sha256.txt
rm build.tar.gz

echo "Successfully downloaded dashboard assets to $STATIC_DIR"
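The script takes no arguments; it is driven entirely by the committed VERSION file. Typical usage from the repository root:

```bash
# Reads src/servers/dashboard/VERSION, downloads that dashboard release,
# verifies its sha256 checksum, and unpacks it into src/servers/dashboard/.
./scripts/fetch-dashboard-assets.sh
```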
@@ -10,7 +10,7 @@ common-base = { path = "../common/base" }
common-error = { path = "../common/error" }
common-time = { path = "../common/time" }
datatypes = { path = "../datatypes" }
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "3a715150563b89d5dfc81a5838eac1f66a5658a1" }
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "d3861c34f7920238869d0d4e50dc1e6b189d2a6b" }
prost.workspace = true
snafu = { version = "0.7", features = ["backtraces"] }
tonic.workspace = true
@@ -19,8 +19,8 @@ use std::fmt::{Debug, Formatter};
|
||||
use std::sync::Arc;
|
||||
|
||||
use api::v1::meta::{RegionStat, TableName};
|
||||
use common_telemetry::info;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use common_telemetry::{info, warn};
|
||||
use snafu::ResultExt;
|
||||
use table::engine::{EngineContext, TableEngineRef};
|
||||
use table::metadata::TableId;
|
||||
use table::requests::CreateTableRequest;
|
||||
@@ -228,43 +228,32 @@ pub(crate) async fn handle_system_table_request<'a, M: CatalogManager>(
|
||||
|
||||
/// The stat of regions in the datanode node.
|
||||
/// The number of regions can be got from len of vec.
|
||||
pub async fn region_stats(catalog_manager: &CatalogManagerRef) -> Result<Vec<RegionStat>> {
|
||||
///
|
||||
/// Ignores any errors occurred during iterating regions. The intention of this method is to
|
||||
/// collect region stats that will be carried in Datanode's heartbeat to Metasrv, so it's a
|
||||
/// "try our best" job.
|
||||
pub async fn datanode_stat(catalog_manager: &CatalogManagerRef) -> (u64, Vec<RegionStat>) {
|
||||
let mut region_number: u64 = 0;
|
||||
let mut region_stats = Vec::new();
|
||||
for catalog_name in catalog_manager.catalog_names()? {
|
||||
let catalog =
|
||||
catalog_manager
|
||||
.catalog(&catalog_name)?
|
||||
.context(error::CatalogNotFoundSnafu {
|
||||
catalog_name: &catalog_name,
|
||||
})?;
|
||||
|
||||
for schema_name in catalog.schema_names()? {
|
||||
let schema = catalog
|
||||
.schema(&schema_name)?
|
||||
.context(error::SchemaNotFoundSnafu {
|
||||
catalog: &catalog_name,
|
||||
schema: &schema_name,
|
||||
})?;
|
||||
let Ok(catalog_names) = catalog_manager.catalog_names() else { return (region_number, region_stats) };
|
||||
for catalog_name in catalog_names {
|
||||
let Ok(Some(catalog)) = catalog_manager.catalog(&catalog_name) else { continue };
|
||||
|
||||
for table_name in schema.table_names()? {
|
||||
let table =
|
||||
schema
|
||||
.table(&table_name)
|
||||
.await?
|
||||
.context(error::TableNotFoundSnafu {
|
||||
table_info: &table_name,
|
||||
})?;
|
||||
let Ok(schema_names) = catalog.schema_names() else { continue };
|
||||
for schema_name in schema_names {
|
||||
let Ok(Some(schema)) = catalog.schema(&schema_name) else { continue };
|
||||
|
||||
region_stats.extend(
|
||||
table
|
||||
.region_stats()
|
||||
.context(error::RegionStatsSnafu {
|
||||
catalog: &catalog_name,
|
||||
schema: &schema_name,
|
||||
table: &table_name,
|
||||
})?
|
||||
.into_iter()
|
||||
.map(|stat| RegionStat {
|
||||
let Ok(table_names) = schema.table_names() else { continue };
|
||||
for table_name in table_names {
|
||||
let Ok(Some(table)) = schema.table(&table_name).await else { continue };
|
||||
|
||||
let region_numbers = &table.table_info().meta.region_numbers;
|
||||
region_number += region_numbers.len() as u64;
|
||||
|
||||
match table.region_stats() {
|
||||
Ok(stats) => {
|
||||
let stats = stats.into_iter().map(|stat| RegionStat {
|
||||
region_id: stat.region_id,
|
||||
table_name: Some(TableName {
|
||||
catalog_name: catalog_name.clone(),
|
||||
@@ -273,10 +262,16 @@ pub async fn region_stats(catalog_manager: &CatalogManagerRef) -> Result<Vec<Reg
|
||||
}),
|
||||
approximate_bytes: stat.disk_usage_bytes as i64,
|
||||
..Default::default()
|
||||
}),
|
||||
);
|
||||
});
|
||||
|
||||
region_stats.extend(stats);
|
||||
}
|
||||
Err(e) => {
|
||||
warn!("Failed to get region status, err: {:?}", e);
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(region_stats)
|
||||
(region_number, region_stats)
|
||||
}
|
||||
|
||||
@@ -400,7 +400,7 @@ mod tests {
|
||||
use log_store::NoopLogStore;
|
||||
use mito::config::EngineConfig;
|
||||
use mito::engine::MitoEngine;
|
||||
use object_store::{ObjectStore, ObjectStoreBuilder};
|
||||
use object_store::ObjectStore;
|
||||
use storage::compaction::noop::NoopCompactionScheduler;
|
||||
use storage::config::EngineConfig as StorageEngineConfig;
|
||||
use storage::EngineImpl;
|
||||
@@ -482,11 +482,9 @@ mod tests {
|
||||
pub async fn prepare_table_engine() -> (TempDir, TableEngineRef) {
|
||||
let dir = create_temp_dir("system-table-test");
|
||||
let store_dir = dir.path().to_string_lossy();
|
||||
let accessor = object_store::services::Fs::default()
|
||||
.root(&store_dir)
|
||||
.build()
|
||||
.unwrap();
|
||||
let object_store = ObjectStore::new(accessor).finish();
|
||||
let mut builder = object_store::services::Fs::default();
|
||||
builder.root(&store_dir);
|
||||
let object_store = ObjectStore::new(builder).unwrap().finish();
|
||||
let noop_compaction_scheduler = Arc::new(NoopCompactionScheduler::default());
|
||||
let table_engine = Arc::new(MitoEngine::new(
|
||||
EngineConfig::default(),
|
||||
|
||||
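The `datanode_stat` rewrite above trades `?`-style early returns for `let … else { continue }`, so a single broken catalog, schema, or table no longer aborts the whole heartbeat payload. A stripped-down, self-contained illustration of that best-effort pattern (the lookup functions are made up, not the real catalog API):

```rust
/// Hypothetical lookups standing in for the catalog/schema/table calls.
fn list_names() -> Result<Vec<String>, String> {
    Ok(vec!["a".to_string(), "b".to_string()])
}

fn lookup(name: &str) -> Result<Option<u64>, String> {
    if name == "b" {
        Err("broken".to_string()) // simulate a failing entry
    } else {
        Ok(Some(42))
    }
}

fn collect_best_effort() -> Vec<u64> {
    let mut stats = Vec::new();
    // A top-level error yields an empty result instead of propagating.
    let Ok(names) = list_names() else { return stats };
    for name in names {
        // A failing or missing entry is skipped, mirroring the
        // `else { continue }` arms in `datanode_stat`.
        let Ok(Some(stat)) = lookup(&name) else { continue };
        stats.push(stat);
    }
    stats
}

fn main() {
    assert_eq!(collect_best_effort(), vec![42]);
}
```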
@@ -23,7 +23,7 @@ enum_dispatch = "0.3"
futures-util.workspace = true
parking_lot = "0.12"
prost.workspace = true
rand = "0.8"
rand.workspace = true
snafu.workspace = true
tonic.workspace = true
@@ -14,6 +14,9 @@
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use api::v1::greptime_database_client::GreptimeDatabaseClient;
|
||||
use api::v1::health_check_client::HealthCheckClient;
|
||||
use api::v1::HealthCheckRequest;
|
||||
use arrow_flight::flight_service_client::FlightServiceClient;
|
||||
use common_grpc::channel_manager::ChannelManager;
|
||||
use parking_lot::RwLock;
|
||||
@@ -23,6 +26,10 @@ use tonic::transport::Channel;
|
||||
use crate::load_balance::{LoadBalance, Loadbalancer};
|
||||
use crate::{error, Result};
|
||||
|
||||
pub(crate) struct DatabaseClient {
|
||||
pub(crate) inner: GreptimeDatabaseClient<Channel>,
|
||||
}
|
||||
|
||||
pub(crate) struct FlightClient {
|
||||
addr: String,
|
||||
client: FlightServiceClient<Channel>,
|
||||
@@ -118,7 +125,7 @@ impl Client {
|
||||
self.inner.set_peers(urls);
|
||||
}
|
||||
|
||||
pub(crate) fn make_client(&self) -> Result<FlightClient> {
|
||||
fn find_channel(&self) -> Result<(String, Channel)> {
|
||||
let addr = self
|
||||
.inner
|
||||
.get_peer()
|
||||
@@ -131,11 +138,30 @@ impl Client {
|
||||
.channel_manager
|
||||
.get(&addr)
|
||||
.context(error::CreateChannelSnafu { addr: &addr })?;
|
||||
Ok((addr, channel))
|
||||
}
|
||||
|
||||
pub(crate) fn make_flight_client(&self) -> Result<FlightClient> {
|
||||
let (addr, channel) = self.find_channel()?;
|
||||
Ok(FlightClient {
|
||||
addr,
|
||||
client: FlightServiceClient::new(channel),
|
||||
})
|
||||
}
|
||||
|
||||
pub(crate) fn make_database_client(&self) -> Result<DatabaseClient> {
|
||||
let (_, channel) = self.find_channel()?;
|
||||
Ok(DatabaseClient {
|
||||
inner: GreptimeDatabaseClient::new(channel),
|
||||
})
|
||||
}
|
||||
|
||||
pub async fn health_check(&self) -> Result<()> {
|
||||
let (_, channel) = self.find_channel()?;
|
||||
let mut client = HealthCheckClient::new(channel);
|
||||
client.health_check(HealthCheckRequest {}).await?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
|
||||
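After this split, one resolved channel backs the Flight, GreptimeDatabase, and HealthCheck clients alike. A health-probe sketch; the `with_urls` constructor is assumed from context rather than shown in this hunk:

```rust
use client::Client;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // `with_urls` is an assumption; use whatever constructor or peer setter
    // the client crate actually exposes to point at a frontend/datanode.
    let client = Client::with_urls(vec!["127.0.0.1:4001"]);

    // `health_check` picks a peer, reuses the cached channel, and issues a
    // HealthCheckRequest over the shared gRPC connection.
    client.health_check().await?;
    println!("gRPC endpoint is healthy");
    Ok(())
}
```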
@@ -12,15 +12,14 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::str::FromStr;
|
||||
|
||||
use api::v1::auth_header::AuthScheme;
|
||||
use api::v1::ddl_request::Expr as DdlExpr;
|
||||
use api::v1::greptime_request::Request;
|
||||
use api::v1::query_request::Query;
|
||||
use api::v1::{
|
||||
AlterExpr, AuthHeader, CreateTableExpr, DdlRequest, DropTableExpr, FlushTableExpr,
|
||||
GreptimeRequest, InsertRequest, PromRangeQuery, QueryRequest, RequestHeader,
|
||||
greptime_response, AffectedRows, AlterExpr, AuthHeader, CreateTableExpr, DdlRequest,
|
||||
DropTableExpr, FlushTableExpr, GreptimeRequest, InsertRequest, PromRangeQuery, QueryRequest,
|
||||
RequestHeader,
|
||||
};
|
||||
use arrow_flight::{FlightData, Ticket};
|
||||
use common_error::prelude::*;
|
||||
@@ -31,28 +30,49 @@ use futures_util::{TryFutureExt, TryStreamExt};
|
||||
use prost::Message;
|
||||
use snafu::{ensure, ResultExt};
|
||||
|
||||
use crate::error::{ConvertFlightDataSnafu, IllegalFlightMessagesSnafu};
|
||||
use crate::error::{
|
||||
ConvertFlightDataSnafu, IllegalDatabaseResponseSnafu, IllegalFlightMessagesSnafu,
|
||||
};
|
||||
use crate::{error, Client, Result};
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
#[derive(Clone, Debug, Default)]
|
||||
pub struct Database {
|
||||
// The "catalog" and "schema" to be used in processing the requests at the server side.
|
||||
// They are the "hint" or "context", just like how the "database" in "USE" statement is treated in MySQL.
|
||||
// They will be carried in the request header.
|
||||
catalog: String,
|
||||
schema: String,
|
||||
// The dbname follows naming rule as out mysql, postgres and http
|
||||
// protocol. The server treat dbname in priority of catalog/schema.
|
||||
dbname: String,
|
||||
|
||||
client: Client,
|
||||
ctx: FlightContext,
|
||||
}
|
||||
|
||||
impl Database {
|
||||
/// Create database service client using catalog and schema
|
||||
pub fn new(catalog: impl Into<String>, schema: impl Into<String>, client: Client) -> Self {
|
||||
Self {
|
||||
catalog: catalog.into(),
|
||||
schema: schema.into(),
|
||||
client,
|
||||
ctx: FlightContext::default(),
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
|
||||
/// Create database service client using dbname.
|
||||
///
|
||||
/// This API is designed for external usage. `dbname` is:
|
||||
///
|
||||
/// - the name of database when using GreptimeDB standalone or cluster
|
||||
/// - the name provided by GreptimeCloud or other multi-tenant GreptimeDB
|
||||
/// environment
|
||||
pub fn new_with_dbname(dbname: impl Into<String>, client: Client) -> Self {
|
||||
Self {
|
||||
dbname: dbname.into(),
|
||||
client,
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -72,14 +92,41 @@ impl Database {
|
||||
self.schema = schema.into();
|
||||
}
|
||||
|
||||
pub fn dbname(&self) -> &String {
|
||||
&self.dbname
|
||||
}
|
||||
|
||||
pub fn set_dbname(&mut self, dbname: impl Into<String>) {
|
||||
self.dbname = dbname.into();
|
||||
}
|
||||
|
||||
pub fn set_auth(&mut self, auth: AuthScheme) {
|
||||
self.ctx.auth_header = Some(AuthHeader {
|
||||
auth_scheme: Some(auth),
|
||||
});
|
||||
}
|
||||
|
||||
pub async fn insert(&self, request: InsertRequest) -> Result<Output> {
|
||||
self.do_get(Request::Insert(request)).await
|
||||
pub async fn insert(&self, request: InsertRequest) -> Result<u32> {
|
||||
let mut client = self.client.make_database_client()?.inner;
|
||||
let request = GreptimeRequest {
|
||||
header: Some(RequestHeader {
|
||||
catalog: self.catalog.clone(),
|
||||
schema: self.schema.clone(),
|
||||
authorization: self.ctx.auth_header.clone(),
|
||||
dbname: self.dbname.clone(),
|
||||
}),
|
||||
request: Some(Request::Insert(request)),
|
||||
};
|
||||
let response = client
|
||||
.handle(request)
|
||||
.await?
|
||||
.into_inner()
|
||||
.response
|
||||
.context(IllegalDatabaseResponseSnafu {
|
||||
err_msg: "GreptimeResponse is empty",
|
||||
})?;
|
||||
let greptime_response::Response::AffectedRows(AffectedRows { value }) = response;
|
||||
Ok(value)
|
||||
}
|
||||
|
||||
pub async fn sql(&self, sql: &str) -> Result<Output> {
|
||||
@@ -148,6 +195,7 @@ impl Database {
|
||||
catalog: self.catalog.clone(),
|
||||
schema: self.schema.clone(),
|
||||
authorization: self.ctx.auth_header.clone(),
|
||||
dbname: self.dbname.clone(),
|
||||
}),
|
||||
request: Some(request),
|
||||
};
|
||||
@@ -155,7 +203,7 @@ impl Database {
|
||||
ticket: request.encode_to_vec().into(),
|
||||
};
|
||||
|
||||
let mut client = self.client.make_client()?;
|
||||
let mut client = self.client.make_flight_client()?;
|
||||
|
||||
// TODO(LFC): Streaming get flight data.
|
||||
let flight_data: Vec<FlightData> = client
|
||||
@@ -164,22 +212,22 @@ impl Database {
|
||||
.and_then(|response| response.into_inner().try_collect())
|
||||
.await
|
||||
.map_err(|e| {
|
||||
let code = get_metadata_value(&e, INNER_ERROR_CODE)
|
||||
.and_then(|s| StatusCode::from_str(&s).ok())
|
||||
.unwrap_or(StatusCode::Unknown);
|
||||
let msg = get_metadata_value(&e, INNER_ERROR_MSG).unwrap_or(e.to_string());
|
||||
error::ExternalSnafu { code, msg }
|
||||
let tonic_code = e.code();
|
||||
let e: error::Error = e.into();
|
||||
let code = e.status_code();
|
||||
let msg = e.to_string();
|
||||
error::ServerSnafu { code, msg }
|
||||
.fail::<()>()
|
||||
.map_err(BoxedError::new)
|
||||
.context(error::FlightGetSnafu {
|
||||
tonic_code: e.code(),
|
||||
tonic_code,
|
||||
addr: client.addr(),
|
||||
})
|
||||
.map_err(|error| {
|
||||
logging::error!(
|
||||
"Failed to do Flight get, addr: {}, code: {}, source: {}",
|
||||
client.addr(),
|
||||
e.code(),
|
||||
tonic_code,
|
||||
error
|
||||
);
|
||||
error
|
||||
@@ -210,12 +258,6 @@ impl Database {
|
||||
}
|
||||
}
|
||||
|
||||
fn get_metadata_value(e: &tonic::Status, key: &str) -> Option<String> {
|
||||
e.metadata()
|
||||
.get(key)
|
||||
.and_then(|v| String::from_utf8(v.as_bytes().to_vec()).ok())
|
||||
}
|
||||
|
||||
#[derive(Default, Debug, Clone)]
|
||||
pub struct FlightContext {
|
||||
auth_header: Option<AuthHeader>,
|
||||
|
||||
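Putting the new constructor and the `handle`-based insert path together, an external caller would typically look like the sketch below; the endpoint and database name are placeholders, and the `with_urls` constructor is again assumed:

```rust
use client::api::v1::InsertRequest;
use client::{Client, Database};

async fn insert_example(request: InsertRequest) -> Result<(), Box<dyn std::error::Error>> {
    // Constructor assumed, as in the health-check sketch above.
    let client = Client::with_urls(vec!["127.0.0.1:4001"]);

    // `new_with_dbname` is the external-facing constructor added in this diff;
    // the db name follows the same rules as the MySQL/PostgreSQL/HTTP protocols.
    let db = Database::new_with_dbname("public", client);

    // `insert` now goes through the GreptimeDatabase service and returns the
    // number of affected rows instead of a Flight `Output`.
    let affected = db.insert(request).await?;
    println!("inserted {affected} rows");
    Ok(())
}
```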
@@ -13,9 +13,10 @@
|
||||
// limitations under the License.
|
||||
|
||||
use std::any::Any;
|
||||
use std::str::FromStr;
|
||||
|
||||
use common_error::prelude::*;
|
||||
use tonic::Code;
|
||||
use tonic::{Code, Status};
|
||||
|
||||
#[derive(Debug, Snafu)]
|
||||
#[snafu(visibility(pub))]
|
||||
@@ -65,9 +66,12 @@ pub enum Error {
|
||||
source: common_grpc::error::Error,
|
||||
},
|
||||
|
||||
/// Error deserialized from gRPC metadata
|
||||
// Server error carried in Tonic Status's metadata.
|
||||
#[snafu(display("{}", msg))]
|
||||
ExternalError { code: StatusCode, msg: String },
|
||||
Server { code: StatusCode, msg: String },
|
||||
|
||||
#[snafu(display("Illegal Database response: {err_msg}"))]
|
||||
IllegalDatabaseResponse { err_msg: String },
|
||||
}
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
@@ -77,13 +81,15 @@ impl ErrorExt for Error {
|
||||
match self {
|
||||
Error::IllegalFlightMessages { .. }
|
||||
| Error::ColumnDataType { .. }
|
||||
| Error::MissingField { .. } => StatusCode::Internal,
|
||||
| Error::MissingField { .. }
|
||||
| Error::IllegalDatabaseResponse { .. } => StatusCode::Internal,
|
||||
|
||||
Error::Server { code, .. } => *code,
|
||||
Error::FlightGet { source, .. } => source.status_code(),
|
||||
Error::CreateChannel { source, .. } | Error::ConvertFlightData { source } => {
|
||||
source.status_code()
|
||||
}
|
||||
Error::IllegalGrpcClientState { .. } => StatusCode::Unexpected,
|
||||
Error::ExternalError { code, .. } => *code,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -95,3 +101,21 @@ impl ErrorExt for Error {
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
impl From<Status> for Error {
|
||||
fn from(e: Status) -> Self {
|
||||
fn get_metadata_value(e: &Status, key: &str) -> Option<String> {
|
||||
e.metadata()
|
||||
.get(key)
|
||||
.and_then(|v| String::from_utf8(v.as_bytes().to_vec()).ok())
|
||||
}
|
||||
|
||||
let code = get_metadata_value(&e, INNER_ERROR_CODE)
|
||||
.and_then(|s| StatusCode::from_str(&s).ok())
|
||||
.unwrap_or(StatusCode::Unknown);
|
||||
|
||||
let msg = get_metadata_value(&e, INNER_ERROR_MSG).unwrap_or(e.to_string());
|
||||
|
||||
Self::Server { code, msg }
|
||||
}
|
||||
}
|
||||
|
||||
@@ -12,6 +12,8 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#![doc = include_str!("../../../../README.md")]
|
||||
|
||||
use std::fmt;
|
||||
|
||||
use clap::Parser;
|
||||
|
||||
@@ -31,7 +31,6 @@ impl Instance {
|
||||
}
|
||||
|
||||
pub async fn stop(&self) -> Result<()> {
|
||||
// TODO: handle cli shutdown
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -21,7 +21,7 @@ use meta_client::MetaClientOptions;
|
||||
use servers::Mode;
|
||||
use snafu::ResultExt;
|
||||
|
||||
use crate::error::{Error, MissingConfigSnafu, Result, StartDatanodeSnafu};
|
||||
use crate::error::{Error, MissingConfigSnafu, Result, ShutdownDatanodeSnafu, StartDatanodeSnafu};
|
||||
use crate::toml_loader;
|
||||
|
||||
pub struct Instance {
|
||||
@@ -34,8 +34,10 @@ impl Instance {
|
||||
}
|
||||
|
||||
pub async fn stop(&self) -> Result<()> {
|
||||
// TODO: handle datanode shutdown
|
||||
Ok(())
|
||||
self.datanode
|
||||
.shutdown()
|
||||
.await
|
||||
.context(ShutdownDatanodeSnafu)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -26,12 +26,24 @@ pub enum Error {
|
||||
source: datanode::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to shutdown datanode, source: {}", source))]
|
||||
ShutdownDatanode {
|
||||
#[snafu(backtrace)]
|
||||
source: datanode::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to start frontend, source: {}", source))]
|
||||
StartFrontend {
|
||||
#[snafu(backtrace)]
|
||||
source: frontend::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to shutdown frontend, source: {}", source))]
|
||||
ShutdownFrontend {
|
||||
#[snafu(backtrace)]
|
||||
source: frontend::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to build meta server, source: {}", source))]
|
||||
BuildMetaServer {
|
||||
#[snafu(backtrace)]
|
||||
@@ -44,6 +56,12 @@ pub enum Error {
|
||||
source: meta_srv::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to shutdown meta server, source: {}", source))]
|
||||
ShutdownMetaServer {
|
||||
#[snafu(backtrace)]
|
||||
source: meta_srv::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to read config file: {}, source: {}", path, source))]
|
||||
ReadConfig {
|
||||
path: String,
|
||||
@@ -143,7 +161,10 @@ impl ErrorExt for Error {
|
||||
match self {
|
||||
Error::StartDatanode { source } => source.status_code(),
|
||||
Error::StartFrontend { source } => source.status_code(),
|
||||
Error::ShutdownDatanode { source } => source.status_code(),
|
||||
Error::ShutdownFrontend { source } => source.status_code(),
|
||||
Error::StartMetaServer { source } => source.status_code(),
|
||||
Error::ShutdownMetaServer { source } => source.status_code(),
|
||||
Error::BuildMetaServer { source } => source.status_code(),
|
||||
Error::UnsupportedSelectorType { source, .. } => source.status_code(),
|
||||
Error::ReadConfig { .. } | Error::ParseConfig { .. } | Error::MissingConfig { .. } => {
|
||||
|
||||
@@ -47,8 +47,10 @@ impl Instance {
|
||||
}
|
||||
|
||||
pub async fn stop(&self) -> Result<()> {
|
||||
// TODO: handle frontend shutdown
|
||||
Ok(())
|
||||
self.frontend
|
||||
.shutdown()
|
||||
.await
|
||||
.context(error::ShutdownFrontendSnafu)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -30,13 +30,14 @@ impl Instance {
|
||||
self.instance
|
||||
.start()
|
||||
.await
|
||||
.context(error::StartMetaServerSnafu)?;
|
||||
Ok(())
|
||||
.context(error::StartMetaServerSnafu)
|
||||
}
|
||||
|
||||
pub async fn stop(&self) -> Result<()> {
|
||||
// TODO: handle metasrv shutdown
|
||||
Ok(())
|
||||
self.instance
|
||||
.shutdown()
|
||||
.await
|
||||
.context(error::ShutdownMetaServerSnafu)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -36,7 +36,10 @@ use servers::tls::{TlsMode, TlsOption};
|
||||
use servers::Mode;
|
||||
use snafu::ResultExt;
|
||||
|
||||
use crate::error::{Error, IllegalConfigSnafu, Result, StartDatanodeSnafu, StartFrontendSnafu};
|
||||
use crate::error::{
|
||||
Error, IllegalConfigSnafu, Result, ShutdownDatanodeSnafu, ShutdownFrontendSnafu,
|
||||
StartDatanodeSnafu, StartFrontendSnafu,
|
||||
};
|
||||
use crate::frontend::load_frontend_plugins;
|
||||
use crate::toml_loader;
|
||||
|
||||
@@ -152,7 +155,17 @@ impl Instance {
|
||||
}
|
||||
|
||||
pub async fn stop(&self) -> Result<()> {
|
||||
// TODO: handle standalone shutdown
|
||||
self.frontend
|
||||
.shutdown()
|
||||
.await
|
||||
.context(ShutdownFrontendSnafu)?;
|
||||
|
||||
self.datanode
|
||||
.shutdown_instance()
|
||||
.await
|
||||
.context(ShutdownDatanodeSnafu)?;
|
||||
info!("Datanode instance stopped.");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
13  src/common/datasource/Cargo.toml  (new file)

@@ -0,0 +1,13 @@
[package]
name = "common-datasource"
version.workspace = true
edition.workspace = true
license.workspace = true

[dependencies]
common-error = { path = "../error" }
futures.workspace = true
object-store = { path = "../../object-store" }
regex = "1.7"
snafu.workspace = true
url = "2.3"
75  src/common/datasource/src/error.rs  (new file)
@@ -0,0 +1,75 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::any::Any;
|
||||
|
||||
use common_error::prelude::*;
|
||||
use url::ParseError;
|
||||
|
||||
#[derive(Debug, Snafu)]
|
||||
#[snafu(visibility(pub))]
|
||||
pub enum Error {
|
||||
#[snafu(display("Unsupported backend protocol: {}", protocol))]
|
||||
UnsupportedBackendProtocol { protocol: String },
|
||||
|
||||
#[snafu(display("empty host: {}", url))]
|
||||
EmptyHostPath { url: String },
|
||||
|
||||
#[snafu(display("Invalid path: {}", path))]
|
||||
InvalidPath { path: String },
|
||||
|
||||
#[snafu(display("Invalid url: {}, error :{}", url, source))]
|
||||
InvalidUrl { url: String, source: ParseError },
|
||||
|
||||
#[snafu(display("Failed to build backend, source: {}", source))]
|
||||
BuildBackend {
|
||||
source: object_store::Error,
|
||||
backtrace: Backtrace,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to list object in path: {}, source: {}", path, source))]
|
||||
ListObjects {
|
||||
path: String,
|
||||
backtrace: Backtrace,
|
||||
source: object_store::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Invalid connection: {}", msg))]
|
||||
InvalidConnection { msg: String },
|
||||
}
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
|
||||
impl ErrorExt for Error {
|
||||
fn status_code(&self) -> StatusCode {
|
||||
use Error::*;
|
||||
match self {
|
||||
BuildBackend { .. } | ListObjects { .. } => StatusCode::StorageUnavailable,
|
||||
|
||||
UnsupportedBackendProtocol { .. }
|
||||
| InvalidConnection { .. }
|
||||
| InvalidUrl { .. }
|
||||
| EmptyHostPath { .. }
|
||||
| InvalidPath { .. } => StatusCode::InvalidArguments,
|
||||
}
|
||||
}
|
||||
|
||||
fn backtrace_opt(&self) -> Option<&Backtrace> {
|
||||
ErrorCompat::backtrace(self)
|
||||
}
|
||||
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
}
|
||||
@@ -12,4 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

//! PromQL functions
pub mod error;
pub mod lister;
pub mod object_store;
pub mod util;
83  src/common/datasource/src/lister.rs  (new file)
@@ -0,0 +1,83 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use futures::{future, TryStreamExt};
|
||||
use object_store::{Entry, ObjectStore};
|
||||
use regex::Regex;
|
||||
use snafu::ResultExt;
|
||||
|
||||
use crate::error::{self, Result};
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub enum Source {
|
||||
Filename(String),
|
||||
Dir,
|
||||
}
|
||||
|
||||
pub struct Lister {
|
||||
object_store: ObjectStore,
|
||||
source: Source,
|
||||
path: String,
|
||||
regex: Option<Regex>,
|
||||
}
|
||||
|
||||
impl Lister {
|
||||
pub fn new(
|
||||
object_store: ObjectStore,
|
||||
source: Source,
|
||||
path: String,
|
||||
regex: Option<Regex>,
|
||||
) -> Self {
|
||||
Lister {
|
||||
object_store,
|
||||
source,
|
||||
path,
|
||||
regex,
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn list(&self) -> Result<Vec<Entry>> {
|
||||
match &self.source {
|
||||
Source::Dir => {
|
||||
let streamer = self
|
||||
.object_store
|
||||
.list(&self.path)
|
||||
.await
|
||||
.context(error::ListObjectsSnafu { path: &self.path })?;
|
||||
|
||||
streamer
|
||||
.try_filter(|f| {
|
||||
let res = self
|
||||
.regex
|
||||
.as_ref()
|
||||
.map(|x| x.is_match(f.name()))
|
||||
.unwrap_or(true);
|
||||
future::ready(res)
|
||||
})
|
||||
.try_collect::<Vec<_>>()
|
||||
.await
|
||||
.context(error::ListObjectsSnafu { path: &self.path })
|
||||
}
|
||||
Source::Filename(filename) => {
|
||||
// make sure this file exists
|
||||
let file_full_path = format!("{}{}", self.path, filename);
|
||||
let _ = self.object_store.stat(&file_full_path).await.context(
|
||||
error::ListObjectsSnafu {
|
||||
path: &file_full_path,
|
||||
},
|
||||
)?;
|
||||
Ok(vec![Entry::new(&file_full_path)])
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
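A usage sketch for the new `Lister`: list the entries under a directory whose names match a regex. The backend root, prefix, and pattern are arbitrary examples:

```rust
use common_datasource::lister::{Lister, Source};
use common_datasource::object_store::fs::build_fs_backend;
use regex::Regex;

async fn list_parquet_files() -> common_datasource::error::Result<Vec<object_store::Entry>> {
    // Local filesystem backend rooted at an example directory.
    let store = build_fs_backend("/tmp/export")?;

    // Only keep entries ending in ".parquet"; `None` would return everything.
    let regex = Some(Regex::new(r"\.parquet$").expect("valid regex"));

    let lister = Lister::new(store, Source::Dir, "data/".to_string(), regex);
    lister.list().await
}
```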
60  src/common/datasource/src/object_store.rs  (new file)
@@ -0,0 +1,60 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
pub mod fs;
|
||||
pub mod s3;
|
||||
use std::collections::HashMap;
|
||||
|
||||
use object_store::ObjectStore;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use url::{ParseError, Url};
|
||||
|
||||
use self::fs::build_fs_backend;
|
||||
use self::s3::build_s3_backend;
|
||||
use crate::error::{self, Result};
|
||||
|
||||
pub const FS_SCHEMA: &str = "FS";
|
||||
pub const S3_SCHEMA: &str = "S3";
|
||||
|
||||
/// parse url returns (schema,Option<host>,path)
|
||||
pub fn parse_url(url: &str) -> Result<(String, Option<String>, String)> {
|
||||
let parsed_url = Url::parse(url);
|
||||
match parsed_url {
|
||||
Ok(url) => Ok((
|
||||
url.scheme().to_string(),
|
||||
url.host_str().map(|s| s.to_string()),
|
||||
url.path().to_string(),
|
||||
)),
|
||||
Err(ParseError::RelativeUrlWithoutBase) => {
|
||||
Ok((FS_SCHEMA.to_string(), None, url.to_string()))
|
||||
}
|
||||
Err(err) => Err(err).context(error::InvalidUrlSnafu { url }),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn build_backend(url: &str, connection: HashMap<String, String>) -> Result<ObjectStore> {
|
||||
let (schema, host, _path) = parse_url(url)?;
|
||||
|
||||
match schema.to_uppercase().as_str() {
|
||||
S3_SCHEMA => {
|
||||
let host = host.context(error::EmptyHostPathSnafu {
|
||||
url: url.to_string(),
|
||||
})?;
|
||||
Ok(build_s3_backend(&host, "/", connection)?)
|
||||
}
|
||||
FS_SCHEMA => Ok(build_fs_backend("/")?),
|
||||
|
||||
_ => error::UnsupportedBackendProtocolSnafu { protocol: schema }.fail(),
|
||||
}
|
||||
}
|
||||
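And a sketch of `build_backend` resolving an S3 URL, with connection options named by the constants in `object_store/s3.rs`; the bucket, endpoint, and credentials are placeholders:

```rust
use std::collections::HashMap;

use common_datasource::object_store::build_backend;

fn s3_backend_example() -> common_datasource::error::Result<object_store::ObjectStore> {
    // Keys mirror the constants defined in object_store/s3.rs; values are fake.
    let connection = HashMap::from([
        ("ENDPOINT_URL".to_string(), "http://127.0.0.1:9000".to_string()),
        ("ACCESS_KEY_ID".to_string(), "example-key".to_string()),
        ("SECRET_ACCESS_KEY".to_string(), "example-secret".to_string()),
        ("REGION".to_string(), "us-east-1".to_string()),
    ]);

    // The "s3" scheme routes to build_s3_backend; a bare path would fall back
    // to the local FS backend via ParseError::RelativeUrlWithoutBase.
    build_backend("s3://example-bucket/some/prefix/", connection)
}
```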
28  src/common/datasource/src/object_store/fs.rs  (new file)
@@ -0,0 +1,28 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use object_store::services::Fs;
|
||||
use object_store::ObjectStore;
|
||||
use snafu::ResultExt;
|
||||
|
||||
use crate::error::{BuildBackendSnafu, Result};
|
||||
|
||||
pub fn build_fs_backend(root: &str) -> Result<ObjectStore> {
|
||||
let mut builder = Fs::default();
|
||||
builder.root(root);
|
||||
let object_store = ObjectStore::new(builder)
|
||||
.context(BuildBackendSnafu)?
|
||||
.finish();
|
||||
Ok(object_store)
|
||||
}
|
||||
79  src/common/datasource/src/object_store/s3.rs  (new file)
@@ -0,0 +1,79 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::collections::HashMap;

use object_store::services::S3;
use object_store::ObjectStore;
use snafu::ResultExt;

use crate::error::{self, Result};

const ENDPOINT_URL: &str = "ENDPOINT_URL";
const ACCESS_KEY_ID: &str = "ACCESS_KEY_ID";
const SECRET_ACCESS_KEY: &str = "SECRET_ACCESS_KEY";
const SESSION_TOKEN: &str = "SESSION_TOKEN";
const REGION: &str = "REGION";
const ENABLE_VIRTUAL_HOST_STYLE: &str = "ENABLE_VIRTUAL_HOST_STYLE";

pub fn build_s3_backend(
    host: &str,
    path: &str,
    connection: HashMap<String, String>,
) -> Result<ObjectStore> {
    let mut builder = S3::default();

    builder.root(path);

    builder.bucket(host);

    if let Some(endpoint) = connection.get(ENDPOINT_URL) {
        builder.endpoint(endpoint);
    }

    if let Some(region) = connection.get(REGION) {
        builder.region(region);
    }

    if let Some(key_id) = connection.get(ACCESS_KEY_ID) {
        builder.access_key_id(key_id);
    }

    if let Some(key) = connection.get(SECRET_ACCESS_KEY) {
        builder.secret_access_key(key);
    }

    if let Some(session_token) = connection.get(SESSION_TOKEN) {
        builder.security_token(session_token);
    }

    if let Some(enable_str) = connection.get(ENABLE_VIRTUAL_HOST_STYLE) {
        let enable = enable_str.as_str().parse::<bool>().map_err(|e| {
            error::InvalidConnectionSnafu {
                msg: format!(
                    "failed to parse the option {}={}, {}",
                    ENABLE_VIRTUAL_HOST_STYLE, enable_str, e
                ),
            }
            .build()
        })?;
        if enable {
            builder.enable_virtual_host_style();
        }
    }

    Ok(ObjectStore::new(builder)
        .context(error::BuildBackendSnafu)?
        .finish())
}
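One detail worth spelling out (an illustrative snippet, not part of the change): the ENABLE_VIRTUAL_HOST_STYLE value goes through Rust's str::parse::<bool>(), which only accepts the exact lowercase strings "true" and "false", so any other spelling surfaces as the InvalidConnection error built above.

// Illustrative only: how the option string is interpreted by str::parse::<bool>().
assert_eq!("true".parse::<bool>(), Ok(true));
assert_eq!("false".parse::<bool>(), Ok(false));
assert!("True".parse::<bool>().is_err()); // case-sensitive
assert!("1".parse::<bool>().is_err());    // numeric flags are rejected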
125
src/common/datasource/src/util.rs
Normal file
@@ -0,0 +1,125 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

pub fn find_dir_and_filename(path: &str) -> (String, Option<String>) {
    if path.is_empty() {
        ("/".to_string(), None)
    } else if path.ends_with('/') {
        (path.to_string(), None)
    } else if let Some(idx) = path.rfind('/') {
        (
            path[..idx + 1].to_string(),
            Some(path[idx + 1..].to_string()),
        )
    } else {
        ("/".to_string(), Some(path.to_string()))
    }
}

#[cfg(test)]
mod tests {

    use url::Url;

    use super::*;

    #[test]
    fn test_parse_uri() {
        struct Test<'a> {
            uri: &'a str,
            expected_path: &'a str,
            expected_schema: &'a str,
        }

        let tests = [
            Test {
                uri: "s3://bucket/to/path/",
                expected_path: "/to/path/",
                expected_schema: "s3",
            },
            Test {
                uri: "fs:///to/path/",
                expected_path: "/to/path/",
                expected_schema: "fs",
            },
            Test {
                uri: "fs:///to/path/file",
                expected_path: "/to/path/file",
                expected_schema: "fs",
            },
        ];
        for test in tests {
            let parsed_uri = Url::parse(test.uri).unwrap();
            assert_eq!(parsed_uri.path(), test.expected_path);
            assert_eq!(parsed_uri.scheme(), test.expected_schema);
        }
    }

    #[test]
    fn test_parse_path_and_dir() {
        let parsed = Url::from_file_path("/to/path/file").unwrap();
        assert_eq!(parsed.path(), "/to/path/file");

        let parsed = Url::from_directory_path("/to/path/").unwrap();
        assert_eq!(parsed.path(), "/to/path/");
    }

    #[test]
    fn test_find_dir_and_filename() {
        struct Test<'a> {
            path: &'a str,
            expected_dir: &'a str,
            expected_filename: Option<String>,
        }

        let tests = [
            Test {
                path: "to/path/",
                expected_dir: "to/path/",
                expected_filename: None,
            },
            Test {
                path: "to/path/filename",
                expected_dir: "to/path/",
                expected_filename: Some("filename".into()),
            },
            Test {
                path: "/to/path/filename",
                expected_dir: "/to/path/",
                expected_filename: Some("filename".into()),
            },
            Test {
                path: "/",
                expected_dir: "/",
                expected_filename: None,
            },
            Test {
                path: "filename",
                expected_dir: "/",
                expected_filename: Some("filename".into()),
            },
            Test {
                path: "",
                expected_dir: "/",
                expected_filename: None,
            },
        ];

        for test in tests {
            let (path, filename) = find_dir_and_filename(test.path);
            assert_eq!(test.expected_dir, path);
            assert_eq!(test.expected_filename, filename)
        }
    }
}
@@ -11,11 +11,17 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
mod to_unixtime;

use to_unixtime::ToUnixtimeFunction;

use crate::scalars::function_registry::FunctionRegistry;

pub(crate) struct TimestampFunction;

impl TimestampFunction {
    pub fn register(_registry: &FunctionRegistry) {}
    pub fn register(registry: &FunctionRegistry) {
        registry.register(Arc::new(ToUnixtimeFunction::default()));
    }
}

148
src/common/function/src/scalars/timestamp/to_unixtime.rs
Normal file
@@ -0,0 +1,148 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::fmt;
use std::str::FromStr;
use std::sync::Arc;

use common_query::error::{self, Result, UnsupportedInputDataTypeSnafu};
use common_query::prelude::{Signature, Volatility};
use common_time::timestamp::TimeUnit;
use common_time::Timestamp;
use datatypes::prelude::ConcreteDataType;
use datatypes::types::StringType;
use datatypes::vectors::{Int64Vector, StringVector, Vector, VectorRef};
use snafu::ensure;

use crate::scalars::function::{Function, FunctionContext};

#[derive(Clone, Debug, Default)]
pub struct ToUnixtimeFunction;

const NAME: &str = "to_unixtime";

fn convert_to_seconds(arg: &str) -> Option<i64> {
    match Timestamp::from_str(arg) {
        Ok(ts) => {
            let sec_mul = (TimeUnit::Second.factor() / ts.unit().factor()) as i64;
            Some(ts.value().div_euclid(sec_mul))
        }
        Err(_err) => None,
    }
}

impl Function for ToUnixtimeFunction {
    fn name(&self) -> &str {
        NAME
    }

    fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
        Ok(ConcreteDataType::timestamp_second_datatype())
    }

    fn signature(&self) -> Signature {
        Signature::exact(
            vec![ConcreteDataType::String(StringType)],
            Volatility::Immutable,
        )
    }

    fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
        ensure!(
            columns.len() == 1,
            error::InvalidFuncArgsSnafu {
                err_msg: format!(
                    "The length of the args is not correct, expect exactly one, have: {}",
                    columns.len()
                ),
            }
        );

        match columns[0].data_type() {
            ConcreteDataType::String(_) => {
                let array = columns[0].to_arrow_array();
                let vector = StringVector::try_from_arrow_array(&array).unwrap();
                Ok(Arc::new(Int64Vector::from(
                    (0..vector.len())
                        .map(|i| convert_to_seconds(&vector.get(i).to_string()))
                        .collect::<Vec<_>>(),
                )))
            }
            _ => UnsupportedInputDataTypeSnafu {
                function: NAME,
                datatypes: columns.iter().map(|c| c.data_type()).collect::<Vec<_>>(),
            }
            .fail(),
        }
    }
}

impl fmt::Display for ToUnixtimeFunction {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "TO_UNIXTIME")
    }
}

#[cfg(test)]
mod tests {
    use common_query::prelude::TypeSignature;
    use datatypes::prelude::ConcreteDataType;
    use datatypes::types::StringType;
    use datatypes::value::Value;
    use datatypes::vectors::StringVector;

    use super::{ToUnixtimeFunction, *};
    use crate::scalars::Function;

    #[test]
    fn test_to_unixtime() {
        let f = ToUnixtimeFunction::default();
        assert_eq!("to_unixtime", f.name());
        assert_eq!(
            ConcreteDataType::timestamp_second_datatype(),
            f.return_type(&[]).unwrap()
        );

        assert!(matches!(f.signature(),
            Signature {
                type_signature: TypeSignature::Exact(valid_types),
                volatility: Volatility::Immutable
            } if valid_types == vec![ConcreteDataType::String(StringType)]
        ));

        let times = vec![
            Some("2023-03-01T06:35:02Z"),
            None,
            Some("2022-06-30T23:59:60Z"),
            Some("invalid_time_stamp"),
        ];
        let results = vec![Some(1677652502), None, Some(1656633600), None];
        let args: Vec<VectorRef> = vec![Arc::new(StringVector::from(times.clone()))];
        let vector = f.eval(FunctionContext::default(), &args).unwrap();
        assert_eq!(4, vector.len());
        for (i, _t) in times.iter().enumerate() {
            let v = vector.get(i);
            if i == 1 || i == 3 {
                assert_eq!(Value::Null, v);
                continue;
            }
            match v {
                Value::Int64(ts) => {
                    assert_eq!(ts, (*results.get(i).unwrap()).unwrap());
                }
                _ => unreachable!(),
            }
        }
    }
}
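Assuming the semantics exercised by test_to_unixtime above, convert_to_seconds maps any string that Timestamp::from_str accepts to whole seconds and everything else to None; a minimal behavior sketch (values copied from the test, not new data):

// Parseable timestamps become whole Unix seconds, unparseable input becomes None (NULL in the result vector).
assert_eq!(convert_to_seconds("2023-03-01T06:35:02Z"), Some(1677652502));
assert_eq!(convert_to_seconds("invalid_time_stamp"), None);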
@@ -32,7 +32,7 @@ pub enum Error {
    DecodeInsert { source: DecodeError },

    #[snafu(display("Illegal insert data"))]
    IllegalInsertData,
    IllegalInsertData { backtrace: Backtrace },

    #[snafu(display("Column datatype error, source: {}", source))]
    ColumnDataType {

@@ -26,7 +26,7 @@ tower = "0.4"
|
||||
|
||||
[dev-dependencies]
|
||||
criterion = "0.4"
|
||||
rand = "0.8"
|
||||
rand.workspace = true
|
||||
|
||||
[[bench]]
|
||||
name = "bench_main"
|
||||
|
||||
@@ -6,6 +6,7 @@ license.workspace = true
|
||||
|
||||
[dependencies]
|
||||
async-trait.workspace = true
|
||||
async-stream.workspace = true
|
||||
common-error = { path = "../error" }
|
||||
common-runtime = { path = "../runtime" }
|
||||
common-telemetry = { path = "../telemetry" }
|
||||
|
||||
@@ -423,7 +423,6 @@ impl ProcedureManager for LocalManager {
|
||||
mod test_util {
|
||||
use common_test_util::temp_dir::TempDir;
|
||||
use object_store::services::Fs as Builder;
|
||||
use object_store::ObjectStoreBuilder;
|
||||
|
||||
use super::*;
|
||||
|
||||
@@ -433,8 +432,9 @@ mod test_util {
|
||||
|
||||
pub(crate) fn new_object_store(dir: &TempDir) -> ObjectStore {
|
||||
let store_dir = dir.path().to_str().unwrap();
|
||||
let accessor = Builder::default().root(store_dir).build().unwrap();
|
||||
ObjectStore::new(accessor).finish()
|
||||
let mut builder = Builder::default();
|
||||
builder.root(store_dir);
|
||||
ObjectStore::new(builder).unwrap().finish()
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -473,8 +473,7 @@ mod tests {
|
||||
|
||||
async fn check_files(object_store: &ObjectStore, procedure_id: ProcedureId, files: &[&str]) {
|
||||
let dir = format!("{procedure_id}/");
|
||||
let object = object_store.object(&dir);
|
||||
let lister = object.list().await.unwrap();
|
||||
let lister = object_store.list(&dir).await.unwrap();
|
||||
let mut files_in_dir: Vec<_> = lister
|
||||
.map_ok(|de| de.name().to_string())
|
||||
.try_collect()
|
||||
|
||||
@@ -248,15 +248,15 @@ mod tests {
|
||||
use async_trait::async_trait;
|
||||
use common_test_util::temp_dir::{create_temp_dir, TempDir};
|
||||
use object_store::services::Fs as Builder;
|
||||
use object_store::ObjectStoreBuilder;
|
||||
|
||||
use super::*;
|
||||
use crate::{Context, LockKey, Procedure, Status};
|
||||
|
||||
fn procedure_store_for_test(dir: &TempDir) -> ProcedureStore {
|
||||
let store_dir = dir.path().to_str().unwrap();
|
||||
let accessor = Builder::default().root(store_dir).build().unwrap();
|
||||
let object_store = ObjectStore::new(accessor).finish();
|
||||
let mut builder = Builder::default();
|
||||
builder.root(store_dir);
|
||||
let object_store = ObjectStore::new(builder).unwrap().finish();
|
||||
|
||||
ProcedureStore::from(object_store)
|
||||
}
|
||||
|
||||
@@ -15,12 +15,13 @@
|
||||
use std::pin::Pin;
|
||||
use std::sync::Arc;
|
||||
|
||||
use async_stream::try_stream;
|
||||
use async_trait::async_trait;
|
||||
use futures::{Stream, TryStreamExt};
|
||||
use object_store::{ObjectMode, ObjectStore};
|
||||
use futures::{Stream, StreamExt};
|
||||
use object_store::{EntryMode, Metakey, ObjectStore};
|
||||
use snafu::ResultExt;
|
||||
|
||||
use crate::error::{DeleteStateSnafu, Error, PutStateSnafu, Result};
|
||||
use crate::error::{DeleteStateSnafu, Error, ListStateSnafu, PutStateSnafu, Result};
|
||||
|
||||
/// Key value from state store.
|
||||
type KeyValue = (String, Vec<u8>);
|
||||
@@ -64,49 +65,49 @@ impl ObjectStateStore {
|
||||
#[async_trait]
|
||||
impl StateStore for ObjectStateStore {
|
||||
async fn put(&self, key: &str, value: Vec<u8>) -> Result<()> {
|
||||
let object = self.store.object(key);
|
||||
object.write(value).await.context(PutStateSnafu { key })
|
||||
self.store
|
||||
.write(key, value)
|
||||
.await
|
||||
.context(PutStateSnafu { key })
|
||||
}
|
||||
|
||||
async fn walk_top_down(&self, path: &str) -> Result<KeyValueStream> {
|
||||
let path_string = path.to_string();
|
||||
|
||||
let lister = self
|
||||
.store
|
||||
.object(path)
|
||||
.scan()
|
||||
.await
|
||||
.map_err(|e| Error::ListState {
|
||||
path: path_string.clone(),
|
||||
source: e,
|
||||
})?;
|
||||
let mut lister = self.store.scan(path).await.map_err(|e| Error::ListState {
|
||||
path: path_string.clone(),
|
||||
source: e,
|
||||
})?;
|
||||
|
||||
let stream = lister
|
||||
.try_filter_map(|entry| async move {
|
||||
let store = self.store.clone();
|
||||
|
||||
let stream = try_stream!({
|
||||
while let Some(res) = lister.next().await {
|
||||
let entry = res.context(ListStateSnafu { path: &path_string })?;
|
||||
let key = entry.path();
|
||||
let key_value = match entry.mode().await? {
|
||||
ObjectMode::FILE => {
|
||||
let value = entry.read().await?;
|
||||
|
||||
Some((key.to_string(), value))
|
||||
}
|
||||
ObjectMode::DIR | ObjectMode::Unknown => None,
|
||||
};
|
||||
|
||||
Ok(key_value)
|
||||
})
|
||||
.map_err(move |e| Error::ListState {
|
||||
path: path_string.clone(),
|
||||
source: e,
|
||||
});
|
||||
let metadata = store
|
||||
.metadata(&entry, Metakey::Mode)
|
||||
.await
|
||||
.context(ListStateSnafu { path: key })?;
|
||||
if let EntryMode::FILE = metadata.mode() {
|
||||
let value = store
|
||||
.read(key)
|
||||
.await
|
||||
.context(ListStateSnafu { path: key })?;
|
||||
yield (key.to_string(), value);
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
Ok(Box::pin(stream))
|
||||
}
|
||||
|
||||
async fn delete(&self, keys: &[String]) -> Result<()> {
|
||||
for key in keys {
|
||||
let object = self.store.object(key);
|
||||
object.delete().await.context(DeleteStateSnafu { key })?;
|
||||
self.store
|
||||
.delete(key)
|
||||
.await
|
||||
.context(DeleteStateSnafu { key })?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
@@ -116,8 +117,8 @@ impl StateStore for ObjectStateStore {
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use common_test_util::temp_dir::create_temp_dir;
|
||||
use futures_util::TryStreamExt;
|
||||
use object_store::services::Fs as Builder;
|
||||
use object_store::ObjectStoreBuilder;
|
||||
|
||||
use super::*;
|
||||
|
||||
@@ -125,8 +126,10 @@ mod tests {
|
||||
async fn test_object_state_store() {
|
||||
let dir = create_temp_dir("state_store");
|
||||
let store_dir = dir.path().to_str().unwrap();
|
||||
let accessor = Builder::default().root(store_dir).build().unwrap();
|
||||
let object_store = ObjectStore::new(accessor).finish();
|
||||
let mut builder = Builder::default();
|
||||
builder.root(store_dir);
|
||||
|
||||
let object_store = ObjectStore::new(builder).unwrap().finish();
|
||||
let state_store = ObjectStateStore::new(object_store);
|
||||
|
||||
let data: Vec<_> = state_store
|
||||
|
||||
@@ -17,6 +17,7 @@ use std::any::Any;
|
||||
|
||||
use common_error::ext::BoxedError;
|
||||
use common_error::prelude::*;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
|
||||
@@ -70,6 +71,26 @@ pub enum Error {
|
||||
source: datafusion_common::DataFusionError,
|
||||
backtrace: Backtrace,
|
||||
},
|
||||
|
||||
#[snafu(display("Column {} not exists in table {}", column_name, table_name))]
|
||||
ColumnNotExists {
|
||||
column_name: String,
|
||||
table_name: String,
|
||||
backtrace: Backtrace,
|
||||
},
|
||||
|
||||
#[snafu(display(
|
||||
"Failed to cast vector of type '{:?}' to type '{:?}', source: {}",
|
||||
from_type,
|
||||
to_type,
|
||||
source
|
||||
))]
|
||||
CastVector {
|
||||
from_type: ConcreteDataType,
|
||||
to_type: ConcreteDataType,
|
||||
#[snafu(backtrace)]
|
||||
source: datatypes::error::Error,
|
||||
},
|
||||
}
|
||||
|
||||
impl ErrorExt for Error {
|
||||
@@ -81,11 +102,14 @@ impl ErrorExt for Error {
|
||||
| Error::CreateRecordBatches { .. }
|
||||
| Error::PollStream { .. }
|
||||
| Error::Format { .. }
|
||||
| Error::InitRecordbatchStream { .. } => StatusCode::Internal,
|
||||
| Error::InitRecordbatchStream { .. }
|
||||
| Error::ColumnNotExists { .. } => StatusCode::Internal,
|
||||
|
||||
Error::External { source } => source.status_code(),
|
||||
|
||||
Error::SchemaConversion { source, .. } => source.status_code(),
|
||||
Error::SchemaConversion { source, .. } | Error::CastVector { source, .. } => {
|
||||
source.status_code()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -12,14 +12,16 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::HashMap;
|
||||
|
||||
use datatypes::schema::SchemaRef;
|
||||
use datatypes::value::Value;
|
||||
use datatypes::vectors::{Helper, VectorRef};
|
||||
use serde::ser::{Error, SerializeStruct};
|
||||
use serde::{Serialize, Serializer};
|
||||
use snafu::ResultExt;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
|
||||
use crate::error::{self, Result};
|
||||
use crate::error::{self, CastVectorSnafu, ColumnNotExistsSnafu, Result};
|
||||
use crate::DfRecordBatch;
|
||||
|
||||
/// A two-dimensional batch of column-oriented data with a defined schema.
|
||||
@@ -108,6 +110,41 @@ impl RecordBatch {
|
||||
pub fn rows(&self) -> RecordBatchRowIterator<'_> {
|
||||
RecordBatchRowIterator::new(self)
|
||||
}
|
||||
|
||||
pub fn column_vectors(
|
||||
&self,
|
||||
table_name: &str,
|
||||
table_schema: SchemaRef,
|
||||
) -> Result<HashMap<String, VectorRef>> {
|
||||
let mut vectors = HashMap::with_capacity(self.num_columns());
|
||||
|
||||
// column schemas in recordbatch must match its vectors, otherwise it's corrupted
|
||||
for (vector_schema, vector) in self.schema.column_schemas().iter().zip(self.columns.iter())
|
||||
{
|
||||
let column_name = &vector_schema.name;
|
||||
let column_schema =
|
||||
table_schema
|
||||
.column_schema_by_name(column_name)
|
||||
.context(ColumnNotExistsSnafu {
|
||||
table_name,
|
||||
column_name,
|
||||
})?;
|
||||
let vector = if vector_schema.data_type != column_schema.data_type {
|
||||
vector
|
||||
.cast(&column_schema.data_type)
|
||||
.with_context(|_| CastVectorSnafu {
|
||||
from_type: vector.data_type(),
|
||||
to_type: column_schema.data_type.clone(),
|
||||
})?
|
||||
} else {
|
||||
vector.clone()
|
||||
};
|
||||
|
||||
vectors.insert(column_name.clone(), vector);
|
||||
}
|
||||
|
||||
Ok(vectors)
|
||||
}
|
||||
}
|
||||
|
||||
impl Serialize for RecordBatch {
|
||||
|
||||
@@ -12,4 +12,4 @@ serde_json = "1.0"
snafu = { version = "0.7", features = ["backtraces"] }

[dev-dependencies]
rand = "0.8"
rand.workspace = true

@@ -1,4 +1,3 @@
#![feature(int_roundings)]
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");

@@ -26,6 +26,7 @@ use snafu::{OptionExt, ResultExt};

use crate::error;
use crate::error::{ArithmeticOverflowSnafu, Error, ParseTimestampSnafu, TimestampOverflowSnafu};
use crate::util::div_ceil;

#[derive(Debug, Clone, Default, Copy, Serialize, Deserialize)]
pub struct Timestamp {
@@ -143,7 +144,7 @@
            Some(Timestamp::new(value, unit))
        } else {
            let mul = unit.factor() / self.unit().factor();
            Some(Timestamp::new(self.value.div_ceil(mul as i64), unit))
            Some(Timestamp::new(div_ceil(self.value, mul as i64), unit))
        }
    }

@@ -17,6 +17,17 @@ pub fn current_time_millis() -> i64 {
    chrono::Utc::now().timestamp_millis()
}

/// Port of rust unstable features `int_roundings`.
pub(crate) fn div_ceil(this: i64, rhs: i64) -> i64 {
    let d = this / rhs;
    let r = this % rhs;
    if r > 0 && rhs > 0 {
        d + 1
    } else {
        d
    }
}

#[cfg(test)]
mod tests {
    use std::time::{self, SystemTime};
@@ -42,4 +53,10 @@ mod tests {
        assert_eq!(datetime_std.hour(), datetime_now.hour());
        assert_eq!(datetime_std.minute(), datetime_now.minute());
    }

    #[test]
    fn test_div_ceil() {
        let v0 = 9223372036854676001;
        assert_eq!(9223372036854677, div_ceil(v0, 1000));
    }
}

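A quick worked check of the rounding rule in div_ceil above (illustrative values, not taken from the change): the extra +1 is applied only when the remainder is positive and the divisor is positive, so negative dividends keep the truncated quotient.

// 7 / 2 truncates to 3 with remainder 1, so div_ceil rounds up.
assert_eq!(div_ceil(7, 2), 4);
// -7 / 2 truncates to -3 with remainder -1, so no rounding is applied.
assert_eq!(div_ceil(-7, 2), -3);
// Exact division leaves the quotient untouched.
assert_eq!(div_ceil(8, 2), 4);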
@@ -20,6 +20,8 @@ catalog = { path = "../catalog" }
|
||||
common-base = { path = "../common/base" }
|
||||
common-catalog = { path = "../common/catalog" }
|
||||
common-error = { path = "../common/error" }
|
||||
common-datasource = { path = "../common/datasource" }
|
||||
common-function = { path = "../common/function" }
|
||||
common-grpc = { path = "../common/grpc" }
|
||||
common-grpc-expr = { path = "../common/grpc-expr" }
|
||||
common-procedure = { path = "../common/procedure" }
|
||||
|
||||
@@ -255,7 +255,7 @@ impl Datanode {
|
||||
self.instance.clone()
|
||||
}
|
||||
|
||||
async fn shutdown_instance(&self) -> Result<()> {
|
||||
pub async fn shutdown_instance(&self) -> Result<()> {
|
||||
self.instance.shutdown().await
|
||||
}
|
||||
|
||||
|
||||
@@ -14,11 +14,10 @@
|
||||
|
||||
use std::any::Any;
|
||||
|
||||
use common_datasource::error::Error as DataSourceError;
|
||||
use common_error::prelude::*;
|
||||
use common_procedure::ProcedureId;
|
||||
use common_recordbatch::error::Error as RecordBatchError;
|
||||
use datafusion::parquet;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use storage::error::Error as StorageError;
|
||||
use table::error::Error as TableError;
|
||||
use url::ParseError;
|
||||
@@ -124,24 +123,6 @@ pub enum Error {
|
||||
))]
|
||||
ColumnValuesNumberMismatch { columns: usize, values: usize },
|
||||
|
||||
#[snafu(display(
|
||||
"Column type mismatch, column: {}, expected type: {:?}, actual: {:?}",
|
||||
column,
|
||||
expected,
|
||||
actual,
|
||||
))]
|
||||
ColumnTypeMismatch {
|
||||
column: String,
|
||||
expected: ConcreteDataType,
|
||||
actual: ConcreteDataType,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to collect record batch, source: {}", source))]
|
||||
CollectRecords {
|
||||
#[snafu(backtrace)]
|
||||
source: RecordBatchError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to parse sql value, source: {}", source))]
|
||||
ParseSqlValue {
|
||||
#[snafu(backtrace)]
|
||||
@@ -218,7 +199,13 @@ pub enum Error {
|
||||
|
||||
#[snafu(display("Failed to build backend, source: {}", source))]
|
||||
BuildBackend {
|
||||
source: object_store::Error,
|
||||
#[snafu(backtrace)]
|
||||
source: DataSourceError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to parse url, source: {}", source))]
|
||||
ParseUrl {
|
||||
source: DataSourceError,
|
||||
backtrace: Backtrace,
|
||||
},
|
||||
|
||||
@@ -249,6 +236,12 @@ pub enum Error {
|
||||
source: regex::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to list objects, source: {}", source))]
|
||||
ListObjects {
|
||||
#[snafu(backtrace)]
|
||||
source: DataSourceError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to parse the data, source: {}", source))]
|
||||
ParseDataTypes {
|
||||
#[snafu(backtrace)]
|
||||
@@ -475,13 +468,6 @@ pub enum Error {
|
||||
source: object_store::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to lists object in path: {}, source: {}", path, source))]
|
||||
ListObjects {
|
||||
path: String,
|
||||
backtrace: Backtrace,
|
||||
source: object_store::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Unrecognized table option: {}", source))]
|
||||
UnrecognizedTableOption {
|
||||
#[snafu(backtrace)]
|
||||
@@ -550,8 +536,6 @@ impl ErrorExt for Error {
|
||||
|
||||
Insert { source, .. } => source.status_code(),
|
||||
Delete { source, .. } => source.status_code(),
|
||||
CollectRecords { source, .. } => source.status_code(),
|
||||
|
||||
TableNotFound { .. } => StatusCode::TableNotFound,
|
||||
ColumnNotFound { .. } => StatusCode::TableColumnNotFound,
|
||||
|
||||
@@ -564,7 +548,6 @@ impl ErrorExt for Error {
|
||||
ConvertSchema { source, .. } | VectorComputation { source } => source.status_code(),
|
||||
|
||||
ColumnValuesNumberMismatch { .. }
|
||||
| ColumnTypeMismatch { .. }
|
||||
| InvalidSql { .. }
|
||||
| InvalidUrl { .. }
|
||||
| InvalidPath { .. }
|
||||
@@ -584,7 +567,8 @@ impl ErrorExt for Error {
|
||||
| DatabaseNotFound { .. }
|
||||
| MissingNodeId { .. }
|
||||
| MissingMetasrvOpts { .. }
|
||||
| ColumnNoneDefaultValue { .. } => StatusCode::InvalidArguments,
|
||||
| ColumnNoneDefaultValue { .. }
|
||||
| ParseUrl { .. } => StatusCode::InvalidArguments,
|
||||
|
||||
// TODO(yingwen): Further categorize http error.
|
||||
StartServer { .. }
|
||||
|
||||
@@ -17,7 +17,7 @@ use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use api::v1::meta::{HeartbeatRequest, HeartbeatResponse, NodeStat, Peer};
|
||||
use catalog::{region_stats, CatalogManagerRef};
|
||||
use catalog::{datanode_stat, CatalogManagerRef};
|
||||
use common_telemetry::{error, info, warn};
|
||||
use meta_client::client::{HeartbeatSender, MetaClient};
|
||||
use snafu::ResultExt;
|
||||
@@ -106,13 +106,7 @@ impl HeartbeatTask {
|
||||
let mut tx = Self::create_streams(&meta_client, running.clone()).await?;
|
||||
common_runtime::spawn_bg(async move {
|
||||
while running.load(Ordering::Acquire) {
|
||||
let (region_num, region_stats) = match region_stats(&catalog_manager_clone).await {
|
||||
Ok(region_stats) => (region_stats.len() as i64, region_stats),
|
||||
Err(e) => {
|
||||
error!("failed to get region status, err: {e:?}");
|
||||
(-1, vec![])
|
||||
}
|
||||
};
|
||||
let (region_num, region_stats) = datanode_stat(&catalog_manager_clone).await;
|
||||
|
||||
let req = HeartbeatRequest {
|
||||
peer: Some(Peer {
|
||||
@@ -120,7 +114,7 @@ impl HeartbeatTask {
|
||||
addr: addr.clone(),
|
||||
}),
|
||||
node_stat: Some(NodeStat {
|
||||
region_num,
|
||||
region_num: region_num as _,
|
||||
..Default::default()
|
||||
}),
|
||||
region_stats,
|
||||
|
||||
@@ -37,12 +37,14 @@ use object_store::services::{Fs as FsBuilder, Oss as OSSBuilder, S3 as S3Builder
|
||||
use object_store::{util, ObjectStore, ObjectStoreBuilder};
|
||||
use query::query_engine::{QueryEngineFactory, QueryEngineRef};
|
||||
use servers::Mode;
|
||||
use session::context::QueryContext;
|
||||
use snafu::prelude::*;
|
||||
use storage::compaction::{CompactionHandler, CompactionSchedulerRef, SimplePicker};
|
||||
use storage::config::EngineConfig as StorageEngineConfig;
|
||||
use storage::scheduler::{LocalScheduler, SchedulerConfig};
|
||||
use storage::EngineImpl;
|
||||
use store_api::logstore::LogStore;
|
||||
use table::requests::FlushTableRequest;
|
||||
use table::table::numbers::NumbersTable;
|
||||
use table::table::TableIdProviderRef;
|
||||
use table::Table;
|
||||
@@ -56,7 +58,7 @@ use crate::error::{
|
||||
};
|
||||
use crate::heartbeat::HeartbeatTask;
|
||||
use crate::script::ScriptExecutor;
|
||||
use crate::sql::SqlHandler;
|
||||
use crate::sql::{SqlHandler, SqlRequest};
|
||||
|
||||
mod grpc;
|
||||
mod script;
|
||||
@@ -202,7 +204,6 @@ impl Instance {
|
||||
sql_handler: SqlHandler::new(
|
||||
table_engine.clone(),
|
||||
catalog_manager.clone(),
|
||||
query_engine.clone(),
|
||||
table_engine,
|
||||
procedure_manager,
|
||||
),
|
||||
@@ -233,6 +234,8 @@ impl Instance {
|
||||
.context(ShutdownInstanceSnafu)?;
|
||||
}
|
||||
|
||||
self.flush_tables().await?;
|
||||
|
||||
self.sql_handler
|
||||
.close()
|
||||
.await
|
||||
@@ -240,6 +243,43 @@ impl Instance {
|
||||
.context(ShutdownInstanceSnafu)
|
||||
}
|
||||
|
||||
pub async fn flush_tables(&self) -> Result<()> {
|
||||
info!("going to flush all schemas");
|
||||
let schema_list = self
|
||||
.catalog_manager
|
||||
.catalog(DEFAULT_CATALOG_NAME)
|
||||
.map_err(BoxedError::new)
|
||||
.context(ShutdownInstanceSnafu)?
|
||||
.expect("Default schema not found")
|
||||
.schema_names()
|
||||
.map_err(BoxedError::new)
|
||||
.context(ShutdownInstanceSnafu)?;
|
||||
let flush_requests = schema_list
|
||||
.into_iter()
|
||||
.map(|schema_name| {
|
||||
SqlRequest::FlushTable(FlushTableRequest {
|
||||
catalog_name: DEFAULT_CATALOG_NAME.to_string(),
|
||||
schema_name,
|
||||
table_name: None,
|
||||
region_number: None,
|
||||
wait: Some(true),
|
||||
})
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
let flush_result = futures::future::try_join_all(
|
||||
flush_requests
|
||||
.into_iter()
|
||||
.map(|request| self.sql_handler.execute(request, QueryContext::arc())),
|
||||
)
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(ShutdownInstanceSnafu);
|
||||
info!("Flushed all tables result: {}", flush_result.is_ok());
|
||||
flush_result?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn sql_handler(&self) -> &SqlHandler {
|
||||
&self.sql_handler
|
||||
}
|
||||
@@ -290,18 +330,20 @@ pub(crate) async fn new_oss_object_store(store_config: &ObjectStoreConfig) -> Re
|
||||
);
|
||||
|
||||
let mut builder = OSSBuilder::default();
|
||||
let builder = builder
|
||||
builder
|
||||
.root(&root)
|
||||
.bucket(&oss_config.bucket)
|
||||
.endpoint(&oss_config.endpoint)
|
||||
.access_key_id(&oss_config.access_key_id)
|
||||
.access_key_secret(&oss_config.access_key_secret);
|
||||
|
||||
let accessor = builder.build().with_context(|_| error::InitBackendSnafu {
|
||||
config: store_config.clone(),
|
||||
})?;
|
||||
let object_store = ObjectStore::new(builder)
|
||||
.with_context(|_| error::InitBackendSnafu {
|
||||
config: store_config.clone(),
|
||||
})?
|
||||
.finish();
|
||||
|
||||
create_object_store_with_cache(ObjectStore::new(accessor).finish(), store_config)
|
||||
create_object_store_with_cache(object_store, store_config)
|
||||
}
|
||||
|
||||
fn create_object_store_with_cache(
|
||||
@@ -354,24 +396,27 @@ pub(crate) async fn new_s3_object_store(store_config: &ObjectStoreConfig) -> Res
|
||||
);
|
||||
|
||||
let mut builder = S3Builder::default();
|
||||
let mut builder = builder
|
||||
builder
|
||||
.root(&root)
|
||||
.bucket(&s3_config.bucket)
|
||||
.access_key_id(&s3_config.access_key_id)
|
||||
.secret_access_key(&s3_config.secret_access_key);
|
||||
|
||||
if s3_config.endpoint.is_some() {
|
||||
builder = builder.endpoint(s3_config.endpoint.as_ref().unwrap());
|
||||
builder.endpoint(s3_config.endpoint.as_ref().unwrap());
|
||||
}
|
||||
if s3_config.region.is_some() {
|
||||
builder = builder.region(s3_config.region.as_ref().unwrap());
|
||||
builder.region(s3_config.region.as_ref().unwrap());
|
||||
}
|
||||
|
||||
let accessor = builder.build().with_context(|_| error::InitBackendSnafu {
|
||||
config: store_config.clone(),
|
||||
})?;
|
||||
|
||||
create_object_store_with_cache(ObjectStore::new(accessor).finish(), store_config)
|
||||
create_object_store_with_cache(
|
||||
ObjectStore::new(builder)
|
||||
.with_context(|_| error::InitBackendSnafu {
|
||||
config: store_config.clone(),
|
||||
})?
|
||||
.finish(),
|
||||
store_config,
|
||||
)
|
||||
}
|
||||
|
||||
pub(crate) async fn new_fs_object_store(store_config: &ObjectStoreConfig) -> Result<ObjectStore> {
|
||||
@@ -386,15 +431,14 @@ pub(crate) async fn new_fs_object_store(store_config: &ObjectStoreConfig) -> Res
|
||||
|
||||
let atomic_write_dir = format!("{data_dir}/.tmp/");
|
||||
|
||||
let accessor = FsBuilder::default()
|
||||
.root(&data_dir)
|
||||
.atomic_write_dir(&atomic_write_dir)
|
||||
.build()
|
||||
let mut builder = FsBuilder::default();
|
||||
builder.root(&data_dir).atomic_write_dir(&atomic_write_dir);
|
||||
|
||||
Ok(ObjectStore::new(builder)
|
||||
.context(error::InitBackendSnafu {
|
||||
config: store_config.clone(),
|
||||
})?;
|
||||
|
||||
Ok(ObjectStore::new(accessor).finish())
|
||||
})?
|
||||
.finish())
|
||||
}
|
||||
|
||||
/// Create metasrv client instance and spawn heartbeat loop.
|
||||
|
||||
@@ -21,7 +21,7 @@ use common_query::Output;
|
||||
use query::parser::{PromQuery, QueryLanguageParser, QueryStatement};
|
||||
use query::plan::LogicalPlan;
|
||||
use servers::query_handler::grpc::GrpcQueryHandler;
|
||||
use session::context::QueryContextRef;
|
||||
use session::context::{QueryContext, QueryContextRef};
|
||||
use snafu::prelude::*;
|
||||
use sql::statements::statement::Statement;
|
||||
use substrait::{DFLogicalSubstraitConvertor, SubstraitPlan};
|
||||
@@ -53,7 +53,7 @@ impl Instance {
|
||||
.context(DecodeLogicalPlanSnafu)?;
|
||||
|
||||
self.query_engine
|
||||
.execute(&LogicalPlan::DfPlan(logical_plan))
|
||||
.execute(LogicalPlan::DfPlan(logical_plan), QueryContext::arc())
|
||||
.await
|
||||
.context(ExecuteLogicalPlanSnafu)
|
||||
}
|
||||
@@ -69,11 +69,11 @@ impl Instance {
|
||||
let plan = self
|
||||
.query_engine
|
||||
.planner()
|
||||
.plan(stmt, ctx)
|
||||
.plan(stmt, ctx.clone())
|
||||
.await
|
||||
.context(PlanStatementSnafu)?;
|
||||
self.query_engine
|
||||
.execute(&plan)
|
||||
.execute(plan, ctx)
|
||||
.await
|
||||
.context(ExecuteLogicalPlanSnafu)
|
||||
}
|
||||
@@ -175,7 +175,7 @@ mod test {
|
||||
.plan(stmt, QueryContext::arc())
|
||||
.await
|
||||
.unwrap();
|
||||
engine.execute(&plan).await.unwrap()
|
||||
engine.execute(plan, QueryContext::arc()).await.unwrap()
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread")]
|
||||
|
||||
@@ -19,7 +19,6 @@ use common_error::prelude::BoxedError;
|
||||
use common_query::Output;
|
||||
use common_telemetry::logging::info;
|
||||
use common_telemetry::timer;
|
||||
use futures::StreamExt;
|
||||
use query::error::QueryExecutionSnafu;
|
||||
use query::parser::{PromQuery, QueryLanguageParser, QueryStatement};
|
||||
use query::query_engine::StatementHandler;
|
||||
@@ -28,12 +27,10 @@ use servers::prom::PromHandler;
|
||||
use session::context::{QueryContext, QueryContextRef};
|
||||
use snafu::prelude::*;
|
||||
use sql::ast::ObjectName;
|
||||
use sql::statements::copy::CopyTable;
|
||||
use sql::statements::copy::{CopyTable, CopyTableArgument};
|
||||
use sql::statements::statement::Statement;
|
||||
use table::engine::TableReference;
|
||||
use table::requests::{
|
||||
CopyTableFromRequest, CopyTableRequest, CreateDatabaseRequest, DropTableRequest,
|
||||
};
|
||||
use table::requests::{CopyDirection, CopyTableRequest, CreateDatabaseRequest, DropTableRequest};
|
||||
|
||||
use crate::error::{
|
||||
self, BumpTableIdSnafu, ExecuteSqlSnafu, ExecuteStatementSnafu, PlanStatementSnafu, Result,
|
||||
@@ -41,8 +38,7 @@ use crate::error::{
|
||||
};
|
||||
use crate::instance::Instance;
|
||||
use crate::metric;
|
||||
use crate::sql::insert::InsertRequests;
|
||||
use crate::sql::SqlRequest;
|
||||
use crate::sql::{SqlHandler, SqlRequest};
|
||||
|
||||
impl Instance {
|
||||
pub async fn execute_stmt(
|
||||
@@ -52,37 +48,10 @@ impl Instance {
|
||||
) -> Result<Output> {
|
||||
match stmt {
|
||||
QueryStatement::Sql(Statement::Insert(insert)) => {
|
||||
let requests = self
|
||||
.sql_handler
|
||||
.insert_to_requests(self.catalog_manager.clone(), *insert, query_ctx.clone())
|
||||
.await?;
|
||||
|
||||
match requests {
|
||||
InsertRequests::Request(request) => {
|
||||
self.sql_handler.execute(request, query_ctx.clone()).await
|
||||
}
|
||||
|
||||
InsertRequests::Stream(mut s) => {
|
||||
let mut rows = 0;
|
||||
while let Some(request) = s.next().await {
|
||||
match self
|
||||
.sql_handler
|
||||
.execute(request?, query_ctx.clone())
|
||||
.await?
|
||||
{
|
||||
Output::AffectedRows(n) => {
|
||||
rows += n;
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
}
|
||||
Ok(Output::AffectedRows(rows))
|
||||
}
|
||||
}
|
||||
}
|
||||
QueryStatement::Sql(Statement::Delete(delete)) => {
|
||||
let request = SqlRequest::Delete(*delete);
|
||||
self.sql_handler.execute(request, query_ctx).await
|
||||
let request =
|
||||
SqlHandler::insert_to_request(self.catalog_manager.clone(), *insert, query_ctx)
|
||||
.await?;
|
||||
self.sql_handler.insert(request).await
|
||||
}
|
||||
QueryStatement::Sql(Statement::CreateDatabase(create_database)) => {
|
||||
let request = CreateDatabaseRequest {
|
||||
@@ -160,43 +129,59 @@ impl Instance {
|
||||
QueryStatement::Sql(Statement::ShowCreateTable(_show_create_table)) => {
|
||||
unimplemented!("SHOW CREATE TABLE is unimplemented yet");
|
||||
}
|
||||
QueryStatement::Sql(Statement::Copy(copy_table)) => match copy_table {
|
||||
CopyTable::To(copy_table) => {
|
||||
let (catalog_name, schema_name, table_name) =
|
||||
table_idents_to_full_name(©_table.table_name, query_ctx.clone())?;
|
||||
let file_name = copy_table.file_name;
|
||||
let req = CopyTableRequest {
|
||||
catalog_name,
|
||||
schema_name,
|
||||
table_name,
|
||||
file_name,
|
||||
connection: copy_table.connection,
|
||||
};
|
||||
QueryStatement::Sql(Statement::Copy(copy_table)) => {
|
||||
let req = match copy_table {
|
||||
CopyTable::To(copy_table) => {
|
||||
let CopyTableArgument {
|
||||
location,
|
||||
connection,
|
||||
pattern,
|
||||
table_name,
|
||||
..
|
||||
} = copy_table;
|
||||
let (catalog_name, schema_name, table_name) =
|
||||
table_idents_to_full_name(&table_name, query_ctx.clone())?;
|
||||
CopyTableRequest {
|
||||
catalog_name,
|
||||
schema_name,
|
||||
table_name,
|
||||
location,
|
||||
connection,
|
||||
pattern,
|
||||
direction: CopyDirection::Export,
|
||||
}
|
||||
}
|
||||
CopyTable::From(copy_table) => {
|
||||
let CopyTableArgument {
|
||||
location,
|
||||
connection,
|
||||
pattern,
|
||||
table_name,
|
||||
..
|
||||
} = copy_table;
|
||||
let (catalog_name, schema_name, table_name) =
|
||||
table_idents_to_full_name(&table_name, query_ctx.clone())?;
|
||||
CopyTableRequest {
|
||||
catalog_name,
|
||||
schema_name,
|
||||
table_name,
|
||||
location,
|
||||
connection,
|
||||
pattern,
|
||||
direction: CopyDirection::Import,
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
self.sql_handler
|
||||
.execute(SqlRequest::CopyTable(req), query_ctx)
|
||||
.await
|
||||
}
|
||||
CopyTable::From(copy_table) => {
|
||||
let (catalog_name, schema_name, table_name) =
|
||||
table_idents_to_full_name(©_table.table_name, query_ctx.clone())?;
|
||||
let req = CopyTableFromRequest {
|
||||
catalog_name,
|
||||
schema_name,
|
||||
table_name,
|
||||
connection: copy_table.connection,
|
||||
pattern: copy_table.pattern,
|
||||
from: copy_table.from,
|
||||
};
|
||||
self.sql_handler
|
||||
.execute(SqlRequest::CopyTableFrom(req), query_ctx)
|
||||
.await
|
||||
}
|
||||
},
|
||||
self.sql_handler
|
||||
.execute(SqlRequest::CopyTable(req), query_ctx)
|
||||
.await
|
||||
}
|
||||
QueryStatement::Sql(Statement::Query(_))
|
||||
| QueryStatement::Sql(Statement::Explain(_))
|
||||
| QueryStatement::Sql(Statement::Use(_))
|
||||
| QueryStatement::Sql(Statement::Tql(_))
|
||||
| QueryStatement::Sql(Statement::Delete(_))
|
||||
| QueryStatement::Promql(_) => unreachable!(),
|
||||
}
|
||||
}
|
||||
@@ -213,10 +198,13 @@ impl Instance {
|
||||
let engine = self.query_engine();
|
||||
let plan = engine
|
||||
.planner()
|
||||
.plan(stmt, query_ctx)
|
||||
.plan(stmt, query_ctx.clone())
|
||||
.await
|
||||
.context(PlanStatementSnafu)?;
|
||||
engine.execute(&plan).await.context(ExecuteStatementSnafu)
|
||||
engine
|
||||
.execute(plan, query_ctx)
|
||||
.await
|
||||
.context(ExecuteStatementSnafu)
|
||||
}
|
||||
|
||||
// TODO(ruihang): merge this and `execute_promql` after #951 landed
|
||||
@@ -249,10 +237,13 @@ impl Instance {
|
||||
let engine = self.query_engine();
|
||||
let plan = engine
|
||||
.planner()
|
||||
.plan(stmt, query_ctx)
|
||||
.plan(stmt, query_ctx.clone())
|
||||
.await
|
||||
.context(PlanStatementSnafu)?;
|
||||
engine.execute(&plan).await.context(ExecuteStatementSnafu)
|
||||
engine
|
||||
.execute(plan, query_ctx)
|
||||
.await
|
||||
.context(ExecuteStatementSnafu)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -95,6 +95,7 @@ impl Instance {
|
||||
schema_name: expr.schema_name,
|
||||
table_name,
|
||||
region_number: expr.region_id,
|
||||
wait: None,
|
||||
};
|
||||
self.sql_handler()
|
||||
.execute(SqlRequest::FlushTable(req), QueryContext::arc())
|
||||
|
||||
@@ -17,11 +17,9 @@ use common_error::prelude::BoxedError;
|
||||
use common_procedure::ProcedureManagerRef;
|
||||
use common_query::Output;
|
||||
use common_telemetry::error;
|
||||
use query::query_engine::QueryEngineRef;
|
||||
use query::sql::{describe_table, show_databases, show_tables};
|
||||
use session::context::QueryContextRef;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use sql::statements::delete::Delete;
|
||||
use sql::statements::describe::DescribeTable;
|
||||
use sql::statements::show::{ShowDatabases, ShowTables};
|
||||
use table::engine::{EngineContext, TableEngineProcedureRef, TableEngineRef, TableReference};
|
||||
@@ -34,17 +32,15 @@ use crate::error::{
|
||||
use crate::instance::sql::table_idents_to_full_name;
|
||||
|
||||
mod alter;
|
||||
mod copy_table;
|
||||
mod copy_table_from;
|
||||
mod copy_table_to;
|
||||
mod create;
|
||||
mod delete;
|
||||
mod drop_table;
|
||||
mod flush_table;
|
||||
pub(crate) mod insert;
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum SqlRequest {
|
||||
Insert(InsertRequest),
|
||||
CreateTable(CreateTableRequest),
|
||||
CreateDatabase(CreateDatabaseRequest),
|
||||
Alter(AlterTableRequest),
|
||||
@@ -53,16 +49,14 @@ pub enum SqlRequest {
|
||||
ShowDatabases(ShowDatabases),
|
||||
ShowTables(ShowTables),
|
||||
DescribeTable(DescribeTable),
|
||||
Delete(Delete),
|
||||
CopyTable(CopyTableRequest),
|
||||
CopyTableFrom(CopyTableFromRequest),
|
||||
}
|
||||
|
||||
// Handler to execute SQL except query
|
||||
#[derive(Clone)]
|
||||
pub struct SqlHandler {
|
||||
table_engine: TableEngineRef,
|
||||
catalog_manager: CatalogManagerRef,
|
||||
query_engine: QueryEngineRef,
|
||||
engine_procedure: TableEngineProcedureRef,
|
||||
procedure_manager: Option<ProcedureManagerRef>,
|
||||
}
|
||||
@@ -71,14 +65,12 @@ impl SqlHandler {
|
||||
pub fn new(
|
||||
table_engine: TableEngineRef,
|
||||
catalog_manager: CatalogManagerRef,
|
||||
query_engine: QueryEngineRef,
|
||||
engine_procedure: TableEngineProcedureRef,
|
||||
procedure_manager: Option<ProcedureManagerRef>,
|
||||
) -> Self {
|
||||
Self {
|
||||
table_engine,
|
||||
catalog_manager,
|
||||
query_engine,
|
||||
engine_procedure,
|
||||
procedure_manager,
|
||||
}
|
||||
@@ -90,14 +82,14 @@ impl SqlHandler {
|
||||
// there, instead of executing here in a "static" fashion.
|
||||
pub async fn execute(&self, request: SqlRequest, query_ctx: QueryContextRef) -> Result<Output> {
|
||||
let result = match request {
|
||||
SqlRequest::Insert(req) => self.insert(req).await,
|
||||
SqlRequest::CreateTable(req) => self.create_table(req).await,
|
||||
SqlRequest::CreateDatabase(req) => self.create_database(req, query_ctx.clone()).await,
|
||||
SqlRequest::Alter(req) => self.alter(req).await,
|
||||
SqlRequest::DropTable(req) => self.drop_table(req).await,
|
||||
SqlRequest::Delete(req) => self.delete(query_ctx.clone(), req).await,
|
||||
SqlRequest::CopyTable(req) => self.copy_table(req).await,
|
||||
SqlRequest::CopyTableFrom(req) => self.copy_table_from(req).await,
|
||||
SqlRequest::CopyTable(req) => match req.direction {
|
||||
CopyDirection::Export => self.copy_table_to(req).await,
|
||||
CopyDirection::Import => self.copy_table_from(req).await,
|
||||
},
|
||||
SqlRequest::ShowDatabases(req) => {
|
||||
show_databases(req, self.catalog_manager.clone()).context(ExecuteSqlSnafu)
|
||||
}
|
||||
@@ -149,239 +141,3 @@ impl SqlHandler {
|
||||
.context(CloseTableEngineSnafu)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::any::Any;
|
||||
use std::sync::Arc;
|
||||
|
||||
use catalog::{CatalogManager, RegisterTableRequest};
|
||||
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use common_query::logical_plan::Expr;
|
||||
use common_query::physical_plan::PhysicalPlanRef;
|
||||
use common_test_util::temp_dir::create_temp_dir;
|
||||
use common_time::timestamp::Timestamp;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::schema::{ColumnSchema, SchemaBuilder, SchemaRef};
|
||||
use datatypes::value::Value;
|
||||
use futures::StreamExt;
|
||||
use log_store::NoopLogStore;
|
||||
use mito::config::EngineConfig as TableEngineConfig;
|
||||
use mito::engine::MitoEngine;
|
||||
use object_store::services::Fs as Builder;
|
||||
use object_store::{ObjectStore, ObjectStoreBuilder};
|
||||
use query::parser::{QueryLanguageParser, QueryStatement};
|
||||
use query::QueryEngineFactory;
|
||||
use session::context::QueryContext;
|
||||
use sql::statements::statement::Statement;
|
||||
use storage::compaction::noop::NoopCompactionScheduler;
|
||||
use storage::config::EngineConfig as StorageEngineConfig;
|
||||
use storage::EngineImpl;
|
||||
use table::error::Result as TableResult;
|
||||
use table::metadata::TableInfoRef;
|
||||
use table::Table;
|
||||
|
||||
use super::*;
|
||||
use crate::error::Error;
|
||||
use crate::sql::insert::InsertRequests;
|
||||
|
||||
struct DemoTable;
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl Table for DemoTable {
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
|
||||
fn schema(&self) -> SchemaRef {
|
||||
let column_schemas = vec![
|
||||
ColumnSchema::new("host", ConcreteDataType::string_datatype(), false),
|
||||
ColumnSchema::new("cpu", ConcreteDataType::float64_datatype(), true),
|
||||
ColumnSchema::new("memory", ConcreteDataType::float64_datatype(), true),
|
||||
ColumnSchema::new(
|
||||
"ts",
|
||||
ConcreteDataType::timestamp_millisecond_datatype(),
|
||||
true,
|
||||
)
|
||||
.with_time_index(true),
|
||||
];
|
||||
|
||||
Arc::new(
|
||||
SchemaBuilder::try_from(column_schemas)
|
||||
.unwrap()
|
||||
.build()
|
||||
.unwrap(),
|
||||
)
|
||||
}
|
||||
|
||||
fn table_info(&self) -> TableInfoRef {
|
||||
unimplemented!()
|
||||
}
|
||||
|
||||
async fn scan(
|
||||
&self,
|
||||
_projection: Option<&Vec<usize>>,
|
||||
_filters: &[Expr],
|
||||
_limit: Option<usize>,
|
||||
) -> TableResult<PhysicalPlanRef> {
|
||||
unimplemented!();
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_statement_to_request() {
|
||||
let dir = create_temp_dir("setup_test_engine_and_table");
|
||||
let store_dir = dir.path().to_string_lossy();
|
||||
let accessor = Builder::default().root(&store_dir).build().unwrap();
|
||||
let object_store = ObjectStore::new(accessor).finish();
|
||||
let compaction_scheduler = Arc::new(NoopCompactionScheduler::default());
|
||||
let sql = r#"insert into demo(host, cpu, memory, ts) values
|
||||
('host1', 66.6, 1024, 1655276557000),
|
||||
('host2', 88.8, 333.3, 1655276558000)
|
||||
"#;
|
||||
|
||||
let table_engine = Arc::new(MitoEngine::<EngineImpl<NoopLogStore>>::new(
|
||||
TableEngineConfig::default(),
|
||||
EngineImpl::new(
|
||||
StorageEngineConfig::default(),
|
||||
Arc::new(NoopLogStore::default()),
|
||||
object_store.clone(),
|
||||
compaction_scheduler,
|
||||
),
|
||||
object_store,
|
||||
));
|
||||
|
||||
let catalog_list = Arc::new(
|
||||
catalog::local::LocalCatalogManager::try_new(table_engine.clone())
|
||||
.await
|
||||
.unwrap(),
|
||||
);
|
||||
catalog_list.start().await.unwrap();
|
||||
assert!(catalog_list
|
||||
.register_table(RegisterTableRequest {
|
||||
catalog: DEFAULT_CATALOG_NAME.to_string(),
|
||||
schema: DEFAULT_SCHEMA_NAME.to_string(),
|
||||
table_name: "demo".to_string(),
|
||||
table_id: 1,
|
||||
table: Arc::new(DemoTable),
|
||||
})
|
||||
.await
|
||||
.unwrap());
|
||||
|
||||
let factory = QueryEngineFactory::new(catalog_list.clone());
|
||||
let query_engine = factory.query_engine();
|
||||
let sql_handler = SqlHandler::new(
|
||||
table_engine.clone(),
|
||||
catalog_list.clone(),
|
||||
query_engine.clone(),
|
||||
table_engine,
|
||||
None,
|
||||
);
|
||||
|
||||
let stmt = match QueryLanguageParser::parse_sql(sql).unwrap() {
|
||||
QueryStatement::Sql(Statement::Insert(i)) => i,
|
||||
_ => {
|
||||
unreachable!()
|
||||
}
|
||||
};
|
||||
let request = sql_handler
|
||||
.insert_to_requests(catalog_list.clone(), *stmt, QueryContext::arc())
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
match request {
|
||||
InsertRequests::Request(SqlRequest::Insert(req)) => {
|
||||
assert_eq!(req.table_name, "demo");
|
||||
let columns_values = req.columns_values;
|
||||
assert_eq!(4, columns_values.len());
|
||||
|
||||
let hosts = &columns_values["host"];
|
||||
assert_eq!(2, hosts.len());
|
||||
assert_eq!(Value::from("host1"), hosts.get(0));
|
||||
assert_eq!(Value::from("host2"), hosts.get(1));
|
||||
|
||||
let cpus = &columns_values["cpu"];
|
||||
assert_eq!(2, cpus.len());
|
||||
assert_eq!(Value::from(66.6f64), cpus.get(0));
|
||||
assert_eq!(Value::from(88.8f64), cpus.get(1));
|
||||
|
||||
let memories = &columns_values["memory"];
|
||||
assert_eq!(2, memories.len());
|
||||
assert_eq!(Value::from(1024f64), memories.get(0));
|
||||
assert_eq!(Value::from(333.3f64), memories.get(1));
|
||||
|
||||
let ts = &columns_values["ts"];
|
||||
assert_eq!(2, ts.len());
|
||||
assert_eq!(
|
||||
Value::from(Timestamp::new_millisecond(1655276557000i64)),
|
||||
ts.get(0)
|
||||
);
|
||||
assert_eq!(
|
||||
Value::from(Timestamp::new_millisecond(1655276558000i64)),
|
||||
ts.get(1)
|
||||
);
|
||||
}
|
||||
_ => {
|
||||
panic!("Not supposed to reach here")
|
||||
}
|
||||
}
|
||||
|
||||
// test inert into select
|
||||
|
||||
// type mismatch
|
||||
let sql = "insert into demo(ts) select number from numbers limit 3";
|
||||
|
||||
let stmt = match QueryLanguageParser::parse_sql(sql).unwrap() {
|
||||
QueryStatement::Sql(Statement::Insert(i)) => i,
|
||||
_ => {
|
||||
unreachable!()
|
||||
}
|
||||
};
|
||||
let request = sql_handler
|
||||
.insert_to_requests(catalog_list.clone(), *stmt, QueryContext::arc())
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
match request {
|
||||
InsertRequests::Stream(mut stream) => {
|
||||
assert!(matches!(
|
||||
stream.next().await.unwrap().unwrap_err(),
|
||||
Error::ColumnTypeMismatch { .. }
|
||||
));
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
|
||||
let sql = "insert into demo(cpu) select cast(number as double) from numbers limit 3";
|
||||
let stmt = match QueryLanguageParser::parse_sql(sql).unwrap() {
|
||||
QueryStatement::Sql(Statement::Insert(i)) => i,
|
||||
_ => {
|
||||
unreachable!()
|
||||
}
|
||||
};
|
||||
let request = sql_handler
|
||||
.insert_to_requests(catalog_list.clone(), *stmt, QueryContext::arc())
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
match request {
|
||||
InsertRequests::Stream(mut stream) => {
|
||||
let mut times = 0;
|
||||
while let Some(Ok(SqlRequest::Insert(req))) = stream.next().await {
|
||||
times += 1;
|
||||
assert_eq!(req.table_name, "demo");
|
||||
let columns_values = req.columns_values;
|
||||
assert_eq!(1, columns_values.len());
|
||||
|
||||
let memories = &columns_values["cpu"];
|
||||
assert_eq!(3, memories.len());
|
||||
assert_eq!(Value::from(0.0f64), memories.get(0));
|
||||
assert_eq!(Value::from(1.0f64), memories.get(1));
|
||||
assert_eq!(Value::from(2.0f64), memories.get(2));
|
||||
}
|
||||
assert_eq!(1, times);
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -15,35 +15,26 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use async_compat::CompatExt;
|
||||
use common_datasource::lister::{Lister, Source};
|
||||
use common_datasource::object_store::{build_backend, parse_url};
|
||||
use common_datasource::util::find_dir_and_filename;
|
||||
use common_query::Output;
|
||||
use common_recordbatch::error::DataTypesSnafu;
|
||||
use datafusion::parquet::arrow::ParquetRecordBatchStreamBuilder;
|
||||
use datatypes::arrow::record_batch::RecordBatch;
|
||||
use datatypes::vectors::{Helper, VectorRef};
|
||||
use futures::future;
|
||||
use futures_util::TryStreamExt;
|
||||
use object_store::services::{Fs, S3};
|
||||
use object_store::{Object, ObjectStore, ObjectStoreBuilder};
|
||||
use regex::Regex;
|
||||
use snafu::{ensure, ResultExt};
|
||||
use table::engine::TableReference;
|
||||
use table::requests::{CopyTableFromRequest, InsertRequest};
|
||||
use table::requests::{CopyTableRequest, InsertRequest};
|
||||
use tokio::io::BufReader;
|
||||
use url::{ParseError, Url};
|
||||
|
||||
use crate::error::{self, Result};
|
||||
use crate::sql::SqlHandler;
|
||||
|
||||
pub const S3_SCHEMA: &str = "S3";
|
||||
const ENDPOINT_URL: &str = "ENDPOINT_URL";
|
||||
const ACCESS_KEY_ID: &str = "ACCESS_KEY_ID";
|
||||
const SECRET_ACCESS_KEY: &str = "SECRET_ACCESS_KEY";
|
||||
const SESSION_TOKEN: &str = "SESSION_TOKEN";
|
||||
const REGION: &str = "REGION";
|
||||
const ENABLE_VIRTUAL_HOST_STYLE: &str = "ENABLE_VIRTUAL_HOST_STYLE";
|
||||
|
||||
impl SqlHandler {
    pub(crate) async fn copy_table_from(&self, req: CopyTableFromRequest) -> Result<Output> {
    pub(crate) async fn copy_table_from(&self, req: CopyTableRequest) -> Result<Output> {
        let table_ref = TableReference {
            catalog: &req.catalog_name,
            schema: &req.schema_name,
@@ -51,16 +42,37 @@ impl SqlHandler {
        };
        let table = self.get_table(&table_ref)?;

        let datasource = DataSource::new(&req.from, req.pattern, req.connection)?;
        let (_schema, _host, path) = parse_url(&req.location).context(error::ParseUrlSnafu)?;

        let objects = datasource.list().await?;
        let object_store =
            build_backend(&req.location, req.connection).context(error::BuildBackendSnafu)?;

        let (dir, filename) = find_dir_and_filename(&path);
        let regex = req
            .pattern
            .as_ref()
            .map(|x| Regex::new(x))
            .transpose()
            .context(error::BuildRegexSnafu)?;

        let source = if let Some(filename) = filename {
            Source::Filename(filename)
        } else {
            Source::Dir
        };

        let lister = Lister::new(object_store.clone(), source, dir, regex);

        let entries = lister.list().await.context(error::ListObjectsSnafu)?;

        let mut buf: Vec<RecordBatch> = Vec::new();

        for obj in objects.iter() {
            let reader = obj.reader().await.context(error::ReadObjectSnafu {
                path: &obj.path().to_string(),
            })?;
        for entry in entries.iter() {
            let path = entry.path();
            let reader = object_store
                .reader(path)
                .await
                .context(error::ReadObjectSnafu { path })?;

            let buf_reader = BufReader::new(reader.compat());

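// A minimal sketch of how the listing inputs above are derived for a statement
// like `COPY demo FROM '/export/' WITH (PATTERN = 'demo.*')`; the path and the
// pattern are made-up examples, and common_datasource's helper is assumed to
// behave like the local `find_dir_and_filename` removed below:
use common_datasource::lister::Source;
use common_datasource::util::find_dir_and_filename;
use regex::Regex;

fn example_listing_inputs() -> (String, Source, Option<Regex>) {
    // "/export/demo.parquet_1_2" splits into ("/export/", Some("demo.parquet_1_2")).
    let (dir, filename) = find_dir_and_filename("/export/demo.parquet_1_2");
    let source = match filename {
        Some(filename) => Source::Filename(filename),
        None => Source::Dir,
    };
    let regex = Some(Regex::new("demo.*").expect("valid pattern"));
    (dir, source, regex)
}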
@@ -131,321 +143,3 @@ impl SqlHandler {
|
||||
Ok(Output::AffectedRows(result.iter().sum()))
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
enum Source {
|
||||
Filename(String),
|
||||
Dir,
|
||||
}
|
||||
|
||||
struct DataSource {
|
||||
object_store: ObjectStore,
|
||||
source: Source,
|
||||
path: String,
|
||||
regex: Option<Regex>,
|
||||
}
|
||||
|
||||
impl DataSource {
|
||||
fn from_path(url: &str, regex: Option<Regex>) -> Result<DataSource> {
|
||||
let result = if url.ends_with('/') {
|
||||
Url::from_directory_path(url)
|
||||
} else {
|
||||
Url::from_file_path(url)
|
||||
};
|
||||
|
||||
match result {
|
||||
Ok(url) => {
|
||||
let path = url.path();
|
||||
|
||||
let (path, filename) = DataSource::find_dir_and_filename(path);
|
||||
|
||||
let source = if let Some(filename) = filename {
|
||||
Source::Filename(filename)
|
||||
} else {
|
||||
Source::Dir
|
||||
};
|
||||
|
||||
let object_store = build_fs_backend(&path)?;
|
||||
|
||||
Ok(DataSource {
|
||||
object_store,
|
||||
source,
|
||||
path,
|
||||
regex,
|
||||
})
|
||||
}
|
||||
Err(()) => error::InvalidPathSnafu {
|
||||
path: url.to_string(),
|
||||
}
|
||||
.fail(),
|
||||
}
|
||||
}
|
||||
|
||||
fn from_url(
|
||||
url: Url,
|
||||
regex: Option<Regex>,
|
||||
connection: HashMap<String, String>,
|
||||
) -> Result<DataSource> {
|
||||
let host = url.host_str();
|
||||
|
||||
let path = url.path();
|
||||
|
||||
let schema = url.scheme();
|
||||
|
||||
let (dir, filename) = DataSource::find_dir_and_filename(path);
|
||||
|
||||
let source = if let Some(filename) = filename {
|
||||
Source::Filename(filename)
|
||||
} else {
|
||||
Source::Dir
|
||||
};
|
||||
|
||||
let object_store = match schema.to_uppercase().as_str() {
|
||||
S3_SCHEMA => build_s3_backend(host, &dir, connection)?,
|
||||
_ => {
|
||||
return error::UnsupportedBackendProtocolSnafu {
|
||||
protocol: schema.to_string(),
|
||||
}
|
||||
.fail()
|
||||
}
|
||||
};
|
||||
|
||||
Ok(DataSource {
|
||||
object_store,
|
||||
source,
|
||||
path: dir,
|
||||
regex,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn new(
|
||||
url: &str,
|
||||
pattern: Option<String>,
|
||||
connection: HashMap<String, String>,
|
||||
) -> Result<DataSource> {
|
||||
let regex = if let Some(pattern) = pattern {
|
||||
let regex = Regex::new(&pattern).context(error::BuildRegexSnafu)?;
|
||||
Some(regex)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
let result = Url::parse(url);
|
||||
|
||||
match result {
|
||||
Ok(url) => DataSource::from_url(url, regex, connection),
|
||||
Err(err) => {
|
||||
if ParseError::RelativeUrlWithoutBase == err {
|
||||
DataSource::from_path(url, regex)
|
||||
} else {
|
||||
Err(error::Error::InvalidUrl {
|
||||
url: url.to_string(),
|
||||
source: err,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn list(&self) -> Result<Vec<Object>> {
|
||||
match &self.source {
|
||||
Source::Dir => {
|
||||
let streamer = self
|
||||
.object_store
|
||||
.object("/")
|
||||
.list()
|
||||
.await
|
||||
.context(error::ListObjectsSnafu { path: &self.path })?;
|
||||
streamer
|
||||
.try_filter(|f| {
|
||||
let res = if let Some(regex) = &self.regex {
|
||||
regex.is_match(f.name())
|
||||
} else {
|
||||
true
|
||||
};
|
||||
future::ready(res)
|
||||
})
|
||||
.try_collect::<Vec<_>>()
|
||||
.await
|
||||
.context(error::ListObjectsSnafu { path: &self.path })
|
||||
}
|
||||
Source::Filename(filename) => {
|
||||
let obj = self.object_store.object(filename);
|
||||
|
||||
Ok(vec![obj])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn find_dir_and_filename(path: &str) -> (String, Option<String>) {
|
||||
if path.is_empty() {
|
||||
("/".to_string(), None)
|
||||
} else if path.ends_with('/') {
|
||||
(path.to_string(), None)
|
||||
} else if let Some(idx) = path.rfind('/') {
|
||||
(
|
||||
path[..idx + 1].to_string(),
|
||||
Some(path[idx + 1..].to_string()),
|
||||
)
|
||||
} else {
|
||||
("/".to_string(), Some(path.to_string()))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn build_s3_backend(
|
||||
host: Option<&str>,
|
||||
path: &str,
|
||||
connection: HashMap<String, String>,
|
||||
) -> Result<ObjectStore> {
|
||||
let mut builder = S3::default();
|
||||
|
||||
builder.root(path);
|
||||
|
||||
if let Some(bucket) = host {
|
||||
builder.bucket(bucket);
|
||||
}
|
||||
|
||||
if let Some(endpoint) = connection.get(ENDPOINT_URL) {
|
||||
builder.endpoint(endpoint);
|
||||
}
|
||||
|
||||
if let Some(region) = connection.get(REGION) {
|
||||
builder.region(region);
|
||||
}
|
||||
|
||||
if let Some(key_id) = connection.get(ACCESS_KEY_ID) {
|
||||
builder.access_key_id(key_id);
|
||||
}
|
||||
|
||||
if let Some(key) = connection.get(SECRET_ACCESS_KEY) {
|
||||
builder.secret_access_key(key);
|
||||
}
|
||||
|
||||
if let Some(session_token) = connection.get(SESSION_TOKEN) {
|
||||
builder.security_token(session_token);
|
||||
}
|
||||
|
||||
if let Some(enable_str) = connection.get(ENABLE_VIRTUAL_HOST_STYLE) {
|
||||
let enable = enable_str.as_str().parse::<bool>().map_err(|e| {
|
||||
error::InvalidConnectionSnafu {
|
||||
msg: format!(
|
||||
"failed to parse the option {}={}, {}",
|
||||
ENABLE_VIRTUAL_HOST_STYLE, enable_str, e
|
||||
),
|
||||
}
|
||||
.build()
|
||||
})?;
|
||||
if enable {
|
||||
builder.enable_virtual_host_style();
|
||||
}
|
||||
}
|
||||
|
||||
let accessor = builder.build().context(error::BuildBackendSnafu)?;
|
||||
|
||||
Ok(ObjectStore::new(accessor).finish())
|
||||
}
|
||||
|
||||
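// A small sketch of the connection options build_s3_backend consumes; the
// endpoint, bucket, and credentials here are placeholders:
use std::collections::HashMap;

fn example_s3_connection() -> HashMap<String, String> {
    HashMap::from([
        (ENDPOINT_URL.to_string(), "http://127.0.0.1:9000".to_string()),
        (REGION.to_string(), "us-west-2".to_string()),
        (ACCESS_KEY_ID.to_string(), "my-access-key".to_string()),
        (SECRET_ACCESS_KEY.to_string(), "my-secret-key".to_string()),
        (ENABLE_VIRTUAL_HOST_STYLE.to_string(), "false".to_string()),
    ])
}
// `build_s3_backend(Some("my-bucket"), "/export/", example_s3_connection())`
// then yields an ObjectStore rooted at that prefix.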
pub fn build_fs_backend(root: &str) -> Result<ObjectStore> {
|
||||
let accessor = Fs::default()
|
||||
.root(root)
|
||||
.build()
|
||||
.context(error::BuildBackendSnafu)?;
|
||||
|
||||
Ok(ObjectStore::new(accessor).finish())
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
use url::Url;
|
||||
|
||||
use super::*;
|
||||
#[test]
|
||||
fn test_parse_uri() {
|
||||
struct Test<'a> {
|
||||
uri: &'a str,
|
||||
expected_path: &'a str,
|
||||
expected_schema: &'a str,
|
||||
}
|
||||
|
||||
let tests = [
|
||||
Test {
|
||||
uri: "s3://bucket/to/path/",
|
||||
expected_path: "/to/path/",
|
||||
expected_schema: "s3",
|
||||
},
|
||||
Test {
|
||||
uri: "fs:///to/path/",
|
||||
expected_path: "/to/path/",
|
||||
expected_schema: "fs",
|
||||
},
|
||||
Test {
|
||||
uri: "fs:///to/path/file",
|
||||
expected_path: "/to/path/file",
|
||||
expected_schema: "fs",
|
||||
},
|
||||
];
|
||||
for test in tests {
|
||||
let parsed_uri = Url::parse(test.uri).unwrap();
|
||||
assert_eq!(parsed_uri.path(), test.expected_path);
|
||||
assert_eq!(parsed_uri.scheme(), test.expected_schema);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_path_and_dir() {
|
||||
let parsed = Url::from_file_path("/to/path/file").unwrap();
|
||||
assert_eq!(parsed.path(), "/to/path/file");
|
||||
|
||||
let parsed = Url::from_directory_path("/to/path/").unwrap();
|
||||
assert_eq!(parsed.path(), "/to/path/");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_find_dir_and_filename() {
|
||||
struct Test<'a> {
|
||||
path: &'a str,
|
||||
expected_dir: &'a str,
|
||||
expected_filename: Option<String>,
|
||||
}
|
||||
|
||||
let tests = [
|
||||
Test {
|
||||
path: "to/path/",
|
||||
expected_dir: "to/path/",
|
||||
expected_filename: None,
|
||||
},
|
||||
Test {
|
||||
path: "to/path/filename",
|
||||
expected_dir: "to/path/",
|
||||
expected_filename: Some("filename".into()),
|
||||
},
|
||||
Test {
|
||||
path: "/to/path/filename",
|
||||
expected_dir: "/to/path/",
|
||||
expected_filename: Some("filename".into()),
|
||||
},
|
||||
Test {
|
||||
path: "/",
|
||||
expected_dir: "/",
|
||||
expected_filename: None,
|
||||
},
|
||||
Test {
|
||||
path: "filename",
|
||||
expected_dir: "/",
|
||||
expected_filename: Some("filename".into()),
|
||||
},
|
||||
Test {
|
||||
path: "",
|
||||
expected_dir: "/",
|
||||
expected_filename: None,
|
||||
},
|
||||
];
|
||||
|
||||
for test in tests {
|
||||
let (path, filename) = DataSource::find_dir_and_filename(test.path);
|
||||
assert_eq!(test.expected_dir, path);
|
||||
assert_eq!(test.expected_filename, filename)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -12,9 +12,10 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::pin::Pin;
|
||||
|
||||
use common_datasource;
|
||||
use common_datasource::object_store::{build_backend, parse_url};
|
||||
use common_query::physical_plan::SessionContext;
|
||||
use common_query::Output;
|
||||
use common_recordbatch::adapter::DfRecordBatchStreamAdapter;
|
||||
@@ -27,51 +28,12 @@ use object_store::ObjectStore;
|
||||
use snafu::ResultExt;
|
||||
use table::engine::TableReference;
|
||||
use table::requests::CopyTableRequest;
|
||||
use url::{ParseError, Url};
|
||||
|
||||
use super::copy_table_from::{build_fs_backend, build_s3_backend, S3_SCHEMA};
|
||||
use crate::error::{self, Result};
|
||||
use crate::sql::SqlHandler;
|
||||
|
||||
impl SqlHandler {
|
||||
fn build_backend(
|
||||
&self,
|
||||
url: &str,
|
||||
connection: HashMap<String, String>,
|
||||
) -> Result<(ObjectStore, String)> {
|
||||
let result = Url::parse(url);
|
||||
|
||||
match result {
|
||||
Ok(url) => {
|
||||
let host = url.host_str();
|
||||
|
||||
let schema = url.scheme();
|
||||
|
||||
let path = url.path();
|
||||
|
||||
match schema.to_uppercase().as_str() {
|
||||
S3_SCHEMA => {
|
||||
let object_store = build_s3_backend(host, "/", connection)?;
|
||||
Ok((object_store, path.to_string()))
|
||||
}
|
||||
|
||||
_ => error::UnsupportedBackendProtocolSnafu {
|
||||
protocol: schema.to_string(),
|
||||
}
|
||||
.fail(),
|
||||
}
|
||||
}
|
||||
Err(ParseError::RelativeUrlWithoutBase) => {
|
||||
let object_store = build_fs_backend("/")?;
|
||||
Ok((object_store, url.to_string()))
|
||||
}
|
||||
Err(err) => Err(error::Error::InvalidUrl {
|
||||
url: url.to_string(),
|
||||
source: err,
|
||||
}),
|
||||
}
|
||||
}
|
||||
pub(crate) async fn copy_table(&self, req: CopyTableRequest) -> Result<Output> {
|
||||
pub(crate) async fn copy_table_to(&self, req: CopyTableRequest) -> Result<Output> {
|
||||
let table_ref = TableReference {
|
||||
catalog: &req.catalog_name,
|
||||
schema: &req.schema_name,
|
||||
@@ -91,9 +53,11 @@ impl SqlHandler {
            .context(error::TableScanExecSnafu)?;
        let stream = Box::pin(DfRecordBatchStreamAdapter::new(stream));

        let (object_store, file_name) = self.build_backend(&req.file_name, req.connection)?;
        let (_schema, _host, path) = parse_url(&req.location).context(error::ParseUrlSnafu)?;
        let object_store =
            build_backend(&req.location, req.connection).context(error::BuildBackendSnafu)?;

        let mut parquet_writer = ParquetWriter::new(file_name, stream, object_store);
        let mut parquet_writer = ParquetWriter::new(path.to_string(), stream, object_store);
        // TODO(jiachun):
        // For now, COPY is implemented synchronously.
        // When copying a large table, it will block for a long time.
@@ -172,10 +136,10 @@ impl ParquetWriter {
            // "file_name_1_1000000" (row num: 1 ~ 1000000),
            // "file_name_1000001_xxx" (row num: 1000001 ~ xxx)
            let file_name = format!("{}_{}_{}", self.file_name, start_row_num, total_rows);
            let object = self.object_store.object(&file_name);
            object.write(buf).await.context(error::WriteObjectSnafu {
                path: object.path(),
            })?;
            self.object_store
                .write(&file_name, buf)
                .await
                .context(error::WriteObjectSnafu { path: file_name })?;

            if end_loop {
                return Ok(total_rows);
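// A small sketch of the chunked object naming used above; the numbers are
// illustrative only:
fn example_chunk_name(base: &str, start_row_num: usize, total_rows: usize) -> String {
    // e.g. ("demo.parquet", 1, 2) => "demo.parquet_1_2", the exact suffix the
    // COPY FROM test below imports.
    format!("{}_{}_{}", base, start_row_num, total_rows)
}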
@@ -1,142 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_query::Output;
|
||||
use datatypes::data_type::DataType;
|
||||
use datatypes::prelude::VectorRef;
|
||||
use datatypes::vectors::StringVector;
|
||||
use session::context::QueryContextRef;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use sql::ast::{BinaryOperator, Expr, Value};
|
||||
use sql::statements::delete::Delete;
|
||||
use sql::statements::sql_value_to_value;
|
||||
use table::engine::TableReference;
|
||||
use table::requests::DeleteRequest;
|
||||
use table::TableRef;
|
||||
|
||||
use crate::error::{ColumnNotFoundSnafu, DeleteSnafu, InvalidSqlSnafu, NotSupportSqlSnafu, Result};
|
||||
use crate::instance::sql::table_idents_to_full_name;
|
||||
use crate::sql::SqlHandler;
|
||||
|
||||
impl SqlHandler {
|
||||
pub(crate) async fn delete(&self, query_ctx: QueryContextRef, stmt: Delete) -> Result<Output> {
|
||||
let (catalog_name, schema_name, table_name) =
|
||||
table_idents_to_full_name(stmt.table_name(), query_ctx)?;
|
||||
let table_ref = TableReference {
|
||||
catalog: &catalog_name.to_string(),
|
||||
schema: &schema_name.to_string(),
|
||||
table: &table_name.to_string(),
|
||||
};
|
||||
|
||||
let table = self.get_table(&table_ref)?;
|
||||
|
||||
let req = DeleteRequest {
|
||||
key_column_values: parse_selection(stmt.selection(), &table)?,
|
||||
};
|
||||
|
||||
let affected_rows = table.delete(req).await.with_context(|_| DeleteSnafu {
|
||||
table_name: table_ref.to_string(),
|
||||
})?;
|
||||
|
||||
Ok(Output::AffectedRows(affected_rows))
|
||||
}
|
||||
}
|
||||
|
||||
/// Parses the selection. The currently supported format is
/// `tagkey1 = 'tagvalue1' AND ts = value`: only `=` and `AND` are allowed in
/// the WHERE clause, and all key columns must be provided.
fn parse_selection(
|
||||
selection: &Option<Expr>,
|
||||
table: &TableRef,
|
||||
) -> Result<HashMap<String, VectorRef>> {
|
||||
let mut key_column_values = HashMap::new();
|
||||
if let Some(expr) = selection {
|
||||
parse_expr(expr, &mut key_column_values, table)?;
|
||||
}
|
||||
Ok(key_column_values)
|
||||
}
|
||||
|
||||
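// A rough sketch of what the parser above produces; the table layout is made
// up, but the shape follows the doc comment: for
//   DELETE FROM test_table WHERE host = 'host1' AND ts = 1655276557000
// parse_selection returns one single-element vector per key column, e.g.
// {"host": ["host1"], "ts": [1655276557000]}, which then becomes the
// DeleteRequest::key_column_values map used in delete() above.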
fn parse_expr(
|
||||
expr: &Expr,
|
||||
key_column_values: &mut HashMap<String, VectorRef>,
|
||||
table: &TableRef,
|
||||
) -> Result<()> {
|
||||
// match BinaryOp
|
||||
if let Expr::BinaryOp { left, op, right } = expr {
|
||||
match (&**left, op, &**right) {
|
||||
// match And operator
|
||||
(Expr::BinaryOp { .. }, BinaryOperator::And, Expr::BinaryOp { .. }) => {
|
||||
parse_expr(left, key_column_values, table)?;
|
||||
parse_expr(right, key_column_values, table)?;
|
||||
return Ok(());
|
||||
}
|
||||
// match Eq operator
|
||||
(Expr::Identifier(column_name), BinaryOperator::Eq, Expr::Value(value)) => {
|
||||
key_column_values.insert(
|
||||
column_name.to_string(),
|
||||
value_to_vector(&column_name.to_string(), value, table)?,
|
||||
);
|
||||
return Ok(());
|
||||
}
|
||||
(Expr::Identifier(column_name), BinaryOperator::Eq, Expr::Identifier(value)) => {
|
||||
key_column_values.insert(
|
||||
column_name.to_string(),
|
||||
Arc::new(StringVector::from(vec![value.to_string()])),
|
||||
);
|
||||
return Ok(());
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
NotSupportSqlSnafu {
|
||||
msg: format!(
|
||||
"Not support sql expr:{expr},correct format is tagkey1 = tagvalue1 and ts = value"
|
||||
),
|
||||
}
|
||||
.fail()
|
||||
}
|
||||
|
||||
/// parse value to vector
|
||||
fn value_to_vector(column_name: &String, sql_value: &Value, table: &TableRef) -> Result<VectorRef> {
|
||||
let schema = table.schema();
|
||||
let column_schema =
|
||||
schema
|
||||
.column_schema_by_name(column_name)
|
||||
.with_context(|| ColumnNotFoundSnafu {
|
||||
table_name: table.table_info().name.clone(),
|
||||
column_name: column_name.to_string(),
|
||||
})?;
|
||||
let data_type = &column_schema.data_type;
|
||||
let value = sql_value_to_value(column_name, data_type, sql_value);
|
||||
match value {
|
||||
Ok(value) => {
|
||||
let mut vec = data_type.create_mutable_vector(1);
|
||||
if vec.try_push_value_ref(value.as_value_ref()).is_err() {
|
||||
return InvalidSqlSnafu {
|
||||
msg: format!(
|
||||
"invalid sql, column name is {column_name}, value is {sql_value}",
|
||||
),
|
||||
}
|
||||
.fail();
|
||||
}
|
||||
Ok(vec.to_vector())
|
||||
}
|
||||
_ => InvalidSqlSnafu {
|
||||
msg: format!("invalid sql, column name is {column_name}, value is {sql_value}",),
|
||||
}
|
||||
.fail(),
|
||||
}
|
||||
}
|
||||
@@ -12,9 +12,9 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use catalog::SchemaProviderRef;
|
||||
use common_query::Output;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use table::engine::TableReference;
|
||||
use table::requests::FlushTableRequest;
|
||||
|
||||
use crate::error::{self, CatalogSnafu, DatabaseNotFoundSnafu, Result};
|
||||
@@ -22,32 +22,22 @@ use crate::sql::SqlHandler;
|
||||
|
||||
impl SqlHandler {
|
||||
pub(crate) async fn flush_table(&self, req: FlushTableRequest) -> Result<Output> {
|
||||
if let Some(table) = &req.table_name {
|
||||
self.flush_table_inner(
|
||||
&req.catalog_name,
|
||||
&req.schema_name,
|
||||
table,
|
||||
req.region_number,
|
||||
)
|
||||
.await?;
|
||||
} else {
|
||||
let schema = self
|
||||
.catalog_manager
|
||||
.schema(&req.catalog_name, &req.schema_name)
|
||||
.context(CatalogSnafu)?
|
||||
.context(DatabaseNotFoundSnafu {
|
||||
catalog: &req.catalog_name,
|
||||
schema: &req.schema_name,
|
||||
})?;
|
||||
let schema = self
|
||||
.catalog_manager
|
||||
.schema(&req.catalog_name, &req.schema_name)
|
||||
.context(CatalogSnafu)?
|
||||
.context(DatabaseNotFoundSnafu {
|
||||
catalog: &req.catalog_name,
|
||||
schema: &req.schema_name,
|
||||
})?;
|
||||
|
||||
if let Some(table) = &req.table_name {
|
||||
self.flush_table_inner(schema, table, req.region_number, req.wait)
|
||||
.await?;
|
||||
} else {
|
||||
let all_table_names = schema.table_names().context(CatalogSnafu)?;
|
||||
futures::future::join_all(all_table_names.iter().map(|table| {
|
||||
self.flush_table_inner(
|
||||
&req.catalog_name,
|
||||
&req.schema_name,
|
||||
table,
|
||||
req.region_number,
|
||||
)
|
||||
self.flush_table_inner(schema.clone(), table, req.region_number, req.wait)
|
||||
}))
|
||||
.await
|
||||
.into_iter()
|
||||
@@ -58,21 +48,18 @@ impl SqlHandler {
|
||||
|
||||
async fn flush_table_inner(
|
||||
&self,
|
||||
catalog: &str,
|
||||
schema: &str,
|
||||
table: &str,
|
||||
schema: SchemaProviderRef,
|
||||
table_name: &str,
|
||||
region: Option<u32>,
|
||||
wait: Option<bool>,
|
||||
) -> Result<()> {
|
||||
let table_ref = TableReference {
|
||||
catalog,
|
||||
schema,
|
||||
table,
|
||||
};
|
||||
|
||||
let full_table_name = table_ref.to_string();
|
||||
let table = self.get_table(&table_ref)?;
|
||||
table.flush(region).await.context(error::FlushTableSnafu {
|
||||
table_name: full_table_name,
|
||||
})
|
||||
schema
|
||||
.table(table_name)
|
||||
.await
|
||||
.context(error::FindTableSnafu { table_name })?
|
||||
.context(error::TableNotFoundSnafu { table_name })?
|
||||
.flush(region, wait)
|
||||
.await
|
||||
.context(error::FlushTableSnafu { table_name })
|
||||
}
|
||||
}
|
||||
|
||||
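// A minimal sketch of the request handled above; the names are illustrative
// and FlushTableRequest is assumed to carry exactly the fields used here:
use table::requests::FlushTableRequest;

fn example_flush_request() -> FlushTableRequest {
    FlushTableRequest {
        catalog_name: "greptime".to_string(),
        schema_name: "public".to_string(),
        // Leaving table_name as None flushes every table in the schema instead.
        table_name: Some("demo".to_string()),
        region_number: None,
        wait: Some(true),
    }
}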
@@ -11,49 +11,31 @@
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
use std::collections::HashMap;
|
||||
use std::pin::Pin;
|
||||
|
||||
use catalog::CatalogManagerRef;
|
||||
use common_catalog::format_full_table_name;
|
||||
use common_query::Output;
|
||||
use common_recordbatch::RecordBatch;
|
||||
use datafusion_expr::type_coercion::binary::coerce_types;
|
||||
use datafusion_expr::Operator;
|
||||
use datatypes::data_type::DataType;
|
||||
use datatypes::schema::ColumnSchema;
|
||||
use datatypes::vectors::MutableVector;
|
||||
use futures::stream::{self, StreamExt};
|
||||
use futures::Stream;
|
||||
use query::parser::QueryStatement;
|
||||
use session::context::QueryContextRef;
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
use sql::ast::Value as SqlValue;
|
||||
use sql::statements::insert::Insert;
|
||||
use sql::statements::statement::Statement;
|
||||
use sql::statements::{self};
|
||||
use table::engine::TableReference;
|
||||
use table::requests::*;
|
||||
use table::TableRef;
|
||||
|
||||
use crate::error::{
|
||||
CatalogSnafu, CollectRecordsSnafu, ColumnDefaultValueSnafu, ColumnNoneDefaultValueSnafu,
|
||||
ColumnNotFoundSnafu, ColumnTypeMismatchSnafu, ColumnValuesNumberMismatchSnafu, Error,
|
||||
ExecuteLogicalPlanSnafu, InsertSnafu, MissingInsertBodySnafu, ParseSqlSnafu,
|
||||
ParseSqlValueSnafu, PlanStatementSnafu, Result, TableNotFoundSnafu,
|
||||
CatalogSnafu, ColumnDefaultValueSnafu, ColumnNoneDefaultValueSnafu, ColumnNotFoundSnafu,
|
||||
ColumnValuesNumberMismatchSnafu, InsertSnafu, MissingInsertBodySnafu, ParseSqlSnafu,
|
||||
ParseSqlValueSnafu, Result, TableNotFoundSnafu,
|
||||
};
|
||||
use crate::sql::{table_idents_to_full_name, SqlHandler, SqlRequest};
|
||||
use crate::sql::{table_idents_to_full_name, SqlHandler};
|
||||
|
||||
const DEFAULT_PLACEHOLDER_VALUE: &str = "default";
|
||||
|
||||
type InsertRequestStream = Pin<Box<dyn Stream<Item = Result<SqlRequest>> + Send>>;
|
||||
pub(crate) enum InsertRequests {
|
||||
// Single request
|
||||
Request(SqlRequest),
|
||||
// Streaming requests
|
||||
Stream(InsertRequestStream),
|
||||
}
|
||||
|
||||
impl SqlHandler {
|
||||
pub(crate) async fn insert(&self, req: InsertRequest) -> Result<Output> {
|
||||
// FIXME(dennis): table_ref is used in InsertSnafu and the req is consumed
|
||||
@@ -77,7 +59,7 @@ impl SqlHandler {
|
||||
table_ref: TableReference,
|
||||
table: &TableRef,
|
||||
stmt: Insert,
|
||||
) -> Result<SqlRequest> {
|
||||
) -> Result<InsertRequest> {
|
||||
let values = stmt
|
||||
.values_body()
|
||||
.context(ParseSqlValueSnafu)?
|
||||
@@ -129,7 +111,7 @@ impl SqlHandler {
|
||||
}
|
||||
}
|
||||
|
||||
Ok(SqlRequest::Insert(InsertRequest {
|
||||
Ok(InsertRequest {
|
||||
catalog_name: table_ref.catalog.to_string(),
|
||||
schema_name: table_ref.schema.to_string(),
|
||||
table_name: table_ref.table.to_string(),
|
||||
@@ -138,150 +120,14 @@ impl SqlHandler {
|
||||
.map(|(cs, mut b)| (cs.name.to_string(), b.to_vector()))
|
||||
.collect(),
|
||||
region_number: 0,
|
||||
}))
|
||||
})
|
||||
}
|
||||
|
||||
fn build_request_from_batch(
|
||||
stmt: Insert,
|
||||
table: TableRef,
|
||||
batch: RecordBatch,
|
||||
query_ctx: QueryContextRef,
|
||||
) -> Result<SqlRequest> {
|
||||
let (catalog_name, schema_name, table_name) =
|
||||
table_idents_to_full_name(stmt.table_name(), query_ctx)?;
|
||||
|
||||
let schema = table.schema();
|
||||
let columns: Vec<_> = if stmt.columns().is_empty() {
|
||||
schema
|
||||
.column_schemas()
|
||||
.iter()
|
||||
.map(|c| c.name.to_string())
|
||||
.collect()
|
||||
} else {
|
||||
stmt.columns().iter().map(|c| (*c).clone()).collect()
|
||||
};
|
||||
let columns_num = columns.len();
|
||||
|
||||
ensure!(
|
||||
batch.num_columns() == columns_num,
|
||||
ColumnValuesNumberMismatchSnafu {
|
||||
columns: columns_num,
|
||||
values: batch.num_columns(),
|
||||
}
|
||||
);
|
||||
|
||||
let batch_schema = &batch.schema;
|
||||
let batch_columns = batch_schema.column_schemas();
|
||||
assert_eq!(batch_columns.len(), columns_num);
|
||||
let mut columns_values = HashMap::with_capacity(columns_num);
|
||||
|
||||
for (i, column_name) in columns.into_iter().enumerate() {
|
||||
let column_schema = schema
|
||||
.column_schema_by_name(&column_name)
|
||||
.with_context(|| ColumnNotFoundSnafu {
|
||||
table_name: &table_name,
|
||||
column_name: &column_name,
|
||||
})?;
|
||||
let expect_datatype = column_schema.data_type.as_arrow_type();
|
||||
// It's safe to retrieve the column schema by index, we already
|
||||
// check columns number is the same above.
|
||||
let batch_datatype = batch_columns[i].data_type.as_arrow_type();
|
||||
let coerced_type = coerce_types(&expect_datatype, &Operator::Eq, &batch_datatype)
|
||||
.map_err(|_| Error::ColumnTypeMismatch {
|
||||
column: column_name.clone(),
|
||||
expected: column_schema.data_type.clone(),
|
||||
actual: batch_columns[i].data_type.clone(),
|
||||
})?;
|
||||
|
||||
ensure!(
|
||||
expect_datatype == coerced_type,
|
||||
ColumnTypeMismatchSnafu {
|
||||
column: column_name,
|
||||
expected: column_schema.data_type.clone(),
|
||||
actual: batch_columns[i].data_type.clone(),
|
||||
}
|
||||
);
|
||||
let vector = batch
|
||||
.column(i)
|
||||
.cast(&column_schema.data_type)
|
||||
.map_err(|_| Error::ColumnTypeMismatch {
|
||||
column: column_name.clone(),
|
||||
expected: column_schema.data_type.clone(),
|
||||
actual: batch_columns[i].data_type.clone(),
|
||||
})?;
|
||||
|
||||
columns_values.insert(column_name, vector);
|
||||
}
|
||||
|
||||
Ok(SqlRequest::Insert(InsertRequest {
|
||||
catalog_name,
|
||||
schema_name,
|
||||
table_name,
|
||||
columns_values,
|
||||
region_number: 0,
|
||||
}))
|
||||
}
|
||||
|
||||
// FIXME(dennis): move it to frontend when refactor is done.
|
||||
async fn build_stream_from_query(
|
||||
&self,
|
||||
table: TableRef,
|
||||
stmt: Insert,
|
||||
query_ctx: QueryContextRef,
|
||||
) -> Result<InsertRequestStream> {
|
||||
let query = stmt
|
||||
.query_body()
|
||||
.context(ParseSqlValueSnafu)?
|
||||
.context(MissingInsertBodySnafu)?;
|
||||
|
||||
let logical_plan = self
|
||||
.query_engine
|
||||
.planner()
|
||||
.plan(
|
||||
QueryStatement::Sql(Statement::Query(Box::new(query))),
|
||||
query_ctx.clone(),
|
||||
)
|
||||
.await
|
||||
.context(PlanStatementSnafu)?;
|
||||
|
||||
let output = self
|
||||
.query_engine
|
||||
.execute(&logical_plan)
|
||||
.await
|
||||
.context(ExecuteLogicalPlanSnafu)?;
|
||||
|
||||
let stream: InsertRequestStream = match output {
|
||||
Output::RecordBatches(batches) => {
|
||||
Box::pin(stream::iter(batches.take()).map(move |batch| {
|
||||
Self::build_request_from_batch(
|
||||
stmt.clone(),
|
||||
table.clone(),
|
||||
batch,
|
||||
query_ctx.clone(),
|
||||
)
|
||||
}))
|
||||
}
|
||||
|
||||
Output::Stream(stream) => Box::pin(stream.map(move |batch| {
|
||||
Self::build_request_from_batch(
|
||||
stmt.clone(),
|
||||
table.clone(),
|
||||
batch.context(CollectRecordsSnafu)?,
|
||||
query_ctx.clone(),
|
||||
)
|
||||
})),
|
||||
_ => unreachable!(),
|
||||
};
|
||||
|
||||
Ok(stream)
|
||||
}
|
||||
|
||||
pub(crate) async fn insert_to_requests(
|
||||
&self,
|
||||
pub async fn insert_to_request(
|
||||
catalog_manager: CatalogManagerRef,
|
||||
stmt: Insert,
|
||||
query_ctx: QueryContextRef,
|
||||
) -> Result<InsertRequests> {
|
||||
) -> Result<InsertRequest> {
|
||||
let (catalog_name, schema_name, table_name) =
|
||||
table_idents_to_full_name(stmt.table_name(), query_ctx.clone())?;
|
||||
|
||||
@@ -293,16 +139,8 @@ impl SqlHandler {
|
||||
table_name: format_full_table_name(&catalog_name, &schema_name, &table_name),
|
||||
})?;
|
||||
|
||||
if stmt.is_insert_select() {
|
||||
Ok(InsertRequests::Stream(
|
||||
self.build_stream_from_query(table, stmt, query_ctx).await?,
|
||||
))
|
||||
} else {
|
||||
let table_ref = TableReference::full(&catalog_name, &schema_name, &table_name);
|
||||
Ok(InsertRequests::Request(Self::build_request_from_values(
|
||||
table_ref, &table, stmt,
|
||||
)?))
|
||||
}
|
||||
let table_ref = TableReference::full(&catalog_name, &schema_name, &table_name);
|
||||
Self::build_request_from_values(table_ref, &table, stmt)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
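// A rough sketch of the mapping build_request_from_values performs above; the
// table and values are made up:
//
//   INSERT INTO demo(host, cpu) VALUES ('host1', 66.6), ('host2', 88.8)
//
// becomes an InsertRequest whose columns_values map holds one vector per
// column ("host" -> ["host1", "host2"], "cpu" -> [66.6, 88.8]), with
// region_number fixed to 0, matching the fields filled in above.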
@@ -22,7 +22,7 @@ use common_telemetry::logging;
|
||||
use datatypes::data_type::ConcreteDataType;
|
||||
use datatypes::vectors::{Int64Vector, StringVector, UInt64Vector, VectorRef};
|
||||
use query::parser::{QueryLanguageParser, QueryStatement};
|
||||
use session::context::QueryContext;
|
||||
use session::context::{QueryContext, QueryContextRef};
|
||||
use snafu::ResultExt;
|
||||
use sql::statements::statement::Statement;
|
||||
|
||||
@@ -217,20 +217,20 @@ async fn test_execute_insert_by_select() {
|
||||
try_execute_sql(&instance, "insert into demo2(host) select * from demo1")
|
||||
.await
|
||||
.unwrap_err(),
|
||||
Error::ColumnValuesNumberMismatch { .. }
|
||||
Error::PlanStatement { .. }
|
||||
));
|
||||
assert!(matches!(
|
||||
try_execute_sql(&instance, "insert into demo2 select cpu,memory from demo1")
|
||||
.await
|
||||
.unwrap_err(),
|
||||
Error::ColumnValuesNumberMismatch { .. }
|
||||
Error::PlanStatement { .. }
|
||||
));
|
||||
|
||||
assert!(matches!(
|
||||
try_execute_sql(&instance, "insert into demo2(ts) select memory from demo1")
|
||||
.await
|
||||
.unwrap_err(),
|
||||
Error::ColumnTypeMismatch { .. }
|
||||
Error::PlanStatement { .. }
|
||||
));
|
||||
|
||||
let output = execute_sql(&instance, "insert into demo2 select * from demo1").await;
|
||||
@@ -751,7 +751,7 @@ async fn test_delete() {

    let output = execute_sql(
        &instance,
        "delete from test_table where host = host1 and ts = 1655276557000 ",
        "delete from test_table where host = 'host1' and ts = 1655276557000 ",
    )
    .await;
    assert!(matches!(output, Output::AffectedRows(1)));
@@ -769,36 +769,6 @@
    check_output_stream(output, expect).await;
}

#[tokio::test(flavor = "multi_thread")]
|
||||
async fn test_execute_copy_to() {
|
||||
let instance = setup_test_instance("test_execute_copy_to").await;
|
||||
|
||||
// setups
|
||||
execute_sql(
|
||||
&instance,
|
||||
"create table demo(host string, cpu double, memory double, ts timestamp time index);",
|
||||
)
|
||||
.await;
|
||||
|
||||
let output = execute_sql(
|
||||
&instance,
|
||||
r#"insert into demo(host, cpu, memory, ts) values
|
||||
('host1', 66.6, 1024, 1655276557000),
|
||||
('host2', 88.8, 333.3, 1655276558000)
|
||||
"#,
|
||||
)
|
||||
.await;
|
||||
assert!(matches!(output, Output::AffectedRows(2)));
|
||||
|
||||
// exports
|
||||
let data_dir = instance.data_tmp_dir().path();
|
||||
|
||||
let copy_to_stmt = format!("Copy demo TO '{}/export/demo.parquet'", data_dir.display());
|
||||
|
||||
let output = execute_sql(&instance, ©_to_stmt).await;
|
||||
assert!(matches!(output, Output::AffectedRows(2)));
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread")]
|
||||
async fn test_execute_copy_to_s3() {
|
||||
logging::init_default_ut_logging();
|
||||
@@ -838,91 +808,6 @@ async fn test_execute_copy_to_s3() {
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread")]
|
||||
async fn test_execute_copy_from() {
|
||||
let instance = setup_test_instance("test_execute_copy_from").await;
|
||||
|
||||
// setups
|
||||
execute_sql(
|
||||
&instance,
|
||||
"create table demo(host string, cpu double, memory double, ts timestamp time index);",
|
||||
)
|
||||
.await;
|
||||
|
||||
let output = execute_sql(
|
||||
&instance,
|
||||
r#"insert into demo(host, cpu, memory, ts) values
|
||||
('host1', 66.6, 1024, 1655276557000),
|
||||
('host2', 88.8, 333.3, 1655276558000)
|
||||
"#,
|
||||
)
|
||||
.await;
|
||||
assert!(matches!(output, Output::AffectedRows(2)));
|
||||
|
||||
// export
|
||||
let data_dir = instance.data_tmp_dir().path();
|
||||
|
||||
let copy_to_stmt = format!("Copy demo TO '{}/export/demo.parquet'", data_dir.display());
|
||||
|
||||
let output = execute_sql(&instance, ©_to_stmt).await;
|
||||
assert!(matches!(output, Output::AffectedRows(2)));
|
||||
|
||||
struct Test<'a> {
|
||||
sql: &'a str,
|
||||
table_name: &'a str,
|
||||
}
|
||||
let tests = [
|
||||
Test {
|
||||
sql: &format!(
|
||||
"Copy with_filename FROM '{}/export/demo.parquet_1_2'",
|
||||
data_dir.display()
|
||||
),
|
||||
table_name: "with_filename",
|
||||
},
|
||||
Test {
|
||||
sql: &format!("Copy with_path FROM '{}/export/'", data_dir.display()),
|
||||
table_name: "with_path",
|
||||
},
|
||||
Test {
|
||||
sql: &format!(
|
||||
"Copy with_pattern FROM '{}/export/' WITH (PATTERN = 'demo.*')",
|
||||
data_dir.display()
|
||||
),
|
||||
table_name: "with_pattern",
|
||||
},
|
||||
];
|
||||
|
||||
for test in tests {
|
||||
// import
|
||||
execute_sql(
|
||||
&instance,
|
||||
&format!(
|
||||
"create table {}(host string, cpu double, memory double, ts timestamp time index);",
|
||||
test.table_name
|
||||
),
|
||||
)
|
||||
.await;
|
||||
|
||||
let output = execute_sql(&instance, test.sql).await;
|
||||
assert!(matches!(output, Output::AffectedRows(2)));
|
||||
|
||||
let output = execute_sql(
|
||||
&instance,
|
||||
&format!("select * from {} order by ts", test.table_name),
|
||||
)
|
||||
.await;
|
||||
let expected = "\
|
||||
+-------+------+--------+---------------------+
|
||||
| host | cpu | memory | ts |
|
||||
+-------+------+--------+---------------------+
|
||||
| host1 | 66.6 | 1024.0 | 2022-06-15T07:02:37 |
|
||||
| host2 | 88.8 | 333.3 | 2022-06-15T07:02:38 |
|
||||
+-------+------+--------+---------------------+"
|
||||
.to_string();
|
||||
check_output_stream(output, expected).await;
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread")]
|
||||
async fn test_execute_copy_from_s3() {
|
||||
logging::init_default_ut_logging();
|
||||
@@ -1077,16 +962,30 @@ async fn try_execute_sql_in_db(
|
||||
) -> Result<Output, crate::error::Error> {
|
||||
let query_ctx = Arc::new(QueryContext::with(DEFAULT_CATALOG_NAME, db));
|
||||
|
||||
async fn plan_exec(
|
||||
instance: &MockInstance,
|
||||
stmt: QueryStatement,
|
||||
query_ctx: QueryContextRef,
|
||||
) -> Result<Output, Error> {
|
||||
let engine = instance.inner().query_engine();
|
||||
let plan = engine
|
||||
.planner()
|
||||
.plan(stmt, query_ctx.clone())
|
||||
.await
|
||||
.context(PlanStatementSnafu)?;
|
||||
engine
|
||||
.execute(plan, query_ctx)
|
||||
.await
|
||||
.context(ExecuteLogicalPlanSnafu)
|
||||
}
|
||||
|
||||
let stmt = QueryLanguageParser::parse_sql(sql).unwrap();
|
||||
match stmt {
|
||||
QueryStatement::Sql(Statement::Query(_)) => {
|
||||
let engine = instance.inner().query_engine();
|
||||
let plan = engine
|
||||
.planner()
|
||||
.plan(stmt, query_ctx)
|
||||
.await
|
||||
.context(PlanStatementSnafu)?;
|
||||
engine.execute(&plan).await.context(ExecuteLogicalPlanSnafu)
|
||||
QueryStatement::Sql(Statement::Query(_)) | QueryStatement::Sql(Statement::Delete(_)) => {
|
||||
plan_exec(instance, stmt, query_ctx).await
|
||||
}
|
||||
QueryStatement::Sql(Statement::Insert(ref insert)) if insert.is_insert_select() => {
|
||||
plan_exec(instance, stmt, query_ctx).await
|
||||
}
|
||||
_ => instance.inner().execute_stmt(stmt, query_ctx).await,
|
||||
}
|
||||
|
||||
@@ -24,7 +24,6 @@ use datatypes::schema::{ColumnSchema, RawSchema};
|
||||
use mito::config::EngineConfig;
|
||||
use mito::table::test_util::{new_test_object_store, MockEngine, MockMitoEngine};
|
||||
use query::parser::{PromQuery, QueryLanguageParser, QueryStatement};
|
||||
use query::QueryEngineFactory;
|
||||
use servers::Mode;
|
||||
use session::context::QueryContext;
|
||||
use snafu::ResultExt;
|
||||
@@ -87,7 +86,7 @@ impl MockInstance {
|
||||
match stmt {
|
||||
QueryStatement::Sql(Statement::Query(_)) => {
|
||||
let plan = planner.plan(stmt, QueryContext::arc()).await.unwrap();
|
||||
engine.execute(&plan).await.unwrap()
|
||||
engine.execute(plan, QueryContext::arc()).await.unwrap()
|
||||
}
|
||||
QueryStatement::Sql(Statement::Tql(tql)) => {
|
||||
let plan = match tql {
|
||||
@@ -103,7 +102,7 @@ impl MockInstance {
|
||||
}
|
||||
Tql::Explain(_) => unimplemented!(),
|
||||
};
|
||||
engine.execute(&plan).await.unwrap()
|
||||
engine.execute(plan, QueryContext::arc()).await.unwrap()
|
||||
}
|
||||
_ => self
|
||||
.inner()
|
||||
@@ -116,10 +115,6 @@ impl MockInstance {
|
||||
pub(crate) fn inner(&self) -> &Instance {
|
||||
&self.instance
|
||||
}
|
||||
|
||||
pub(crate) fn data_tmp_dir(&self) -> &TempDir {
|
||||
&self._guard._data_tmp_dir
|
||||
}
|
||||
}
|
||||
|
||||
struct TestGuard {
|
||||
@@ -205,17 +200,7 @@ pub async fn create_mock_sql_handler() -> SqlHandler {
|
||||
.await
|
||||
.unwrap(),
|
||||
);
|
||||
|
||||
let catalog_list = catalog::local::new_memory_catalog_list().unwrap();
|
||||
let factory = QueryEngineFactory::new(catalog_list);
|
||||
|
||||
SqlHandler::new(
|
||||
mock_engine.clone(),
|
||||
catalog_manager,
|
||||
factory.query_engine(),
|
||||
mock_engine,
|
||||
None,
|
||||
)
|
||||
SqlHandler::new(mock_engine.clone(), catalog_manager, mock_engine, None)
|
||||
}
|
||||
|
||||
pub(crate) async fn setup_test_instance(test_name: &str) -> MockInstance {
|
||||
|
||||
@@ -428,52 +428,63 @@ fn parse_stmt(sql: &str) -> Result<Vec<Statement>> {
|
||||
}
|
||||
|
||||
impl Instance {
|
||||
async fn plan_exec(&self, stmt: Statement, query_ctx: QueryContextRef) -> Result<Output> {
|
||||
let planner = self.query_engine.planner();
|
||||
let plan = planner
|
||||
.plan(QueryStatement::Sql(stmt), query_ctx.clone())
|
||||
.await
|
||||
.context(PlanStatementSnafu)?;
|
||||
self.query_engine
|
||||
.execute(plan, query_ctx)
|
||||
.await
|
||||
.context(ExecLogicalPlanSnafu)
|
||||
}
|
||||
|
||||
async fn execute_tql(&self, tql: Tql, query_ctx: QueryContextRef) -> Result<Output> {
|
||||
let plan = match tql {
|
||||
Tql::Eval(eval) => {
|
||||
let promql = PromQuery {
|
||||
start: eval.start,
|
||||
end: eval.end,
|
||||
step: eval.step,
|
||||
query: eval.query,
|
||||
};
|
||||
let stmt = QueryLanguageParser::parse_promql(&promql).context(ParseQuerySnafu)?;
|
||||
self.query_engine
|
||||
.planner()
|
||||
.plan(stmt, query_ctx.clone())
|
||||
.await
|
||||
.context(PlanStatementSnafu)?
|
||||
}
|
||||
Tql::Explain(_) => unimplemented!(),
|
||||
};
|
||||
self.query_engine
|
||||
.execute(plan, query_ctx)
|
||||
.await
|
||||
.context(ExecLogicalPlanSnafu)
|
||||
}
|
||||
|
||||
async fn query_statement(&self, stmt: Statement, query_ctx: QueryContextRef) -> Result<Output> {
|
||||
check_permission(self.plugins.clone(), &stmt, &query_ctx)?;
|
||||
|
||||
let planner = self.query_engine.planner();
|
||||
|
||||
match stmt {
|
||||
Statement::Query(_) | Statement::Explain(_) => {
|
||||
let plan = planner
|
||||
.plan(QueryStatement::Sql(stmt), query_ctx)
|
||||
.await
|
||||
.context(PlanStatementSnafu)?;
|
||||
self.query_engine
|
||||
.execute(&plan)
|
||||
.await
|
||||
.context(ExecLogicalPlanSnafu)
|
||||
Statement::Query(_) | Statement::Explain(_) | Statement::Delete(_) => {
|
||||
self.plan_exec(stmt, query_ctx).await
|
||||
}
|
||||
Statement::Tql(tql) => {
|
||||
let plan = match tql {
|
||||
Tql::Eval(eval) => {
|
||||
let promql = PromQuery {
|
||||
start: eval.start,
|
||||
end: eval.end,
|
||||
step: eval.step,
|
||||
query: eval.query,
|
||||
};
|
||||
let stmt =
|
||||
QueryLanguageParser::parse_promql(&promql).context(ParseQuerySnafu)?;
|
||||
planner
|
||||
.plan(stmt, query_ctx)
|
||||
.await
|
||||
.context(PlanStatementSnafu)?
|
||||
}
|
||||
Tql::Explain(_) => unimplemented!(),
|
||||
};
|
||||
self.query_engine
|
||||
.execute(&plan)
|
||||
.await
|
||||
.context(ExecLogicalPlanSnafu)
|
||||
|
||||
// For performance considerations, only "insert with select" is executed by the query engine.
// Plain insert ("insert with values") is still executed directly as a statement.
|
||||
Statement::Insert(ref insert) if insert.is_insert_select() => {
|
||||
self.plan_exec(stmt, query_ctx).await
|
||||
}
|
||||
|
||||
Statement::Tql(tql) => self.execute_tql(tql, query_ctx).await,
|
||||
Statement::CreateDatabase(_)
|
||||
| Statement::ShowDatabases(_)
|
||||
| Statement::CreateTable(_)
|
||||
| Statement::ShowTables(_)
|
||||
| Statement::DescribeTable(_)
|
||||
| Statement::Insert(_)
|
||||
| Statement::Delete(_)
|
||||
| Statement::Alter(_)
|
||||
| Statement::DropTable(_)
|
||||
| Statement::Copy(_) => self
|
||||
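// A condensed view of the dispatch the match above implements; only the
// statement kinds visible in these arms are listed:
//
//   Query / Explain / Delete  -> plan_exec (planner + query engine)
//   Tql                       -> execute_tql
//   Insert ... select ...     -> plan_exec (query engine)
//   plain Insert, DDL, Copy   -> the statement-execution arm that follows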
@@ -647,8 +658,8 @@ pub fn check_permission(
|
||||
}
|
||||
|
||||
match stmt {
|
||||
// query,explain and tql will be checked in QueryEngineState
|
||||
Statement::Query(_) | Statement::Explain(_) | Statement::Tql(_) => {}
|
||||
// These are executed by query engine, and will be checked there.
|
||||
Statement::Query(_) | Statement::Explain(_) | Statement::Tql(_) | Statement::Delete(_) => {}
|
||||
// database ops won't be checked
|
||||
Statement::CreateDatabase(_) | Statement::ShowDatabases(_) | Statement::Use(_) => {}
|
||||
// show create table and alter are not supported yet
|
||||
@@ -673,9 +684,6 @@ pub fn check_permission(
|
||||
Statement::DescribeTable(stmt) => {
|
||||
validate_param(stmt.name(), query_ctx)?;
|
||||
}
|
||||
Statement::Delete(delete) => {
|
||||
validate_param(delete.table_name(), query_ctx)?;
|
||||
}
|
||||
Statement::Copy(stmd) => match stmd {
|
||||
CopyTable::To(copy_table_to) => validate_param(©_table_to.table_name, query_ctx)?,
|
||||
CopyTable::From(copy_table_from) => {
|
||||
@@ -1086,7 +1094,7 @@ mod tests {
|
||||
.plan(stmt.clone(), QueryContext::arc())
|
||||
.await
|
||||
.unwrap();
|
||||
let output = engine.execute(&plan).await.unwrap();
|
||||
let output = engine.execute(plan, QueryContext::arc()).await.unwrap();
|
||||
let Output::Stream(stream) = output else { unreachable!() };
|
||||
let recordbatches = RecordBatches::try_collect(stream).await.unwrap();
|
||||
let actual = recordbatches.pretty_print().unwrap();
|
||||
|
||||
@@ -33,6 +33,7 @@ use common_error::prelude::BoxedError;
|
||||
use common_query::Output;
|
||||
use common_telemetry::{debug, info};
|
||||
use datanode::instance::sql::table_idents_to_full_name;
|
||||
use datanode::sql::SqlHandler;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::schema::RawSchema;
|
||||
use meta_client::client::MetaClient;
|
||||
@@ -60,13 +61,12 @@ use crate::catalog::FrontendCatalogManager;
|
||||
use crate::datanode::DatanodeClients;
|
||||
use crate::error::{
|
||||
self, AlterExprToRequestSnafu, CatalogEntrySerdeSnafu, CatalogSnafu, ColumnDataTypeSnafu,
|
||||
DeserializePartitionSnafu, NotSupportedSnafu, ParseSqlSnafu, PrimaryKeyNotFoundSnafu,
|
||||
RequestDatanodeSnafu, RequestMetaSnafu, Result, SchemaExistsSnafu, StartMetaClientSnafu,
|
||||
TableAlreadyExistSnafu, TableNotFoundSnafu, TableSnafu, ToTableInsertRequestSnafu,
|
||||
UnrecognizedTableOptionSnafu,
|
||||
DeserializePartitionSnafu, InvokeDatanodeSnafu, NotSupportedSnafu, ParseSqlSnafu,
|
||||
PrimaryKeyNotFoundSnafu, RequestDatanodeSnafu, RequestMetaSnafu, Result, SchemaExistsSnafu,
|
||||
StartMetaClientSnafu, TableAlreadyExistSnafu, TableNotFoundSnafu, TableSnafu,
|
||||
ToTableInsertRequestSnafu, UnrecognizedTableOptionSnafu,
|
||||
};
|
||||
use crate::expr_factory;
|
||||
use crate::sql::insert_to_request;
|
||||
use crate::table::DistTable;
|
||||
|
||||
#[derive(Clone)]
|
||||
@@ -374,7 +374,10 @@ impl DistInstance {
|
||||
.context(CatalogSnafu)?
|
||||
.context(TableNotFoundSnafu { table_name: table })?;
|
||||
|
||||
let insert_request = insert_to_request(&table, *insert, query_ctx)?;
|
||||
let insert_request =
|
||||
SqlHandler::insert_to_request(self.catalog_manager.clone(), *insert, query_ctx)
|
||||
.await
|
||||
.context(InvokeDatanodeSnafu)?;
|
||||
|
||||
return Ok(Output::AffectedRows(
|
||||
table.insert(insert_request).await.context(TableSnafu)?,
|
||||
|
||||
@@ -590,7 +590,7 @@ CREATE TABLE {table_name} (
|
||||
.plan(stmt, QueryContext::arc())
|
||||
.await
|
||||
.unwrap();
|
||||
let output = engine.execute(&plan).await.unwrap();
|
||||
let output = engine.execute(plan, QueryContext::arc()).await.unwrap();
|
||||
let Output::Stream(stream) = output else { unreachable!() };
|
||||
let recordbatches = RecordBatches::try_collect(stream).await.unwrap();
|
||||
let actual = recordbatches.pretty_print().unwrap();
|
||||
|
||||
@@ -28,7 +28,6 @@ pub mod postgres;
|
||||
pub mod prom;
|
||||
pub mod prometheus;
|
||||
mod server;
|
||||
mod sql;
|
||||
mod table;
|
||||
#[cfg(test)]
|
||||
mod tests;
|
||||
|
||||
@@ -1,130 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use common_error::ext::BoxedError;
|
||||
use common_error::snafu::ensure;
|
||||
use datanode::instance::sql::table_idents_to_full_name;
|
||||
use datatypes::data_type::DataType;
|
||||
use datatypes::prelude::MutableVector;
|
||||
use datatypes::schema::ColumnSchema;
|
||||
use session::context::QueryContextRef;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use sql::ast::Value as SqlValue;
|
||||
use sql::statements;
|
||||
use sql::statements::insert::Insert;
|
||||
use table::requests::InsertRequest;
|
||||
use table::TableRef;
|
||||
|
||||
use crate::error::{self, ExternalSnafu, Result};
|
||||
|
||||
const DEFAULT_PLACEHOLDER_VALUE: &str = "default";
|
||||
|
||||
// TODO(fys): Extract the common logic in datanode and frontend in the future.
|
||||
// This function converts an insert statement into an `InsertRequest` targeting region 0.
|
||||
pub(crate) fn insert_to_request(
|
||||
table: &TableRef,
|
||||
stmt: Insert,
|
||||
query_ctx: QueryContextRef,
|
||||
) -> Result<InsertRequest> {
|
||||
let columns = stmt.columns();
|
||||
let values = stmt
|
||||
.values_body()
|
||||
.context(error::ParseSqlSnafu)?
|
||||
.context(error::MissingInsertValuesSnafu)?;
|
||||
|
||||
let (catalog_name, schema_name, table_name) =
|
||||
table_idents_to_full_name(stmt.table_name(), query_ctx)
|
||||
.map_err(BoxedError::new)
|
||||
.context(ExternalSnafu)?;
|
||||
|
||||
let schema = table.schema();
|
||||
let columns_num = if columns.is_empty() {
|
||||
schema.column_schemas().len()
|
||||
} else {
|
||||
columns.len()
|
||||
};
|
||||
let rows_num = values.len();
|
||||
|
||||
let mut columns_builders: Vec<(&ColumnSchema, Box<dyn MutableVector>)> =
|
||||
Vec::with_capacity(columns_num);
|
||||
|
||||
if columns.is_empty() {
|
||||
for column_schema in schema.column_schemas() {
|
||||
let data_type = &column_schema.data_type;
|
||||
columns_builders.push((column_schema, data_type.create_mutable_vector(rows_num)));
|
||||
}
|
||||
} else {
|
||||
for column_name in columns {
|
||||
let column_schema = schema.column_schema_by_name(column_name).with_context(|| {
|
||||
error::ColumnNotFoundSnafu {
|
||||
table_name: &table_name,
|
||||
column_name: column_name.to_string(),
|
||||
}
|
||||
})?;
|
||||
let data_type = &column_schema.data_type;
|
||||
columns_builders.push((column_schema, data_type.create_mutable_vector(rows_num)));
|
||||
}
|
||||
}
|
||||
|
||||
for row in values {
|
||||
ensure!(
|
||||
row.len() == columns_num,
|
||||
error::ColumnValuesNumberMismatchSnafu {
|
||||
columns: columns_num,
|
||||
values: row.len(),
|
||||
}
|
||||
);
|
||||
|
||||
for (sql_val, (column_schema, builder)) in row.iter().zip(columns_builders.iter_mut()) {
|
||||
add_row_to_vector(column_schema, sql_val, builder)?;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(InsertRequest {
|
||||
catalog_name,
|
||||
schema_name,
|
||||
table_name,
|
||||
columns_values: columns_builders
|
||||
.into_iter()
|
||||
.map(|(cs, mut b)| (cs.name.to_string(), b.to_vector()))
|
||||
.collect(),
|
||||
region_number: 0,
|
||||
})
|
||||
}
|
||||
|
||||
fn add_row_to_vector(
|
||||
column_schema: &ColumnSchema,
|
||||
sql_val: &SqlValue,
|
||||
builder: &mut Box<dyn MutableVector>,
|
||||
) -> Result<()> {
|
||||
let value = if replace_default(sql_val) {
|
||||
column_schema
|
||||
.create_default()
|
||||
.context(error::ColumnDefaultValueSnafu {
|
||||
column: column_schema.name.to_string(),
|
||||
})?
|
||||
.context(error::ColumnNoneDefaultValueSnafu {
|
||||
column: column_schema.name.to_string(),
|
||||
})?
|
||||
} else {
|
||||
statements::sql_value_to_value(&column_schema.name, &column_schema.data_type, sql_val)
|
||||
.context(error::ParseSqlSnafu)?
|
||||
};
|
||||
builder.push_value_ref(value.as_value_ref());
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn replace_default(sql_val: &SqlValue) -> bool {
|
||||
matches!(sql_val, SqlValue::Placeholder(s) if s.to_lowercase() == DEFAULT_PLACEHOLDER_VALUE)
|
||||
}
|
||||
@@ -74,8 +74,7 @@ impl DistTable {
|
||||
|
||||
let mut success = 0;
|
||||
for join in joins {
|
||||
let object_result = join.await.context(error::JoinTaskSnafu)??;
|
||||
let Output::AffectedRows(rows) = object_result else { unreachable!() };
|
||||
let rows = join.await.context(error::JoinTaskSnafu)?? as usize;
|
||||
success += rows;
|
||||
}
|
||||
Ok(Output::AffectedRows(success))
|
||||
|
||||
@@ -47,7 +47,7 @@ impl DatanodeInstance {
|
||||
Self { table, db }
|
||||
}
|
||||
|
||||
pub(crate) async fn grpc_insert(&self, request: InsertRequest) -> client::Result<Output> {
|
||||
pub(crate) async fn grpc_insert(&self, request: InsertRequest) -> client::Result<u32> {
|
||||
self.db.insert(request).await
|
||||
}
|
||||
|
||||
|
||||
@@ -125,15 +125,15 @@ pub(crate) async fn create_datanode_client(
|
||||
|
||||
// create a mock datanode grpc service, see example here:
|
||||
// https://github.com/hyperium/tonic/blob/master/examples/src/mock/mock.rs
|
||||
let datanode_service = GrpcServer::new(
|
||||
let grpc_server = GrpcServer::new(
|
||||
ServerGrpcQueryHandlerAdaptor::arc(datanode_instance),
|
||||
None,
|
||||
runtime,
|
||||
)
|
||||
.create_service();
|
||||
);
|
||||
tokio::spawn(async move {
|
||||
Server::builder()
|
||||
.add_service(datanode_service)
|
||||
.add_service(grpc_server.create_flight_service())
|
||||
.add_service(grpc_server.create_database_service())
|
||||
.serve_with_incoming(futures::stream::iter(vec![Ok::<_, std::io::Error>(server)]))
|
||||
.await
|
||||
});
|
||||
|
||||
@@ -33,4 +33,4 @@ tokio-util.workspace = true
|
||||
|
||||
[dev-dependencies]
|
||||
common-test-util = { path = "../common/test-util" }
|
||||
rand = "0.8"
|
||||
rand.workspace = true
|
||||
|
||||
@@ -12,7 +12,7 @@ common-error = { path = "../common/error" }
|
||||
common-grpc = { path = "../common/grpc" }
|
||||
common-telemetry = { path = "../common/telemetry" }
|
||||
etcd-client = "0.10"
|
||||
rand = "0.8"
|
||||
rand.workspace = true
|
||||
serde.workspace = true
|
||||
serde_json.workspace = true
|
||||
snafu.workspace = true
|
||||
|
||||
@@ -22,8 +22,8 @@ use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::schema::{ColumnSchema, RawSchema};
|
||||
use meta_client::client::MetaClientBuilder;
|
||||
use meta_client::rpc::{
|
||||
BatchPutRequest, CompareAndPutRequest, CreateRequest, DeleteRangeRequest, Partition,
|
||||
PutRequest, RangeRequest, TableName,
|
||||
BatchDeleteRequest, BatchGetRequest, BatchPutRequest, CompareAndPutRequest, CreateRequest,
|
||||
DeleteRangeRequest, Partition, PutRequest, RangeRequest, TableName,
|
||||
};
|
||||
use table::metadata::{RawTableInfo, RawTableMeta, TableIdent, TableType};
|
||||
use table::requests::TableOptions;
|
||||
@@ -146,6 +146,30 @@
    // get none
    let res = meta_client.range(range).await.unwrap();
    event!(Level::INFO, "get range result: {:#?}", res);

    // batch delete
    // put two
    let batch_put = BatchPutRequest::new()
        .add_kv(b"batch_put1".to_vec(), b"batch_put_v1".to_vec())
        .add_kv(b"batch_put2".to_vec(), b"batch_put_v2".to_vec())
        .with_prev_kv();
    let res = meta_client.batch_put(batch_put).await.unwrap();
    event!(Level::INFO, "batch put result: {:#?}", res);

    // delete one
    let batch_delete = BatchDeleteRequest::new()
        .add_key(b"batch_put1".to_vec())
        .with_prev_kv();
    let res = meta_client.batch_delete(batch_delete).await.unwrap();
    event!(Level::INFO, "batch delete result: {:#?}", res);

    // get other one
    let batch_get = BatchGetRequest::new()
        .add_key(b"batch_put1".to_vec())
        .add_key(b"batch_put2".to_vec());

    let res = meta_client.batch_get(batch_get).await.unwrap();
    event!(Level::INFO, "batch get result: {:#?}", res);
}

fn new_table_info() -> RawTableInfo {
|
||||
|
||||
@@ -32,10 +32,10 @@ use crate::error::Result;
|
||||
use crate::rpc::lock::{LockRequest, LockResponse, UnlockRequest};
|
||||
use crate::rpc::router::DeleteRequest;
|
||||
use crate::rpc::{
|
||||
BatchGetRequest, BatchGetResponse, BatchPutRequest, BatchPutResponse, CompareAndPutRequest,
|
||||
CompareAndPutResponse, CreateRequest, DeleteRangeRequest, DeleteRangeResponse,
|
||||
MoveValueRequest, MoveValueResponse, PutRequest, PutResponse, RangeRequest, RangeResponse,
|
||||
RouteRequest, RouteResponse,
|
||||
BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse, BatchPutRequest,
|
||||
BatchPutResponse, CompareAndPutRequest, CompareAndPutResponse, CreateRequest,
|
||||
DeleteRangeRequest, DeleteRangeResponse, MoveValueRequest, MoveValueResponse, PutRequest,
|
||||
PutResponse, RangeRequest, RangeResponse, RouteRequest, RouteResponse,
|
||||
};
|
||||
|
||||
pub type Id = (u64, u64);
|
||||
@@ -256,6 +256,14 @@ impl MetaClient {
|
||||
self.store_client()?.batch_put(req.into()).await?.try_into()
|
||||
}
|
||||
|
||||
/// BatchDelete atomically deletes the given keys from the key-value store.
|
||||
pub async fn batch_delete(&self, req: BatchDeleteRequest) -> Result<BatchDeleteResponse> {
|
||||
self.store_client()?
|
||||
.batch_delete(req.into())
|
||||
.await?
|
||||
.try_into()
|
||||
}
|
||||
|
||||
/// CompareAndPut atomically puts the value to the given updated
|
||||
/// value if the current value == the expected value.
|
||||
pub async fn compare_and_put(
|
||||
|
||||
@@ -17,9 +17,10 @@ use std::sync::Arc;
|
||||
|
||||
use api::v1::meta::store_client::StoreClient;
|
||||
use api::v1::meta::{
|
||||
BatchGetRequest, BatchGetResponse, BatchPutRequest, BatchPutResponse, CompareAndPutRequest,
|
||||
CompareAndPutResponse, DeleteRangeRequest, DeleteRangeResponse, MoveValueRequest,
|
||||
MoveValueResponse, PutRequest, PutResponse, RangeRequest, RangeResponse,
|
||||
BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse, BatchPutRequest,
|
||||
BatchPutResponse, CompareAndPutRequest, CompareAndPutResponse, DeleteRangeRequest,
|
||||
DeleteRangeResponse, MoveValueRequest, MoveValueResponse, PutRequest, PutResponse,
|
||||
RangeRequest, RangeResponse,
|
||||
};
|
||||
use common_grpc::channel_manager::ChannelManager;
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
@@ -80,6 +81,11 @@ impl Client {
|
||||
inner.batch_put(req).await
|
||||
}
|
||||
|
||||
pub async fn batch_delete(&self, req: BatchDeleteRequest) -> Result<BatchDeleteResponse> {
|
||||
let inner = self.inner.read().await;
|
||||
inner.batch_delete(req).await
|
||||
}
|
||||
|
||||
pub async fn compare_and_put(
|
||||
&self,
|
||||
req: CompareAndPutRequest,
|
||||
@@ -169,6 +175,17 @@ impl Inner {
|
||||
Ok(res.into_inner())
|
||||
}
|
||||
|
||||
async fn batch_delete(&self, mut req: BatchDeleteRequest) -> Result<BatchDeleteResponse> {
|
||||
let mut client = self.random_client()?;
|
||||
req.set_header(self.id);
|
||||
let res = client
|
||||
.batch_delete(req)
|
||||
.await
|
||||
.context(error::TonicStatusSnafu)?;
|
||||
|
||||
Ok(res.into_inner())
|
||||
}
|
||||
|
||||
async fn compare_and_put(
|
||||
&self,
|
||||
mut req: CompareAndPutRequest,
|
||||
|
||||
@@ -28,9 +28,10 @@ pub use router::{
|
||||
};
|
||||
use serde::{Deserialize, Serialize};
|
||||
pub use store::{
|
||||
BatchGetRequest, BatchGetResponse, BatchPutRequest, BatchPutResponse, CompareAndPutRequest,
|
||||
CompareAndPutResponse, DeleteRangeRequest, DeleteRangeResponse, MoveValueRequest,
|
||||
MoveValueResponse, PutRequest, PutResponse, RangeRequest, RangeResponse,
|
||||
BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse, BatchPutRequest,
|
||||
BatchPutResponse, CompareAndPutRequest, CompareAndPutResponse, DeleteRangeRequest,
|
||||
DeleteRangeResponse, MoveValueRequest, MoveValueResponse, PutRequest, PutResponse,
|
||||
RangeRequest, RangeResponse,
|
||||
};
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
|
||||
@@ -13,6 +13,7 @@
|
||||
// limitations under the License.
|
||||
|
||||
use api::v1::meta::{
|
||||
BatchDeleteRequest as PbBatchDeleteRequest, BatchDeleteResponse as PbBatchDeleteResponse,
|
||||
BatchGetRequest as PbBatchGetRequest, BatchGetResponse as PbBatchGetResponse,
|
||||
BatchPutRequest as PbBatchPutRequest, BatchPutResponse as PbBatchPutResponse,
|
||||
CompareAndPutRequest as PbCompareAndPutRequest,
|
||||
@@ -377,6 +378,78 @@ impl BatchPutResponse {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Default)]
|
||||
pub struct BatchDeleteRequest {
|
||||
pub keys: Vec<Vec<u8>>,
|
||||
/// If prev_kv is set, gets the previous key-value pairs before deleting them.
|
||||
/// The previous key-value pairs will be returned in the batch delete response.
|
||||
pub prev_kv: bool,
|
||||
}
|
||||
|
||||
impl From<BatchDeleteRequest> for PbBatchDeleteRequest {
|
||||
fn from(req: BatchDeleteRequest) -> Self {
|
||||
Self {
|
||||
header: None,
|
||||
keys: req.keys,
|
||||
prev_kv: req.prev_kv,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl BatchDeleteRequest {
|
||||
#[inline]
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
keys: vec![],
|
||||
prev_kv: false,
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn add_key(mut self, key: impl Into<Vec<u8>>) -> Self {
|
||||
self.keys.push(key.into());
|
||||
self
|
||||
}
|
||||
|
||||
/// If prev_kv is set, gets the previous key-value pair before deleting it.
|
||||
/// The previous key-value pair will be returned in the batch delete response.
|
||||
#[inline]
|
||||
pub fn with_prev_kv(mut self) -> Self {
|
||||
self.prev_kv = true;
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct BatchDeleteResponse(PbBatchDeleteResponse);
|
||||
|
||||
impl TryFrom<PbBatchDeleteResponse> for BatchDeleteResponse {
|
||||
type Error = error::Error;
|
||||
|
||||
fn try_from(pb: PbBatchDeleteResponse) -> Result<Self> {
|
||||
util::check_response_header(pb.header.as_ref())?;
|
||||
|
||||
Ok(Self::new(pb))
|
||||
}
|
||||
}
|
||||
|
||||
impl BatchDeleteResponse {
|
||||
#[inline]
|
||||
pub fn new(res: PbBatchDeleteResponse) -> Self {
|
||||
Self(res)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn take_header(&mut self) -> Option<ResponseHeader> {
|
||||
self.0.header.take().map(ResponseHeader::new)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn take_prev_kvs(&mut self) -> Vec<KeyValue> {
|
||||
self.0.prev_kvs.drain(..).map(KeyValue::new).collect()
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Default)]
|
||||
pub struct CompareAndPutRequest {
|
||||
/// key is the key, in bytes, to put into the key-value store.
|
||||
@@ -832,6 +905,39 @@ mod tests {
|
||||
assert_eq!(b"v1".to_vec(), kvs[0].value().to_vec());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_batch_delete_request_trans() {
|
||||
let req = BatchDeleteRequest::new()
|
||||
.add_key(b"test_key1".to_vec())
|
||||
.add_key(b"test_key2".to_vec())
|
||||
.add_key(b"test_key3".to_vec())
|
||||
.with_prev_kv();
|
||||
|
||||
let into_req: PbBatchDeleteRequest = req.into();
|
||||
assert!(into_req.header.is_none());
|
||||
assert_eq!(&b"test_key1".to_vec(), into_req.keys.get(0).unwrap());
|
||||
assert_eq!(&b"test_key2".to_vec(), into_req.keys.get(1).unwrap());
|
||||
assert_eq!(&b"test_key3".to_vec(), into_req.keys.get(2).unwrap());
|
||||
assert!(into_req.prev_kv);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_batch_delete_response_trans() {
|
||||
let pb_res = PbBatchDeleteResponse {
|
||||
header: None,
|
||||
prev_kvs: vec![PbKeyValue {
|
||||
key: b"k1".to_vec(),
|
||||
value: b"v1".to_vec(),
|
||||
}],
|
||||
};
|
||||
|
||||
let mut res = BatchDeleteResponse::new(pb_res);
|
||||
assert!(res.take_header().is_none());
|
||||
let kvs = res.take_prev_kvs();
|
||||
assert_eq!(b"k1".to_vec(), kvs[0].key().to_vec());
|
||||
assert_eq!(b"v1".to_vec(), kvs[0].value().to_vec());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_compare_and_put_request_trans() {
|
||||
let (key, expect, value) = (
|
||||
|
||||
@@ -28,6 +28,7 @@ http-body = "0.4"
|
||||
lazy_static = "1.4"
|
||||
parking_lot = "0.12"
|
||||
prost.workspace = true
|
||||
rand.workspace = true
|
||||
regex = "1.6"
|
||||
serde = "1.0"
|
||||
serde_json = "1.0"
|
||||
|
||||
@@ -22,6 +22,7 @@ use api::v1::meta::store_server::StoreServer;
|
||||
use etcd_client::Client;
|
||||
use snafu::ResultExt;
|
||||
use tokio::net::TcpListener;
|
||||
use tokio::sync::mpsc::{self, Receiver, Sender};
|
||||
use tokio_stream::wrappers::TcpListenerStream;
|
||||
use tonic::transport::server::Router;
|
||||
|
||||
@@ -44,44 +45,65 @@ pub struct MetaSrvInstance {
|
||||
meta_srv: MetaSrv,
|
||||
|
||||
opts: MetaSrvOptions,
|
||||
|
||||
signal_sender: Option<Sender<()>>,
|
||||
}
|
||||
|
||||
impl MetaSrvInstance {
|
||||
pub async fn new(opts: MetaSrvOptions) -> Result<MetaSrvInstance> {
|
||||
let meta_srv = build_meta_srv(&opts).await?;
|
||||
|
||||
Ok(MetaSrvInstance { meta_srv, opts })
|
||||
Ok(MetaSrvInstance {
|
||||
meta_srv,
|
||||
opts,
|
||||
signal_sender: None,
|
||||
})
|
||||
}
|
||||
|
||||
pub async fn start(&self) -> Result<()> {
|
||||
pub async fn start(&mut self) -> Result<()> {
|
||||
self.meta_srv.start().await;
|
||||
bootstrap_meta_srv_with_router(&self.opts.bind_addr, router(self.meta_srv.clone())).await?;
|
||||
let (tx, mut rx) = mpsc::channel::<()>(1);
|
||||
|
||||
self.signal_sender = Some(tx);
|
||||
|
||||
bootstrap_meta_srv_with_router(
|
||||
&self.opts.bind_addr,
|
||||
router(self.meta_srv.clone()),
|
||||
&mut rx,
|
||||
)
|
||||
.await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn close(&self) -> Result<()> {
|
||||
// TODO: shutdown the router
|
||||
pub async fn shutdown(&self) -> Result<()> {
|
||||
if let Some(signal) = &self.signal_sender {
|
||||
signal
|
||||
.send(())
|
||||
.await
|
||||
.context(error::SendShutdownSignalSnafu)?;
|
||||
}
|
||||
|
||||
self.meta_srv.shutdown();
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
// Bootstrap the rpc server to serve incoming requests
|
||||
pub async fn bootstrap_meta_srv(opts: MetaSrvOptions) -> Result<()> {
|
||||
let meta_srv = make_meta_srv(&opts).await?;
|
||||
bootstrap_meta_srv_with_router(&opts.bind_addr, router(meta_srv)).await
|
||||
}
|
||||
|
||||
pub async fn bootstrap_meta_srv_with_router(bind_addr: &str, router: Router) -> Result<()> {
|
||||
pub async fn bootstrap_meta_srv_with_router(
|
||||
bind_addr: &str,
|
||||
router: Router,
|
||||
signal: &mut Receiver<()>,
|
||||
) -> Result<()> {
|
||||
let listener = TcpListener::bind(bind_addr)
|
||||
.await
|
||||
.context(error::TcpBindSnafu { addr: bind_addr })?;
|
||||
let listener = TcpListenerStream::new(listener);
|
||||
|
||||
router
|
||||
.serve_with_incoming(listener)
|
||||
.serve_with_incoming_shutdown(listener, async {
|
||||
signal.recv().await;
|
||||
})
|
||||
.await
|
||||
.context(error::StartGrpcSnafu)?;
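For context, the shutdown wiring above can be read as one self-contained function; the sketch below is a simplified illustration (boxed errors instead of the crate's snafu error types, and the name serve_until_signal is made up), not the exact code in this diff.

use tokio::net::TcpListener;
use tokio::sync::mpsc::Receiver;
use tokio_stream::wrappers::TcpListenerStream;
use tonic::transport::server::Router;

// Serve the router until a `()` arrives on the shutdown channel
// (or until every sender has been dropped).
async fn serve_until_signal(
    bind_addr: &str,
    router: Router,
    signal: &mut Receiver<()>,
) -> Result<(), Box<dyn std::error::Error>> {
    let listener = TcpListener::bind(bind_addr).await?;
    let incoming = TcpListenerStream::new(listener);

    router
        .serve_with_incoming_shutdown(incoming, async {
            let _ = signal.recv().await;
        })
        .await?;

    Ok(())
}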
|
||||
|
||||
|
||||
@@ -261,7 +261,7 @@ mod tests {
|
||||
let stat_val = StatValue { stats: vec![stat] }.try_into().unwrap();
|
||||
|
||||
let kv = KeyValue {
|
||||
key: stat_key.clone().into(),
|
||||
key: stat_key.into(),
|
||||
value: stat_val,
|
||||
};
|
||||
|
||||
|
||||
@@ -15,12 +15,16 @@
|
||||
use std::string::FromUtf8Error;
|
||||
|
||||
use common_error::prelude::*;
|
||||
use tokio::sync::mpsc::error::SendError;
|
||||
use tonic::codegen::http;
|
||||
use tonic::{Code, Status};
|
||||
|
||||
#[derive(Debug, Snafu)]
|
||||
#[snafu(visibility(pub))]
|
||||
pub enum Error {
|
||||
#[snafu(display("Failed to send shutdown signal"))]
|
||||
SendShutdownSignal { source: SendError<()> },
|
||||
|
||||
#[snafu(display("Error stream request next is None"))]
|
||||
StreamNone { backtrace: Backtrace },
|
||||
|
||||
@@ -312,6 +316,7 @@ impl ErrorExt for Error {
|
||||
| Error::LeaseGrant { .. }
|
||||
| Error::LockNotConfig { .. }
|
||||
| Error::ExceededRetryLimit { .. }
|
||||
| Error::SendShutdownSignal { .. }
|
||||
| Error::StartGrpc { .. } => StatusCode::Internal,
|
||||
Error::EmptyKey { .. }
|
||||
| Error::MissingRequiredParameter { .. }
|
||||
|
||||
546
src/meta-srv/src/failure_detector.rs
Normal file
@@ -0,0 +1,546 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::VecDeque;
|
||||
|
||||
/// This is our port of Akka's "[PhiAccrualFailureDetector](https://github.com/akka/akka/blob/main/akka-remote/src/main/scala/akka/remote/PhiAccrualFailureDetector.scala)"
|
||||
/// You can find its documentation here:
|
||||
/// https://doc.akka.io/docs/akka/current/typed/failure-detector.html
|
||||
///
|
||||
/// Implementation of 'The Phi Accrual Failure Detector' by Hayashibara et al. as defined in their
|
||||
/// paper: [https://oneofus.la/have-emacs-will-hack/files/HDY04.pdf]
|
||||
///
|
||||
/// The suspicion level of failure is given by a value called φ (phi).
|
||||
/// The basic idea of the φ failure detector is to express the value of φ on a scale that
|
||||
/// is dynamically adjusted to reflect current network conditions. A configurable
|
||||
/// threshold is used to decide if φ is considered to be a failure.
|
||||
///
|
||||
/// The value of φ is calculated as:
|
||||
///
|
||||
/// φ = -log10(1 - F(timeSinceLastHeartbeat))
|
||||
///
|
||||
/// where F is the cumulative distribution function of a normal distribution with mean
|
||||
/// and standard deviation estimated from historical heartbeat inter-arrival times.
|
||||
#[cfg_attr(test, derive(Clone))]
|
||||
pub(crate) struct PhiAccrualFailureDetector {
|
||||
/// A low threshold is prone to generate many wrong suspicions but ensures a quick detection
|
||||
/// in the event of a real crash. Conversely, a high threshold generates fewer mistakes but
|
||||
/// needs more time to detect actual crashes.
|
||||
threshold: f32,
|
||||
|
||||
/// Minimum standard deviation to use for the normal distribution used when calculating phi.
|
||||
/// A standard deviation that is too low might result in too much sensitivity to sudden,
/// but normal, deviations in heartbeat inter-arrival times.
|
||||
min_std_deviation_millis: f32,
|
||||
|
||||
/// Duration corresponding to the number of potentially lost/delayed heartbeats that will be
/// accepted before considering it to be an anomaly.
/// This margin is important to be able to survive sudden, occasional pauses in heartbeat
/// arrivals, due to, for example, network drops.
|
||||
acceptable_heartbeat_pause_millis: u32,
|
||||
|
||||
/// Bootstrap the stats with heartbeats that corresponds to this duration, with a rather high
|
||||
/// standard deviation (since environment is unknown in the beginning).
|
||||
first_heartbeat_estimate_millis: u32,
|
||||
|
||||
heartbeat_history: HeartbeatHistory,
|
||||
last_heartbeat_millis: Option<i64>,
|
||||
}
|
||||
|
||||
impl Default for PhiAccrualFailureDetector {
|
||||
fn default() -> Self {
|
||||
// the default configuration is the same as Akka's:
|
||||
// https://github.com/akka/akka/blob/main/akka-cluster/src/main/resources/reference.conf#L181
|
||||
Self {
|
||||
threshold: 8_f32,
|
||||
min_std_deviation_millis: 100_f32,
|
||||
acceptable_heartbeat_pause_millis: 3000,
|
||||
first_heartbeat_estimate_millis: 1000,
|
||||
heartbeat_history: HeartbeatHistory::new(1000),
|
||||
last_heartbeat_millis: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl PhiAccrualFailureDetector {
|
||||
pub(crate) fn heartbeat(&mut self, ts_millis: i64) {
|
||||
if let Some(last_heartbeat_millis) = self.last_heartbeat_millis {
|
||||
if ts_millis < last_heartbeat_millis {
|
||||
return;
|
||||
}
|
||||
|
||||
if self.is_available(ts_millis) {
|
||||
let interval = ts_millis - last_heartbeat_millis;
|
||||
self.heartbeat_history.add(interval)
|
||||
}
|
||||
} else {
|
||||
// guess statistics for first heartbeat,
|
||||
// important so that connections with only one heartbeat become unavailable
|
||||
// bootstrap with 2 entries with rather high standard deviation
|
||||
let std_deviation = self.first_heartbeat_estimate_millis / 4;
|
||||
self.heartbeat_history
|
||||
.add((self.first_heartbeat_estimate_millis - std_deviation) as _);
|
||||
self.heartbeat_history
|
||||
.add((self.first_heartbeat_estimate_millis + std_deviation) as _);
|
||||
}
|
||||
let _ = self.last_heartbeat_millis.insert(ts_millis);
|
||||
}
|
||||
|
||||
pub(crate) fn is_available(&self, ts_millis: i64) -> bool {
|
||||
self.phi(ts_millis) < self.threshold as _
|
||||
}
|
||||
|
||||
/// The suspicion level of the accrual failure detector.
|
||||
///
|
||||
/// If a connection does not have any records in failure detector then it is considered healthy.
|
||||
pub(crate) fn phi(&self, ts_millis: i64) -> f64 {
|
||||
if let Some(last_heartbeat_millis) = self.last_heartbeat_millis {
|
||||
let time_diff = ts_millis - last_heartbeat_millis;
|
||||
let mean = self.heartbeat_history.mean();
|
||||
let std_deviation = self
|
||||
.heartbeat_history
|
||||
.std_deviation()
|
||||
.max(self.min_std_deviation_millis as _);
|
||||
|
||||
phi(
|
||||
time_diff,
|
||||
mean + self.acceptable_heartbeat_pause_millis as f64,
|
||||
std_deviation,
|
||||
)
|
||||
} else {
|
||||
// treat unmanaged connections, e.g. with zero heartbeats, as healthy connections
|
||||
0.0
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub(crate) fn threshold(&self) -> f32 {
|
||||
self.threshold
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub(crate) fn acceptable_heartbeat_pause_millis(&self) -> u32 {
|
||||
self.acceptable_heartbeat_pause_millis
|
||||
}
|
||||
}
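A minimal driving sketch of the detector (illustration only, mirroring what the tests below do): feed it periodic heartbeats, then query availability at later timestamps. The function name example_usage is hypothetical.

use common_time::util::current_time_millis;

fn example_usage() {
    let mut detector = PhiAccrualFailureDetector::default();
    let mut now = current_time_millis();
    // One heartbeat per second for ten seconds.
    for _ in 0..10 {
        detector.heartbeat(now);
        now += 1000;
    }
    // Still within the acceptable heartbeat pause: considered available.
    assert!(detector.is_available(now + 1000));
    // Long after the last heartbeat: phi exceeds the threshold.
    assert!(!detector.is_available(now + 60_000));
}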
|
||||
|
||||
/// Calculation of phi, derived from the Cumulative distribution function for
|
||||
/// N(mean, stdDeviation) normal distribution, given by
|
||||
/// 1.0 / (1.0 + math.exp(-y * (1.5976 + 0.070566 * y * y)))
|
||||
/// where y = (x - mean) / standard_deviation
|
||||
/// This is an approximation defined in β Mathematics Handbook (Logistic approximation).
|
||||
/// Error is 0.00014 at +- 3.16
|
||||
/// The calculated value is equivalent to -log10(1 - CDF(y))
|
||||
///
|
||||
/// Usually phi = 1 means the likelihood that we will make a mistake is about 10%.
/// The likelihood is about 1% with phi = 2, 0.1% with phi = 3, and so on.
|
||||
fn phi(time_diff: i64, mean: f64, std_deviation: f64) -> f64 {
|
||||
assert_ne!(std_deviation, 0.0);
|
||||
|
||||
let time_diff = time_diff as f64;
|
||||
let y = (time_diff - mean) / std_deviation;
|
||||
let e = (-y * (1.5976 + 0.070566 * y * y)).exp();
|
||||
if time_diff > mean {
|
||||
-(e / (1.0 + e)).log10()
|
||||
} else {
|
||||
-(1.0 - 1.0 / (1.0 + e)).log10()
|
||||
}
|
||||
}
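Since phi is defined as -log10 of the probability of a wrong suspicion, the inverse relation is a one-liner; a small, hypothetical helper that makes the doc comment above concrete:

// Probability of wrongly suspecting a node that is in fact alive,
// given its current suspicion level `phi`: p = 10^(-phi).
fn mistake_probability(phi: f64) -> f64 {
    10.0_f64.powf(-phi)
}
// phi = 1 -> ~10%, phi = 2 -> ~1%, phi = 3 -> ~0.1%.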
|
||||
|
||||
/// Holds the heartbeat statistics.
|
||||
/// It is capped by the number of samples specified in `max_sample_size`.
|
||||
///
|
||||
/// The stats (mean, variance, std_deviation) are not defined for empty HeartbeatHistory.
|
||||
#[derive(Clone)]
|
||||
struct HeartbeatHistory {
|
||||
/// Number of samples to use for calculation of mean and standard deviation of inter-arrival
|
||||
/// times.
|
||||
max_sample_size: u32,
|
||||
|
||||
intervals: VecDeque<i64>,
|
||||
interval_sum: i64,
|
||||
squared_interval_sum: i64,
|
||||
}
|
||||
|
||||
impl HeartbeatHistory {
|
||||
fn new(max_sample_size: u32) -> Self {
|
||||
Self {
|
||||
max_sample_size,
|
||||
intervals: VecDeque::with_capacity(max_sample_size as usize),
|
||||
interval_sum: 0,
|
||||
squared_interval_sum: 0,
|
||||
}
|
||||
}
|
||||
|
||||
fn mean(&self) -> f64 {
|
||||
self.interval_sum as f64 / self.intervals.len() as f64
|
||||
}
|
||||
|
||||
fn variance(&self) -> f64 {
|
||||
let mean = self.mean();
|
||||
self.squared_interval_sum as f64 / self.intervals.len() as f64 - mean * mean
|
||||
}
|
||||
|
||||
fn std_deviation(&self) -> f64 {
|
||||
self.variance().sqrt()
|
||||
}
|
||||
|
||||
fn add(&mut self, interval: i64) {
|
||||
if self.intervals.len() as u32 >= self.max_sample_size {
|
||||
self.drop_oldest();
|
||||
}
|
||||
self.intervals.push_back(interval);
|
||||
self.interval_sum += interval;
|
||||
self.squared_interval_sum += interval * interval;
|
||||
}
|
||||
|
||||
fn drop_oldest(&mut self) {
|
||||
let oldest = self
|
||||
.intervals
|
||||
.pop_front()
|
||||
.expect("intervals must not be empty here");
|
||||
self.interval_sum -= oldest;
|
||||
self.squared_interval_sum -= oldest * oldest;
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use common_time::util::current_time_millis;
|
||||
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_is_available() {
|
||||
let ts_millis = current_time_millis();
|
||||
|
||||
let mut fd = PhiAccrualFailureDetector::default();
|
||||
|
||||
// is available before first heartbeat
|
||||
assert!(fd.is_available(ts_millis));
|
||||
|
||||
fd.heartbeat(ts_millis);
|
||||
|
||||
let acceptable_heartbeat_pause_millis = fd.acceptable_heartbeat_pause_millis as i64;
|
||||
// is available when heartbeat
|
||||
assert!(fd.is_available(ts_millis));
|
||||
// is available before heartbeat timeout
|
||||
assert!(fd.is_available(ts_millis + acceptable_heartbeat_pause_millis / 2));
|
||||
// is not available after heartbeat timeout
|
||||
assert!(!fd.is_available(ts_millis + acceptable_heartbeat_pause_millis * 2));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_last_heartbeat() {
|
||||
let ts_millis = current_time_millis();
|
||||
|
||||
let mut fd = PhiAccrualFailureDetector::default();
|
||||
|
||||
// no heartbeat yet
|
||||
assert!(fd.last_heartbeat_millis.is_none());
|
||||
|
||||
fd.heartbeat(ts_millis);
|
||||
assert_eq!(fd.last_heartbeat_millis, Some(ts_millis));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_phi() {
|
||||
let ts_millis = current_time_millis();
|
||||
|
||||
let mut fd = PhiAccrualFailureDetector::default();
|
||||
|
||||
// phi == 0 before first heartbeat
|
||||
assert_eq!(fd.phi(ts_millis), 0.0);
|
||||
|
||||
fd.heartbeat(ts_millis);
|
||||
|
||||
let acceptable_heartbeat_pause_millis = fd.acceptable_heartbeat_pause_millis as i64;
|
||||
// phi == 0 when heartbeat
|
||||
assert_eq!(fd.phi(ts_millis), 0.0);
|
||||
// phi < threshold before heartbeat timeout
|
||||
let now = ts_millis + acceptable_heartbeat_pause_millis / 2;
|
||||
assert!(fd.phi(now) < fd.threshold as _);
|
||||
// phi >= threshold after heartbeat timeout
|
||||
let now = ts_millis + acceptable_heartbeat_pause_millis * 2;
|
||||
assert!(fd.phi(now) >= fd.threshold as _);
|
||||
}
|
||||
|
||||
// The following test cases are ported from Akka's tests:
|
||||
// [AccrualFailureDetectorSpec.scala](https://github.com/akka/akka/blob/main/akka-remote/src/test/scala/akka/remote/AccrualFailureDetectorSpec.scala).
|
||||
|
||||
#[test]
|
||||
fn test_use_good_enough_cumulative_distribution_function() {
|
||||
fn cdf(phi: f64) -> f64 {
|
||||
1.0 - 10.0_f64.powf(-phi)
|
||||
}
|
||||
|
||||
assert!((cdf(phi(0, 0.0, 10.0)) - 0.5).abs() < 0.001);
|
||||
assert!((cdf(phi(6, 0.0, 10.0)) - 0.7257).abs() < 0.001);
|
||||
assert!((cdf(phi(15, 0.0, 10.0)) - 0.9332).abs() < 0.001);
|
||||
assert!((cdf(phi(20, 0.0, 10.0)) - 0.97725).abs() < 0.001);
|
||||
assert!((cdf(phi(25, 0.0, 10.0)) - 0.99379).abs() < 0.001);
|
||||
assert!((cdf(phi(35, 0.0, 10.0)) - 0.99977).abs() < 0.001);
|
||||
assert!((cdf(phi(40, 0.0, 10.0)) - 0.99997).abs() < 0.0001);
|
||||
|
||||
for w in (0..40).collect::<Vec<i64>>().windows(2) {
|
||||
assert!(phi(w[0], 0.0, 10.0) < phi(w[1], 0.0, 10.0));
|
||||
}
|
||||
|
||||
assert!((cdf(phi(22, 20.0, 3.0)) - 0.7475).abs() < 0.001);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_handle_outliers_without_losing_precision_or_hitting_exceptions() {
|
||||
assert!((phi(10, 0.0, 1.0) - 38.0).abs() < 1.0);
|
||||
assert_eq!(phi(-25, 0.0, 1.0), 0.0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_return_realistic_phi_values() {
|
||||
let test = vec![
|
||||
(0, 0.0),
|
||||
(500, 0.1),
|
||||
(1000, 0.3),
|
||||
(1200, 1.6),
|
||||
(1400, 4.7),
|
||||
(1600, 10.8),
|
||||
(1700, 15.3),
|
||||
];
|
||||
for (time_diff, expected_phi) in test {
|
||||
assert!((phi(time_diff, 1000.0, 100.0) - expected_phi).abs() < 0.1);
|
||||
}
|
||||
|
||||
// larger std_deviation results => lower phi
|
||||
assert!(phi(1100, 1000.0, 500.0) < phi(1100, 1000.0, 100.0));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_return_phi_of_0_on_startup_when_no_heartbeats() {
|
||||
let fd = PhiAccrualFailureDetector {
|
||||
threshold: 8.0,
|
||||
min_std_deviation_millis: 100.0,
|
||||
acceptable_heartbeat_pause_millis: 0,
|
||||
first_heartbeat_estimate_millis: 1000,
|
||||
heartbeat_history: HeartbeatHistory::new(1000),
|
||||
last_heartbeat_millis: None,
|
||||
};
|
||||
assert_eq!(fd.phi(current_time_millis()), 0.0);
|
||||
assert_eq!(fd.phi(current_time_millis()), 0.0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_return_phi_based_on_guess_when_only_one_heartbeat() {
|
||||
let mut fd = PhiAccrualFailureDetector {
|
||||
threshold: 8.0,
|
||||
min_std_deviation_millis: 100.0,
|
||||
acceptable_heartbeat_pause_millis: 0,
|
||||
first_heartbeat_estimate_millis: 1000,
|
||||
heartbeat_history: HeartbeatHistory::new(1000),
|
||||
last_heartbeat_millis: None,
|
||||
};
|
||||
fd.heartbeat(0);
|
||||
assert!((fd.phi(1000) - 0.3).abs() < 0.2);
assert!((fd.phi(2000) - 4.5).abs() < 0.3);
|
||||
assert!((fd.phi(3000)).abs() > 15.0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_return_phi_using_first_interval_after_second_heartbeat() {
|
||||
let mut fd = PhiAccrualFailureDetector {
|
||||
threshold: 8.0,
|
||||
min_std_deviation_millis: 100.0,
|
||||
acceptable_heartbeat_pause_millis: 0,
|
||||
first_heartbeat_estimate_millis: 1000,
|
||||
heartbeat_history: HeartbeatHistory::new(1000),
|
||||
last_heartbeat_millis: None,
|
||||
};
|
||||
fd.heartbeat(0);
|
||||
assert!(fd.phi(100) > 0.0);
|
||||
fd.heartbeat(200);
|
||||
assert!(fd.phi(300) > 0.0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_is_available_after_a_series_of_successful_heartbeats() {
|
||||
let mut fd = PhiAccrualFailureDetector {
|
||||
threshold: 8.0,
|
||||
min_std_deviation_millis: 100.0,
|
||||
acceptable_heartbeat_pause_millis: 0,
|
||||
first_heartbeat_estimate_millis: 1000,
|
||||
heartbeat_history: HeartbeatHistory::new(1000),
|
||||
last_heartbeat_millis: None,
|
||||
};
|
||||
assert!(fd.last_heartbeat_millis.is_none());
|
||||
fd.heartbeat(0);
|
||||
fd.heartbeat(1000);
|
||||
fd.heartbeat(1100);
|
||||
assert!(fd.last_heartbeat_millis.is_some());
|
||||
assert!(fd.is_available(1200));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_is_not_available_if_heartbeat_are_missed() {
|
||||
let mut fd = PhiAccrualFailureDetector {
|
||||
threshold: 3.0,
|
||||
min_std_deviation_millis: 100.0,
|
||||
acceptable_heartbeat_pause_millis: 0,
|
||||
first_heartbeat_estimate_millis: 1000,
|
||||
heartbeat_history: HeartbeatHistory::new(1000),
|
||||
last_heartbeat_millis: None,
|
||||
};
|
||||
fd.heartbeat(0);
|
||||
fd.heartbeat(1000);
|
||||
fd.heartbeat(1100);
|
||||
assert!(fd.is_available(1200));
|
||||
assert!(!fd.is_available(8200));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_is_available_if_it_starts_heartbeat_again_after_being_marked_dead_due_to_detection_of_failure(
|
||||
) {
|
||||
let mut fd = PhiAccrualFailureDetector {
|
||||
threshold: 8.0,
|
||||
min_std_deviation_millis: 100.0,
|
||||
acceptable_heartbeat_pause_millis: 3000,
|
||||
first_heartbeat_estimate_millis: 1000,
|
||||
heartbeat_history: HeartbeatHistory::new(1000),
|
||||
last_heartbeat_millis: None,
|
||||
};
|
||||
|
||||
// 1000 regular intervals, a 5-minute pause, and then a shorter pause that should
// trigger unreachability again
|
||||
|
||||
let mut now = 0;
|
||||
for _ in 0..1000 {
|
||||
fd.heartbeat(now);
|
||||
now += 1000;
|
||||
}
|
||||
now += 5 * 60 * 1000;
|
||||
assert!(!fd.is_available(now)); // after the long pause
|
||||
now += 100;
|
||||
fd.heartbeat(now);
|
||||
now += 900;
|
||||
assert!(fd.is_available(now));
|
||||
now += 100;
|
||||
fd.heartbeat(now);
|
||||
now += 7000;
|
||||
assert!(!fd.is_available(now)); // after the 7 seconds pause
|
||||
now += 100;
|
||||
fd.heartbeat(now);
|
||||
now += 900;
|
||||
assert!(fd.is_available(now));
|
||||
now += 100;
|
||||
fd.heartbeat(now);
|
||||
now += 900;
|
||||
assert!(fd.is_available(now));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_accept_some_configured_missing_heartbeats() {
|
||||
let mut fd = PhiAccrualFailureDetector {
|
||||
threshold: 8.0,
|
||||
min_std_deviation_millis: 100.0,
|
||||
acceptable_heartbeat_pause_millis: 3000,
|
||||
first_heartbeat_estimate_millis: 1000,
|
||||
heartbeat_history: HeartbeatHistory::new(1000),
|
||||
last_heartbeat_millis: None,
|
||||
};
|
||||
fd.heartbeat(0);
|
||||
fd.heartbeat(1000);
|
||||
fd.heartbeat(2000);
|
||||
fd.heartbeat(3000);
|
||||
assert!(fd.is_available(7000));
|
||||
fd.heartbeat(8000);
|
||||
assert!(fd.is_available(9000));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_fail_after_configured_acceptable_missing_heartbeats() {
|
||||
let mut fd = PhiAccrualFailureDetector {
|
||||
threshold: 8.0,
|
||||
min_std_deviation_millis: 100.0,
|
||||
acceptable_heartbeat_pause_millis: 3000,
|
||||
first_heartbeat_estimate_millis: 1000,
|
||||
heartbeat_history: HeartbeatHistory::new(1000),
|
||||
last_heartbeat_millis: None,
|
||||
};
|
||||
fd.heartbeat(0);
|
||||
fd.heartbeat(1000);
|
||||
fd.heartbeat(2000);
|
||||
fd.heartbeat(3000);
|
||||
fd.heartbeat(4000);
|
||||
fd.heartbeat(5000);
|
||||
assert!(fd.is_available(5500));
|
||||
fd.heartbeat(6000);
|
||||
assert!(!fd.is_available(11000));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_use_max_sample_size_heartbeats() {
|
||||
let mut fd = PhiAccrualFailureDetector {
|
||||
threshold: 8.0,
|
||||
min_std_deviation_millis: 100.0,
|
||||
acceptable_heartbeat_pause_millis: 0,
|
||||
first_heartbeat_estimate_millis: 1000,
|
||||
heartbeat_history: HeartbeatHistory::new(3),
|
||||
last_heartbeat_millis: None,
|
||||
};
|
||||
// 100 ms interval
|
||||
fd.heartbeat(0);
|
||||
fd.heartbeat(100);
|
||||
fd.heartbeat(200);
|
||||
fd.heartbeat(300);
|
||||
let phi1 = fd.phi(400);
|
||||
// 500 ms interval, should yield the same phi once the 100 ms intervals have been dropped
|
||||
fd.heartbeat(1000);
|
||||
fd.heartbeat(1500);
|
||||
fd.heartbeat(2000);
|
||||
fd.heartbeat(2500);
|
||||
let phi2 = fd.phi(3000);
|
||||
assert_eq!(phi1, phi2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heartbeat_history_calculate_correct_mean_and_variance() {
|
||||
let mut history = HeartbeatHistory::new(20);
|
||||
for i in [100, 200, 125, 340, 130] {
|
||||
history.add(i);
|
||||
}
|
||||
assert!((history.mean() - 179.0).abs() < 0.00001);
|
||||
assert!((history.variance() - 7584.0).abs() < 0.00001);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heartbeat_history_have_0_variance_for_one_sample() {
|
||||
let mut history = HeartbeatHistory::new(600);
|
||||
history.add(1000);
|
||||
assert!((history.variance() - 0.0).abs() < 0.00001);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heartbeat_history_be_capped_by_the_specified_max_sample_size() {
|
||||
let mut history = HeartbeatHistory::new(3);
|
||||
history.add(100);
|
||||
history.add(110);
|
||||
history.add(90);
|
||||
assert!((history.mean() - 100.0).abs() < 0.00001);
|
||||
assert!((history.variance() - 66.6666667).abs() < 0.00001);
|
||||
history.add(140);
|
||||
assert!((history.mean() - 113.333333).abs() < 0.00001);
|
||||
assert!((history.variance() - 422.222222).abs() < 0.00001);
|
||||
history.add(80);
|
||||
assert!((history.mean() - 103.333333).abs() < 0.00001);
|
||||
assert!((history.variance() - 688.88888889).abs() < 0.00001);
|
||||
}
|
||||
}
|
||||
@@ -14,6 +14,7 @@
|
||||
|
||||
pub use check_leader_handler::CheckLeaderHandler;
|
||||
pub use collect_stats_handler::CollectStatsHandler;
|
||||
pub use failure_handler::RegionFailureHandler;
|
||||
pub use keep_lease_handler::KeepLeaseHandler;
|
||||
pub use on_leader_start::OnLeaderStartHandler;
|
||||
pub use persist_stats_handler::PersistStatsHandler;
|
||||
@@ -21,6 +22,7 @@ pub use response_header_handler::ResponseHeaderHandler;
|
||||
|
||||
mod check_leader_handler;
|
||||
mod collect_stats_handler;
|
||||
mod failure_handler;
|
||||
mod instruction;
|
||||
mod keep_lease_handler;
|
||||
pub mod node_stat;
|
||||
@@ -54,8 +56,8 @@ pub trait HeartbeatHandler: Send + Sync {
|
||||
#[derive(Debug, Default)]
|
||||
pub struct HeartbeatAccumulator {
|
||||
pub header: Option<ResponseHeader>,
|
||||
pub stats: Vec<Stat>,
|
||||
pub instructions: Vec<Instruction>,
|
||||
pub stat: Option<Stat>,
|
||||
}
|
||||
|
||||
impl HeartbeatAccumulator {
|
||||
|
||||
@@ -12,39 +12,15 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::VecDeque;
|
||||
|
||||
use api::v1::meta::HeartbeatRequest;
|
||||
use common_telemetry::debug;
|
||||
use dashmap::mapref::entry::Entry;
|
||||
use dashmap::DashMap;
|
||||
|
||||
use super::node_stat::Stat;
|
||||
use crate::error::Result;
|
||||
use crate::handler::{HeartbeatAccumulator, HeartbeatHandler};
|
||||
use crate::metasrv::Context;
|
||||
|
||||
type StatKey = (u64, u64);
|
||||
|
||||
pub struct CollectStatsHandler {
|
||||
max_cached_stats_per_key: usize,
|
||||
cache: DashMap<StatKey, VecDeque<Stat>>,
|
||||
}
|
||||
|
||||
impl Default for CollectStatsHandler {
|
||||
fn default() -> Self {
|
||||
Self::new(10)
|
||||
}
|
||||
}
|
||||
|
||||
impl CollectStatsHandler {
|
||||
pub fn new(max_cached_stats_per_key: usize) -> Self {
|
||||
Self {
|
||||
max_cached_stats_per_key,
|
||||
cache: DashMap::new(),
|
||||
}
|
||||
}
|
||||
}
|
||||
pub struct CollectStatsHandler;
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl HeartbeatHandler for CollectStatsHandler {
|
||||
@@ -60,21 +36,7 @@ impl HeartbeatHandler for CollectStatsHandler {
|
||||
|
||||
match Stat::try_from(req.clone()) {
|
||||
Ok(stat) => {
|
||||
let key = (stat.cluster_id, stat.id);
|
||||
match self.cache.entry(key) {
|
||||
Entry::Occupied(mut e) => {
|
||||
let deque = e.get_mut();
|
||||
deque.push_front(stat);
|
||||
if deque.len() >= self.max_cached_stats_per_key {
|
||||
acc.stats = deque.drain(..).collect();
|
||||
}
|
||||
}
|
||||
Entry::Vacant(e) => {
|
||||
let mut stat_vec = VecDeque::with_capacity(self.max_cached_stats_per_key);
|
||||
stat_vec.push_front(stat);
|
||||
e.insert(stat_vec);
|
||||
}
|
||||
}
|
||||
let _ = acc.stat.insert(stat);
|
||||
}
|
||||
Err(_) => {
|
||||
debug!("Incomplete heartbeat data: {:?}", req);
|
||||
|
||||
151
src/meta-srv/src/handler/failure_handler.rs
Normal file
@@ -0,0 +1,151 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
mod runner;
|
||||
|
||||
use api::v1::meta::HeartbeatRequest;
|
||||
use async_trait::async_trait;
|
||||
|
||||
use crate::error::Result;
|
||||
use crate::handler::failure_handler::runner::{FailureDetectControl, FailureDetectRunner};
|
||||
use crate::handler::{HeartbeatAccumulator, HeartbeatHandler};
|
||||
use crate::metasrv::{Context, ElectionRef};
|
||||
|
||||
#[derive(Eq, Hash, PartialEq, Clone)]
|
||||
pub(crate) struct RegionIdent {
|
||||
catalog: String,
|
||||
schema: String,
|
||||
table: String,
|
||||
region_id: u64,
|
||||
}
|
||||
|
||||
// TODO(LFC): TBC
|
||||
pub(crate) struct DatanodeHeartbeat {
|
||||
#[allow(dead_code)]
|
||||
cluster_id: u64,
|
||||
#[allow(dead_code)]
|
||||
node_id: u64,
|
||||
region_idents: Vec<RegionIdent>,
|
||||
heartbeat_time: i64,
|
||||
}
|
||||
|
||||
pub struct RegionFailureHandler {
|
||||
failure_detect_runner: FailureDetectRunner,
|
||||
}
|
||||
|
||||
impl RegionFailureHandler {
|
||||
pub fn new(election: Option<ElectionRef>) -> Self {
|
||||
Self {
|
||||
failure_detect_runner: FailureDetectRunner::new(election),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn start(&mut self) {
|
||||
self.failure_detect_runner.start().await;
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl HeartbeatHandler for RegionFailureHandler {
|
||||
async fn handle(
|
||||
&self,
|
||||
_: &HeartbeatRequest,
|
||||
ctx: &mut Context,
|
||||
acc: &mut HeartbeatAccumulator,
|
||||
) -> Result<()> {
|
||||
if ctx.is_infancy {
|
||||
self.failure_detect_runner
|
||||
.send_control(FailureDetectControl::Purge)
|
||||
.await;
|
||||
}
|
||||
|
||||
if ctx.is_skip_all() {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let Some(stat) = acc.stat.as_ref() else { return Ok(()) };
|
||||
|
||||
let heartbeat = DatanodeHeartbeat {
|
||||
cluster_id: stat.cluster_id,
|
||||
node_id: stat.id,
|
||||
region_idents: stat
|
||||
.region_stats
|
||||
.iter()
|
||||
.map(|x| RegionIdent {
|
||||
catalog: x.catalog.clone(),
|
||||
schema: x.schema.clone(),
|
||||
table: x.table.clone(),
|
||||
region_id: x.id,
|
||||
})
|
||||
.collect(),
|
||||
heartbeat_time: stat.timestamp_millis,
|
||||
};
|
||||
|
||||
self.failure_detect_runner.send_heartbeat(heartbeat).await;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::handler::node_stat::{RegionStat, Stat};
|
||||
use crate::metasrv::builder::MetaSrvBuilder;
|
||||
|
||||
#[tokio::test(flavor = "multi_thread")]
|
||||
async fn test_handle_heartbeat() {
|
||||
let mut handler = RegionFailureHandler::new(None);
|
||||
handler.start().await;
|
||||
|
||||
let req = &HeartbeatRequest::default();
|
||||
|
||||
let builder = MetaSrvBuilder::new();
|
||||
let metasrv = builder.build().await;
|
||||
let mut ctx = metasrv.new_ctx();
|
||||
ctx.is_infancy = false;
|
||||
|
||||
let acc = &mut HeartbeatAccumulator::default();
|
||||
fn new_region_stat(region_id: u64) -> RegionStat {
|
||||
RegionStat {
|
||||
id: region_id,
|
||||
catalog: "a".to_string(),
|
||||
schema: "b".to_string(),
|
||||
table: "c".to_string(),
|
||||
rcus: 0,
|
||||
wcus: 0,
|
||||
approximate_bytes: 0,
|
||||
approximate_rows: 0,
|
||||
}
|
||||
}
|
||||
acc.stat = Some(Stat {
|
||||
cluster_id: 1,
|
||||
id: 42,
|
||||
region_stats: vec![new_region_stat(1), new_region_stat(2), new_region_stat(3)],
|
||||
timestamp_millis: 1000,
|
||||
..Default::default()
|
||||
});
|
||||
|
||||
handler.handle(req, &mut ctx, acc).await.unwrap();
|
||||
|
||||
let dump = handler.failure_detect_runner.dump().await;
|
||||
assert_eq!(dump.iter().collect::<Vec<_>>().len(), 3);
|
||||
|
||||
// infancy purges the failure detectors, so heartbeats have to be re-accumulated
|
||||
ctx.is_infancy = true;
|
||||
acc.stat = None;
|
||||
handler.handle(req, &mut ctx, acc).await.unwrap();
|
||||
let dump = handler.failure_detect_runner.dump().await;
|
||||
assert_eq!(dump.iter().collect::<Vec<_>>().len(), 0);
|
||||
}
|
||||
}
|
||||
313
src/meta-srv/src/handler/failure_handler/runner.rs
Normal file
@@ -0,0 +1,313 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::ops::DerefMut;
|
||||
use std::sync::Arc;
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
use common_telemetry::{error, warn};
|
||||
use common_time::util::current_time_millis;
|
||||
use dashmap::mapref::multiple::RefMulti;
|
||||
use dashmap::DashMap;
|
||||
use tokio::sync::mpsc;
|
||||
use tokio::sync::mpsc::{Receiver, Sender};
|
||||
use tokio::task::JoinHandle;
|
||||
|
||||
use crate::failure_detector::PhiAccrualFailureDetector;
|
||||
use crate::handler::failure_handler::{DatanodeHeartbeat, RegionIdent};
|
||||
use crate::metasrv::ElectionRef;
|
||||
|
||||
pub(crate) enum FailureDetectControl {
|
||||
Purge,
|
||||
|
||||
#[cfg(test)]
|
||||
Dump(tokio::sync::oneshot::Sender<FailureDetectorContainer>),
|
||||
}
|
||||
|
||||
pub(crate) struct FailureDetectRunner {
|
||||
election: Option<ElectionRef>,
|
||||
|
||||
heartbeat_tx: Sender<DatanodeHeartbeat>,
|
||||
heartbeat_rx: Option<Receiver<DatanodeHeartbeat>>,
|
||||
|
||||
control_tx: Sender<FailureDetectControl>,
|
||||
control_rx: Option<Receiver<FailureDetectControl>>,
|
||||
|
||||
receiver_handle: Option<JoinHandle<()>>,
|
||||
runner_handle: Option<JoinHandle<()>>,
|
||||
}
|
||||
|
||||
impl FailureDetectRunner {
|
||||
pub(crate) fn new(election: Option<ElectionRef>) -> Self {
|
||||
let (heartbeat_tx, heartbeat_rx) = mpsc::channel::<DatanodeHeartbeat>(1024);
|
||||
let (control_tx, control_rx) = mpsc::channel::<FailureDetectControl>(1024);
|
||||
Self {
|
||||
election,
|
||||
heartbeat_tx,
|
||||
heartbeat_rx: Some(heartbeat_rx),
|
||||
control_tx,
|
||||
control_rx: Some(control_rx),
|
||||
receiver_handle: None,
|
||||
runner_handle: None,
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) async fn send_heartbeat(&self, heartbeat: DatanodeHeartbeat) {
|
||||
if let Err(e) = self.heartbeat_tx.send(heartbeat).await {
|
||||
error!("FailureDetectRunner is stop receiving heartbeats: {}", e)
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) async fn send_control(&self, control: FailureDetectControl) {
|
||||
if let Err(e) = self.control_tx.send(control).await {
|
||||
error!("FailureDetectRunner is stop receiving controls: {}", e)
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) async fn start(&mut self) {
|
||||
let failure_detectors = Arc::new(FailureDetectorContainer(DashMap::new()));
|
||||
self.start_with(failure_detectors).await
|
||||
}
|
||||
|
||||
async fn start_with(&mut self, failure_detectors: Arc<FailureDetectorContainer>) {
|
||||
let Some(mut heartbeat_rx) = self.heartbeat_rx.take() else { return };
|
||||
let Some(mut control_rx) = self.control_rx.take() else { return };
|
||||
|
||||
let container = failure_detectors.clone();
|
||||
let receiver_handle = common_runtime::spawn_bg(async move {
|
||||
loop {
|
||||
tokio::select! {
|
||||
Some(control) = control_rx.recv() => {
|
||||
match control {
|
||||
FailureDetectControl::Purge => container.clear(),
|
||||
|
||||
#[cfg(test)]
|
||||
FailureDetectControl::Dump(tx) => {
|
||||
// Drain any heartbeats that are not handled before dump.
|
||||
while let Ok(heartbeat) = heartbeat_rx.try_recv() {
|
||||
for ident in heartbeat.region_idents {
|
||||
let mut detector = container.get_failure_detector(ident);
|
||||
detector.heartbeat(heartbeat.heartbeat_time);
|
||||
}
|
||||
}
|
||||
let _ = tx.send(container.dump());
|
||||
}
|
||||
}
|
||||
}
|
||||
Some(heartbeat) = heartbeat_rx.recv() => {
|
||||
for ident in heartbeat.region_idents {
|
||||
let mut detector = container.get_failure_detector(ident);
|
||||
detector.heartbeat(heartbeat.heartbeat_time);
|
||||
}
|
||||
}
|
||||
else => {
|
||||
warn!("Both control and heartbeat senders are closed, quit receiving.");
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
self.receiver_handle = Some(receiver_handle);
|
||||
|
||||
let election = self.election.clone();
|
||||
let runner_handle = common_runtime::spawn_bg(async move {
|
||||
loop {
|
||||
let start = Instant::now();
|
||||
|
||||
let is_leader = election.as_ref().map(|x| x.is_leader()).unwrap_or(true);
|
||||
if is_leader {
|
||||
for e in failure_detectors.iter() {
|
||||
if e.failure_detector().is_available(current_time_millis()) {
|
||||
// TODO(LFC): TBC
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let elapsed = Instant::now().duration_since(start);
|
||||
if let Some(sleep) = Duration::from_secs(1).checked_sub(elapsed) {
|
||||
tokio::time::sleep(sleep).await;
|
||||
} // else the elapsed time already exceeds one second; continue working immediately
|
||||
}
|
||||
});
|
||||
self.runner_handle = Some(runner_handle);
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
fn abort(&mut self) {
|
||||
let Some(handle) = self.receiver_handle.take() else { return };
|
||||
handle.abort();
|
||||
|
||||
let Some(handle) = self.runner_handle.take() else { return };
|
||||
handle.abort();
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub(crate) async fn dump(&self) -> FailureDetectorContainer {
|
||||
let (tx, rx) = tokio::sync::oneshot::channel();
|
||||
self.send_control(FailureDetectControl::Dump(tx)).await;
|
||||
rx.await.unwrap()
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) struct FailureDetectorEntry<'a> {
|
||||
e: RefMulti<'a, RegionIdent, PhiAccrualFailureDetector>,
|
||||
}
|
||||
|
||||
impl FailureDetectorEntry<'_> {
|
||||
fn failure_detector(&self) -> &PhiAccrualFailureDetector {
|
||||
self.e.value()
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) struct FailureDetectorContainer(DashMap<RegionIdent, PhiAccrualFailureDetector>);
|
||||
|
||||
impl FailureDetectorContainer {
|
||||
fn get_failure_detector(
|
||||
&self,
|
||||
ident: RegionIdent,
|
||||
) -> impl DerefMut<Target = PhiAccrualFailureDetector> + '_ {
|
||||
self.0
|
||||
.entry(ident)
|
||||
.or_insert_with(PhiAccrualFailureDetector::default)
|
||||
}
|
||||
|
||||
pub(crate) fn iter(&self) -> Box<dyn Iterator<Item = FailureDetectorEntry> + '_> {
|
||||
Box::new(self.0.iter().map(move |e| FailureDetectorEntry { e })) as _
|
||||
}
|
||||
|
||||
fn clear(&self) {
|
||||
self.0.clear()
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
fn dump(&self) -> FailureDetectorContainer {
|
||||
let mut m = DashMap::with_capacity(self.0.len());
|
||||
m.extend(self.0.iter().map(|x| (x.key().clone(), x.value().clone())));
|
||||
Self(m)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use rand::Rng;
|
||||
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_default_failure_detector_container() {
|
||||
let container = FailureDetectorContainer(DashMap::new());
|
||||
let ident = RegionIdent {
|
||||
catalog: "a".to_string(),
|
||||
schema: "b".to_string(),
|
||||
table: "c".to_string(),
|
||||
region_id: 1,
|
||||
};
|
||||
let _ = container.get_failure_detector(ident.clone());
|
||||
assert!(container.0.contains_key(&ident));
|
||||
|
||||
{
|
||||
let mut iter = container.iter();
|
||||
assert!(iter.next().is_some());
|
||||
assert!(iter.next().is_none());
|
||||
}
|
||||
|
||||
container.clear();
|
||||
assert!(container.0.is_empty());
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread")]
|
||||
async fn test_control() {
|
||||
let container = FailureDetectorContainer(DashMap::new());
|
||||
|
||||
let ident = RegionIdent {
|
||||
catalog: "a".to_string(),
|
||||
schema: "b".to_string(),
|
||||
table: "c".to_string(),
|
||||
region_id: 1,
|
||||
};
|
||||
container.get_failure_detector(ident.clone());
|
||||
|
||||
let mut runner = FailureDetectRunner::new(None);
|
||||
runner.start_with(Arc::new(container)).await;
|
||||
|
||||
let dump = runner.dump().await;
|
||||
assert_eq!(dump.iter().collect::<Vec<_>>().len(), 1);
|
||||
|
||||
runner.send_control(FailureDetectControl::Purge).await;
|
||||
|
||||
let dump = runner.dump().await;
|
||||
assert_eq!(dump.iter().collect::<Vec<_>>().len(), 0);
|
||||
|
||||
runner.abort();
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread")]
|
||||
async fn test_heartbeat() {
|
||||
let mut runner = FailureDetectRunner::new(None);
|
||||
runner.start().await;
|
||||
|
||||
// Generate 2000 heartbeats starting from now. The heartbeat interval is one second, plus some random millis.
|
||||
fn generate_heartbeats(node_id: u64, region_ids: Vec<u64>) -> Vec<DatanodeHeartbeat> {
|
||||
let mut rng = rand::thread_rng();
|
||||
let start = current_time_millis();
|
||||
(0..2000)
|
||||
.map(|i| DatanodeHeartbeat {
|
||||
cluster_id: 1,
|
||||
node_id,
|
||||
region_idents: region_ids
|
||||
.iter()
|
||||
.map(|&region_id| RegionIdent {
|
||||
catalog: "a".to_string(),
|
||||
schema: "b".to_string(),
|
||||
table: "c".to_string(),
|
||||
region_id,
|
||||
})
|
||||
.collect(),
|
||||
heartbeat_time: start + i * 1000 + rng.gen_range(0..100),
|
||||
})
|
||||
.collect::<Vec<_>>()
|
||||
}
|
||||
|
||||
let heartbeats = generate_heartbeats(100, vec![1, 2, 3]);
|
||||
let last_heartbeat_time = heartbeats.last().unwrap().heartbeat_time;
|
||||
for heartbeat in heartbeats {
|
||||
runner.send_heartbeat(heartbeat).await;
|
||||
}
|
||||
|
||||
let dump = runner.dump().await;
|
||||
let failure_detectors = dump.iter().collect::<Vec<_>>();
|
||||
assert_eq!(failure_detectors.len(), 3);
|
||||
|
||||
failure_detectors.iter().for_each(|e| {
|
||||
let fd = e.failure_detector();
|
||||
let acceptable_heartbeat_pause_millis = fd.acceptable_heartbeat_pause_millis() as i64;
|
||||
let start = last_heartbeat_time;
|
||||
|
||||
// Within the "acceptable_heartbeat_pause_millis" period, phi is zero ...
|
||||
for i in 1..=acceptable_heartbeat_pause_millis / 1000 {
|
||||
let now = start + i * 1000;
|
||||
assert_eq!(fd.phi(now), 0.0);
|
||||
}
|
||||
|
||||
// ... then in less than two seconds, phi is above the threshold.
|
||||
// The same effect can be seen in the diagrams in Akka's documentation.
|
||||
let now = start + acceptable_heartbeat_pause_millis + 1000;
|
||||
assert!(fd.phi(now) < fd.threshold() as _);
|
||||
let now = start + acceptable_heartbeat_pause_millis + 2000;
|
||||
assert!(fd.phi(now) > fd.threshold() as _);
|
||||
});
|
||||
|
||||
runner.abort();
|
||||
}
|
||||
}
|
||||
@@ -31,6 +31,7 @@ impl HeartbeatHandler for OnLeaderStartHandler {
|
||||
) -> Result<()> {
|
||||
if let Some(election) = &ctx.election {
|
||||
if election.in_infancy() {
|
||||
ctx.is_infancy = true;
|
||||
ctx.reset_in_memory();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -13,14 +13,20 @@
|
||||
// limitations under the License.
|
||||
|
||||
use api::v1::meta::{HeartbeatRequest, PutRequest};
|
||||
use dashmap::DashMap;
|
||||
|
||||
use crate::error::Result;
|
||||
use crate::handler::node_stat::Stat;
|
||||
use crate::handler::{HeartbeatAccumulator, HeartbeatHandler};
|
||||
use crate::keys::StatValue;
|
||||
use crate::keys::{StatKey, StatValue};
|
||||
use crate::metasrv::Context;
|
||||
|
||||
const MAX_CACHED_STATS_PER_KEY: usize = 10;
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct PersistStatsHandler;
|
||||
pub struct PersistStatsHandler {
|
||||
stats_cache: DashMap<StatKey, Vec<Stat>>,
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl HeartbeatHandler for PersistStatsHandler {
|
||||
@@ -30,18 +36,25 @@ impl HeartbeatHandler for PersistStatsHandler {
|
||||
ctx: &mut Context,
|
||||
acc: &mut HeartbeatAccumulator,
|
||||
) -> Result<()> {
|
||||
if ctx.is_skip_all() || acc.stats.is_empty() {
|
||||
if ctx.is_skip_all() {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let stats = &mut acc.stats;
|
||||
let key = match stats.get(0) {
|
||||
Some(stat) => stat.stat_key(),
|
||||
None => return Ok(()),
|
||||
};
|
||||
let Some(stat) = acc.stat.take() else { return Ok(()) };
|
||||
|
||||
// take stats from &mut acc.stats, avoid clone of vec
|
||||
let stats = std::mem::take(stats);
|
||||
let key = stat.stat_key();
|
||||
let mut entry = self
|
||||
.stats_cache
|
||||
.entry(key)
|
||||
.or_insert_with(|| Vec::with_capacity(MAX_CACHED_STATS_PER_KEY));
|
||||
let stats = entry.value_mut();
|
||||
stats.push(stat);
|
||||
|
||||
if stats.len() < MAX_CACHED_STATS_PER_KEY {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let stats = stats.drain(..).collect();
|
||||
|
||||
let val = StatValue { stats };
|
||||
|
||||
@@ -65,7 +78,6 @@ mod tests {
|
||||
use api::v1::meta::RangeRequest;
|
||||
|
||||
use super::*;
|
||||
use crate::handler::node_stat::Stat;
|
||||
use crate::keys::StatKey;
|
||||
use crate::service::store::memory::MemStore;
|
||||
|
||||
@@ -83,24 +95,23 @@ mod tests {
|
||||
catalog: None,
|
||||
schema: None,
|
||||
table: None,
|
||||
is_infancy: false,
|
||||
};
|
||||
|
||||
let req = HeartbeatRequest::default();
|
||||
let mut acc = HeartbeatAccumulator {
|
||||
stats: vec![Stat {
|
||||
cluster_id: 3,
|
||||
id: 101,
|
||||
region_num: Some(100),
|
||||
let handler = PersistStatsHandler::default();
|
||||
for i in 1..=MAX_CACHED_STATS_PER_KEY {
|
||||
let mut acc = HeartbeatAccumulator {
|
||||
stat: Some(Stat {
|
||||
cluster_id: 3,
|
||||
id: 101,
|
||||
region_num: Some(i as _),
|
||||
..Default::default()
|
||||
}),
|
||||
..Default::default()
|
||||
}],
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let stats_handler = PersistStatsHandler;
|
||||
stats_handler
|
||||
.handle(&req, &mut ctx, &mut acc)
|
||||
.await
|
||||
.unwrap();
|
||||
};
|
||||
handler.handle(&req, &mut ctx, &mut acc).await.unwrap();
|
||||
}
|
||||
|
||||
let key = StatKey {
|
||||
cluster_id: 3,
|
||||
@@ -124,7 +135,7 @@ mod tests {
|
||||
|
||||
let val: StatValue = kv.value.clone().try_into().unwrap();
|
||||
|
||||
assert_eq!(1, val.stats.len());
|
||||
assert_eq!(Some(100), val.stats[0].region_num);
|
||||
assert_eq!(10, val.stats.len());
|
||||
assert_eq!(Some(1), val.stats[0].region_num);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -65,6 +65,7 @@ mod tests {
|
||||
catalog: None,
|
||||
schema: None,
|
||||
table: None,
|
||||
is_infancy: false,
|
||||
};
|
||||
|
||||
let req = HeartbeatRequest {
|
||||
|
||||
@@ -178,7 +178,16 @@ pub(crate) fn to_removed_key(key: &str) -> String {
|
||||
format!("{REMOVED_PREFIX}-{key}")
|
||||
}
|
||||
|
||||
#[derive(Eq, PartialEq, Debug, Clone, Hash)]
|
||||
pub fn build_table_route_prefix(catalog: impl AsRef<str>, schema: impl AsRef<str>) -> String {
|
||||
format!(
|
||||
"{}-{}-{}-",
|
||||
TABLE_ROUTE_PREFIX,
|
||||
catalog.as_ref(),
|
||||
schema.as_ref()
|
||||
)
|
||||
}
|
||||
|
||||
#[derive(Eq, PartialEq, Debug, Clone, Hash, Copy)]
|
||||
pub struct StatKey {
|
||||
pub cluster_id: u64,
|
||||
pub node_id: u64,
|
||||
@@ -279,6 +288,14 @@ impl TryFrom<Vec<u8>> for StatValue {
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_build_prefix() {
|
||||
assert_eq!(
|
||||
"__meta_table_route-CATALOG-SCHEMA-",
|
||||
build_table_route_prefix("CATALOG", "SCHEMA")
|
||||
)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_stat_key_round_trip() {
|
||||
let key = StatKey {
|
||||
|
||||
@@ -17,6 +17,7 @@ pub mod bootstrap;
|
||||
pub mod cluster;
|
||||
pub mod election;
|
||||
pub mod error;
|
||||
mod failure_detector;
|
||||
pub mod handler;
|
||||
pub mod keys;
|
||||
pub mod lease;
|
||||
|
||||
@@ -66,6 +66,7 @@ pub struct Context {
|
||||
pub catalog: Option<String>,
|
||||
pub schema: Option<String>,
|
||||
pub table: Option<String>,
|
||||
pub is_infancy: bool,
|
||||
}
|
||||
|
||||
impl Context {
|
||||
@@ -199,6 +200,7 @@ impl MetaSrv {
|
||||
catalog: None,
|
||||
schema: None,
|
||||
table: None,
|
||||
is_infancy: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -18,7 +18,7 @@ use std::sync::Arc;
|
||||
use crate::cluster::MetaPeerClient;
|
||||
use crate::handler::{
|
||||
CheckLeaderHandler, CollectStatsHandler, HeartbeatHandlerGroup, KeepLeaseHandler,
|
||||
OnLeaderStartHandler, PersistStatsHandler, ResponseHeaderHandler,
|
||||
OnLeaderStartHandler, PersistStatsHandler, RegionFailureHandler, ResponseHeaderHandler,
|
||||
};
|
||||
use crate::lock::DistLockRef;
|
||||
use crate::metasrv::{ElectionRef, MetaSrv, MetaSrvOptions, SelectorRef, TABLE_ID_SEQ};
|
||||
@@ -118,6 +118,9 @@ impl MetaSrvBuilder {
|
||||
let handler_group = match handler_group {
|
||||
Some(handler_group) => handler_group,
|
||||
None => {
|
||||
let mut region_failure_handler = RegionFailureHandler::new(election.clone());
|
||||
region_failure_handler.start().await;
|
||||
|
||||
let group = HeartbeatHandlerGroup::default();
|
||||
let keep_lease_handler = KeepLeaseHandler::new(kv_store.clone());
|
||||
group.add_handler(ResponseHeaderHandler::default()).await;
|
||||
@@ -127,7 +130,8 @@ impl MetaSrvBuilder {
|
||||
group.add_handler(keep_lease_handler).await;
|
||||
group.add_handler(CheckLeaderHandler::default()).await;
|
||||
group.add_handler(OnLeaderStartHandler::default()).await;
|
||||
group.add_handler(CollectStatsHandler::default()).await;
|
||||
group.add_handler(CollectStatsHandler).await;
|
||||
group.add_handler(region_failure_handler).await;
|
||||
group.add_handler(PersistStatsHandler::default()).await;
|
||||
group
|
||||
}
|
||||
|
||||
@@ -150,7 +150,9 @@ impl Inner {
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
use api::v1::meta::{BatchGetRequest, BatchGetResponse};
|
||||
use api::v1::meta::{
|
||||
BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse,
|
||||
};
|
||||
|
||||
use super::*;
|
||||
use crate::service::store::kv::KvStore;
|
||||
@@ -218,6 +220,10 @@ mod tests {
|
||||
) -> Result<api::v1::meta::MoveValueResponse> {
|
||||
unreachable!()
|
||||
}
|
||||
|
||||
async fn batch_delete(&self, _: BatchDeleteRequest) -> Result<BatchDeleteResponse> {
|
||||
unreachable!()
|
||||
}
|
||||
}
|
||||
|
||||
let kv_store = Arc::new(Noop {});
|
||||
|
||||
@@ -18,9 +18,10 @@ pub mod kv;
|
||||
pub mod memory;
|
||||
|
||||
use api::v1::meta::{
|
||||
store_server, BatchGetRequest, BatchGetResponse, BatchPutRequest, BatchPutResponse,
|
||||
CompareAndPutRequest, CompareAndPutResponse, DeleteRangeRequest, DeleteRangeResponse,
|
||||
MoveValueRequest, MoveValueResponse, PutRequest, PutResponse, RangeRequest, RangeResponse,
|
||||
store_server, BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse,
|
||||
BatchPutRequest, BatchPutResponse, CompareAndPutRequest, CompareAndPutResponse,
|
||||
DeleteRangeRequest, DeleteRangeResponse, MoveValueRequest, MoveValueResponse, PutRequest,
|
||||
PutResponse, RangeRequest, RangeResponse,
|
||||
};
|
||||
use tonic::{Request, Response};
|
||||
|
||||
@@ -57,6 +58,15 @@ impl store_server::Store for MetaSrv {
|
||||
Ok(Response::new(res))
|
||||
}
|
||||
|
||||
async fn batch_delete(
|
||||
&self,
|
||||
req: Request<BatchDeleteRequest>,
|
||||
) -> GrpcResult<BatchDeleteResponse> {
|
||||
let req = req.into_inner();
|
||||
let res = self.kv_store().batch_delete(req).await?;
|
||||
Ok(Response::new(res))
|
||||
}
|
||||
|
||||
async fn compare_and_put(
|
||||
&self,
|
||||
req: Request<CompareAndPutRequest>,
|
||||
@@ -144,6 +154,18 @@ mod tests {
|
||||
assert!(res.is_ok());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_batch_delete() {
|
||||
let kv_store = Arc::new(MemStore::new());
|
||||
|
||||
let meta_srv = MetaSrvBuilder::new().kv_store(kv_store).build().await;
|
||||
|
||||
let req = BatchDeleteRequest::default();
|
||||
let res = meta_srv.batch_delete(req.into_request()).await;
|
||||
|
||||
assert!(res.is_ok());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_compare_and_put() {
|
||||
let kv_store = Arc::new(MemStore::new());
|
||||
|
||||

@@ -15,9 +15,10 @@
 use std::sync::Arc;

 use api::v1::meta::{
-    BatchGetRequest, BatchGetResponse, BatchPutRequest, BatchPutResponse, CompareAndPutRequest,
-    CompareAndPutResponse, DeleteRangeRequest, DeleteRangeResponse, KeyValue, MoveValueRequest,
-    MoveValueResponse, PutRequest, PutResponse, RangeRequest, RangeResponse, ResponseHeader,
+    BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse, BatchPutRequest,
+    BatchPutResponse, CompareAndPutRequest, CompareAndPutResponse, DeleteRangeRequest,
+    DeleteRangeResponse, KeyValue, MoveValueRequest, MoveValueResponse, PutRequest, PutResponse,
+    RangeRequest, RangeResponse, ResponseHeader,
 };
 use common_error::prelude::*;
 use common_telemetry::warn;

@@ -168,6 +169,44 @@ impl KvStore for EtcdStore {
         Ok(BatchPutResponse { header, prev_kvs })
     }

+    async fn batch_delete(&self, req: BatchDeleteRequest) -> Result<BatchDeleteResponse> {
+        let BatchDelete {
+            cluster_id,
+            keys,
+            options,
+        } = req.try_into()?;
+
+        let mut prev_kvs = Vec::with_capacity(keys.len());
+
+        let delete_ops = keys
+            .into_iter()
+            .map(|k| TxnOp::delete(k, options.clone()))
+            .collect::<Vec<_>>();
+        let txn = Txn::new().and_then(delete_ops);
+
+        let txn_res = self
+            .client
+            .kv_client()
+            .txn(txn)
+            .await
+            .context(error::EtcdFailedSnafu)?;
+
+        for op_res in txn_res.op_responses() {
+            match op_res {
+                TxnOpResponse::Delete(delete_res) => {
+                    delete_res.prev_kvs().iter().for_each(|kv| {
+                        prev_kvs.push(KvPair::to_kv(kv));
+                    });
+                }
+                _ => unreachable!(), // never get here
+            }
+        }
+
+        let header = Some(ResponseHeader::success(cluster_id));
+
+        Ok(BatchDeleteResponse { header, prev_kvs })
+    }
+
     async fn compare_and_put(&self, req: CompareAndPutRequest) -> Result<CompareAndPutResponse> {
         let CompareAndPut {
             cluster_id,
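
EtcdStore::batch_delete above folds every key into a single etcd transaction, so the whole batch is applied atomically and the previous values can be collected from the per-op responses. Stripped of the GreptimeDB wrapper types, the underlying etcd_client usage looks roughly like this sketch; the endpoint and keys are placeholders.

```rust
use etcd_client::{Client, DeleteOptions, Txn, TxnOp};

#[tokio::main]
async fn main() -> Result<(), etcd_client::Error> {
    // Placeholder endpoint; adjust to your etcd cluster.
    let mut client = Client::connect(["127.0.0.1:2379"], None).await?;

    // One TxnOp::delete per key, all committed in a single transaction,
    // mirroring the shape of EtcdStore::batch_delete above.
    let keys: Vec<Vec<u8>> = vec![b"k1".to_vec(), b"k2".to_vec()];
    let delete_ops = keys
        .into_iter()
        .map(|k| TxnOp::delete(k, Some(DeleteOptions::new().with_prev_key())))
        .collect::<Vec<_>>();

    let txn_res = client.kv_client().txn(Txn::new().and_then(delete_ops)).await?;
    println!("transaction succeeded: {}", txn_res.succeeded());
    Ok(())
}
```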

@@ -406,7 +445,7 @@ impl TryFrom<BatchGetRequest> for BatchGet {
     fn try_from(req: BatchGetRequest) -> Result<Self> {
         let BatchGetRequest { header, keys } = req;

-        let options = GetOptions::default().with_keys_only();
+        let options = GetOptions::default();

         Ok(BatchGet {
             cluster_id: header.map_or(0, |h| h.cluster_id),

@@ -445,6 +484,35 @@ impl TryFrom<BatchPutRequest> for BatchPut {
     }
 }

+struct BatchDelete {
+    cluster_id: u64,
+    keys: Vec<Vec<u8>>,
+    options: Option<DeleteOptions>,
+}
+
+impl TryFrom<BatchDeleteRequest> for BatchDelete {
+    type Error = error::Error;
+
+    fn try_from(req: BatchDeleteRequest) -> Result<Self> {
+        let BatchDeleteRequest {
+            header,
+            keys,
+            prev_kv,
+        } = req;
+
+        let mut options = DeleteOptions::default();
+        if prev_kv {
+            options = options.with_prev_key();
+        }
+
+        Ok(BatchDelete {
+            cluster_id: header.map_or(0, |h| h.cluster_id),
+            keys,
+            options: Some(options),
+        })
+    }
+}
+
 struct CompareAndPut {
     cluster_id: u64,
     key: Vec<u8>,

@@ -628,6 +696,23 @@ mod tests {
         assert!(batch_put.options.is_some());
     }

+    #[test]
+    fn test_parse_batch_delete() {
+        let req = BatchDeleteRequest {
+            keys: vec![b"k1".to_vec(), b"k2".to_vec(), b"k3".to_vec()],
+            prev_kv: true,
+            ..Default::default()
+        };
+
+        let batch_delete: BatchDelete = req.try_into().unwrap();
+
+        assert_eq!(batch_delete.keys.len(), 3);
+        assert_eq!(b"k1".to_vec(), batch_delete.keys.get(0).unwrap().to_vec());
+        assert_eq!(b"k2".to_vec(), batch_delete.keys.get(1).unwrap().to_vec());
+        assert_eq!(b"k3".to_vec(), batch_delete.keys.get(2).unwrap().to_vec());
+        assert!(batch_delete.options.is_some());
+    }
+
     #[test]
     fn test_parse_compare_and_put() {
         let req = CompareAndPutRequest {

@@ -15,9 +15,10 @@
 use std::sync::Arc;

 use api::v1::meta::{
-    BatchGetRequest, BatchGetResponse, BatchPutRequest, BatchPutResponse, CompareAndPutRequest,
-    CompareAndPutResponse, DeleteRangeRequest, DeleteRangeResponse, MoveValueRequest,
-    MoveValueResponse, PutRequest, PutResponse, RangeRequest, RangeResponse,
+    BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse, BatchPutRequest,
+    BatchPutResponse, CompareAndPutRequest, CompareAndPutResponse, DeleteRangeRequest,
+    DeleteRangeResponse, MoveValueRequest, MoveValueResponse, PutRequest, PutResponse,
+    RangeRequest, RangeResponse,
 };

 use crate::error::Result;

@@ -35,6 +36,8 @@ pub trait KvStore: Send + Sync {

     async fn batch_put(&self, req: BatchPutRequest) -> Result<BatchPutResponse>;

+    async fn batch_delete(&self, req: BatchDeleteRequest) -> Result<BatchDeleteResponse>;
+
     async fn compare_and_put(&self, req: CompareAndPutRequest) -> Result<CompareAndPutResponse>;

     async fn delete_range(&self, req: DeleteRangeRequest) -> Result<DeleteRangeResponse>;

@@ -17,9 +17,10 @@ use std::collections::BTreeMap;
 use std::ops::Range;

 use api::v1::meta::{
-    BatchGetRequest, BatchGetResponse, BatchPutRequest, BatchPutResponse, CompareAndPutRequest,
-    CompareAndPutResponse, DeleteRangeRequest, DeleteRangeResponse, KeyValue, MoveValueRequest,
-    MoveValueResponse, PutRequest, PutResponse, RangeRequest, RangeResponse, ResponseHeader,
+    BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse, BatchPutRequest,
+    BatchPutResponse, CompareAndPutRequest, CompareAndPutResponse, DeleteRangeRequest,
+    DeleteRangeResponse, KeyValue, MoveValueRequest, MoveValueResponse, PutRequest, PutResponse,
+    RangeRequest, RangeResponse, ResponseHeader,
 };
 use parking_lot::RwLock;

@@ -163,6 +164,29 @@ impl KvStore for MemStore {
         Ok(BatchPutResponse { header, prev_kvs })
     }

+    async fn batch_delete(&self, req: BatchDeleteRequest) -> Result<BatchDeleteResponse> {
+        let BatchDeleteRequest {
+            header,
+            keys,
+            prev_kv,
+        } = req;
+
+        let mut memory = self.inner.write();
+        let prev_kvs = if prev_kv {
+            keys.into_iter()
+                .filter_map(|key| memory.remove(&key).map(|value| KeyValue { key, value }))
+                .collect()
+        } else {
+            for key in keys.into_iter() {
+                memory.remove(&key);
+            }
+            vec![]
+        };
+        let cluster_id = header.map_or(0, |h| h.cluster_id);
+        let header = Some(ResponseHeader::success(cluster_id));
+        Ok(BatchDeleteResponse { header, prev_kvs })
+    }
+
     async fn compare_and_put(&self, req: CompareAndPutRequest) -> Result<CompareAndPutResponse> {
         let CompareAndPutRequest {
             header,
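
The same batch_delete contract is now part of the KvStore trait and of the in-memory MemStore used in tests, which reports the removed key-value pairs only when prev_kv is set. A rough usage sketch against the in-memory store follows; the module paths are assumed from the `pub mod kv; pub mod memory;` hunk above, and the BatchPutRequest field names are assumed from the api proto rather than shown in this diff.

```rust
use std::sync::Arc;

use api::v1::meta::{BatchDeleteRequest, BatchPutRequest, KeyValue};
// Paths assumed from the store module hunks above; the real crate layout may differ slightly.
use meta_srv::service::store::{kv::KvStore, memory::MemStore};

#[tokio::main]
async fn main() {
    let kv_store: Arc<dyn KvStore> = Arc::new(MemStore::new());

    // Seed one key so the delete has something to report back.
    // The `kvs` field is assumed from the api::v1::meta proto.
    let put = BatchPutRequest {
        kvs: vec![KeyValue {
            key: b"k1".to_vec(),
            value: b"v1".to_vec(),
        }],
        ..Default::default()
    };
    kv_store.batch_put(put).await.unwrap();

    // Delete one present and one absent key; prev_kv = true asks for the old pairs.
    let req = BatchDeleteRequest {
        keys: vec![b"k1".to_vec(), b"missing".to_vec()],
        prev_kv: true,
        ..Default::default()
    };
    let res = kv_store.batch_delete(req).await.unwrap();

    // MemStore only reports keys that actually existed.
    assert_eq!(res.prev_kvs.len(), 1);
}
```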

@@ -523,6 +523,7 @@ async fn test_alter_table_add_column() {
     assert_eq!(new_schema.timestamp_column(), old_schema.timestamp_column());
     assert_eq!(new_schema.version(), old_schema.version() + 1);
     assert_eq!(new_meta.next_column_id, old_meta.next_column_id + 2);
+    assert_eq!(new_meta.region_numbers, old_meta.region_numbers);
 }

 #[tokio::test]

@@ -572,6 +573,7 @@ async fn test_alter_table_remove_column() {
     assert_eq!(&[1, 2], &new_meta.value_indices[..]);
     assert_eq!(new_schema.timestamp_column(), old_schema.timestamp_column());
     assert_eq!(new_schema.version(), old_schema.version() + 1);
+    assert_eq!(new_meta.region_numbers, old_meta.region_numbers);
 }

 #[tokio::test]

@@ -793,10 +795,10 @@ async fn test_flush_table_all_regions() {
     assert!(!has_parquet_file(&region_dir));

     // Trigger flush all region
-    table.flush(None).await.unwrap();
+    table.flush(None, None).await.unwrap();

     // Trigger again, wait for the previous task finished
-    table.flush(None).await.unwrap();
+    table.flush(None, None).await.unwrap();

     assert!(has_parquet_file(&region_dir));
 }

@@ -832,10 +834,10 @@ async fn test_flush_table_with_region_id() {
     };

     // Trigger flush all region
-    table.flush(req.region_number).await.unwrap();
+    table.flush(req.region_number, Some(false)).await.unwrap();

     // Trigger again, wait for the previous task finished
-    table.flush(req.region_number).await.unwrap();
+    table.flush(req.region_number, Some(true)).await.unwrap();

     assert!(has_parquet_file(&region_dir));
 }
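
The last two hunks track a signature change in the table flush API: it now takes the optional region number plus a second Option<bool>, which the updated tests appear to use as a wait flag (the region-id test passes Some(false) on the first call and Some(true) when it wants to wait for the flush task). A minimal sketch of the new call pattern, with the trait path, region-number type, and the wait semantics all assumed rather than taken from this diff:

```rust
use std::sync::Arc;

use table::Table; // assumed path of the table trait whose `flush` signature changed above

// Flush a single region when `region` is Some, or every region when it is None.
// The second argument is read here as "wait for the flush task to finish",
// following how the updated tests above seem to use it.
async fn flush_and_wait(table: Arc<dyn Table>, region: Option<u32>) {
    table
        .flush(region, Some(true))
        .await
        .expect("flush should succeed");
}
```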

Some files were not shown because too many files have changed in this diff.