Mirror of https://github.com/GreptimeTeam/greptimedb.git (synced 2025-12-27 16:32:54 +00:00)
Compare commits: v0.1.2-alp...v0.1.2 (15 commits)
| SHA1 |
|---|
| b2a09c888a |
| af101480b3 |
| b8f7f603cf |
| 8fb97ea1d8 |
| 21ce9c1163 |
| 0a22375ac1 |
| 0596d20a3b |
| e19c8fa2b6 |
| ad886f5b3e |
| f6669a8201 |
| ad5c47185d |
| 64441616db |
| 09491d6aee |
| 7cfa30b2ab |
| a7676d8860 |
.github/workflows/release.yml (vendored): 85 changes
@@ -32,21 +32,37 @@ jobs:
os: ubuntu-2004-16-cores
file: greptime-linux-amd64
continue-on-error: false
opts: "-F pyo3_backend"
- arch: aarch64-unknown-linux-gnu
os: ubuntu-2004-16-cores
file: greptime-linux-arm64
continue-on-error: false
opts: "-F pyo3_backend"
- arch: aarch64-apple-darwin
os: macos-latest
file: greptime-darwin-arm64
continue-on-error: false
opts: "-F pyo3_backend"
- arch: x86_64-apple-darwin
os: macos-latest
file: greptime-darwin-amd64
continue-on-error: false
- arch: x86_64-unknown-linux-gnu
os: ubuntu-2004-16-cores
file: greptime-linux-amd64-pyo3
continue-on-error: false
opts: "-F pyo3_backend"
- arch: aarch64-unknown-linux-gnu
os: ubuntu-2004-16-cores
file: greptime-linux-arm64-pyo3
continue-on-error: false
opts: "-F pyo3_backend"
- arch: aarch64-apple-darwin
os: macos-latest
file: greptime-darwin-arm64-pyo3
continue-on-error: false
opts: "-F pyo3_backend"
- arch: x86_64-apple-darwin
os: macos-latest
file: greptime-darwin-amd64-pyo3
continue-on-error: false
opts: "-F pyo3_backend"
runs-on: ${{ matrix.os }}
continue-on-error: ${{ matrix.continue-on-error }}

@@ -105,11 +121,12 @@ jobs:
sudo apt-get -y update
sudo apt-get -y install libssl-dev pkg-config g++-aarch64-linux-gnu gcc-aarch64-linux-gnu binutils-aarch64-linux-gnu wget

- name: Compile Python 3.10.10 from source for Aarch64
if: contains(matrix.arch, 'aarch64-unknown-linux-gnu')
# FIXME(zyy17): Should we specify the version of python when building binary for darwin?
- name: Compile Python 3.10.10 from source for linux
if: contains(matrix.arch, 'linux') && contains(matrix.opts, 'pyo3_backend')
run: |
sudo chmod +x ./docker/aarch64/compile-python.sh
sudo ./docker/aarch64/compile-python.sh
sudo ./docker/aarch64/compile-python.sh ${{ matrix.arch }}

- name: Install rust toolchain
uses: dtolnay/rust-toolchain@master

@@ -124,17 +141,51 @@ jobs:
if: env.DISABLE_RUN_TESTS == 'false'
run: make unit-test integration-test sqlness-test

- name: Run cargo build for aarch64-linux
if: contains(matrix.arch, 'aarch64-unknown-linux-gnu')
- name: Run cargo build with pyo3 for aarch64-linux
if: contains(matrix.arch, 'aarch64-unknown-linux-gnu') && contains(matrix.opts, 'pyo3_backend')
run: |
# TODO(zyy17): We should make PYO3_CROSS_LIB_DIR configurable.
export PYO3_CROSS_LIB_DIR=$(pwd)/python_arm64_build/lib
export PYTHON_INSTALL_PATH_AMD64=${PWD}/python-3.10.10/amd64
export LD_LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LD_LIBRARY_PATH
export LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LIBRARY_PATH
export PATH=$PYTHON_INSTALL_PATH_AMD64/bin:$PATH

export PYO3_CROSS_LIB_DIR=${PWD}/python-3.10.10/aarch64
echo "PYO3_CROSS_LIB_DIR: $PYO3_CROSS_LIB_DIR"
alias python=python3
alias python=$PYTHON_INSTALL_PATH_AMD64/bin/python3
alias pip=$PYTHON_INSTALL_PATH_AMD64/bin/python3-pip

cargo build --profile ${{ env.CARGO_PROFILE }} --locked --target ${{ matrix.arch }} ${{ matrix.opts }}

- name: Run cargo build with pyo3 for amd64-linux
if: contains(matrix.arch, 'x86_64-unknown-linux-gnu') && contains(matrix.opts, 'pyo3_backend')
run: |
export PYTHON_INSTALL_PATH_AMD64=${PWD}/python-3.10.10/amd64
export LD_LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LD_LIBRARY_PATH
export LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LIBRARY_PATH
export PATH=$PYTHON_INSTALL_PATH_AMD64/bin:$PATH

echo "implementation=CPython" >> pyo3.config
echo "version=3.10" >> pyo3.config
echo "implementation=CPython" >> pyo3.config
echo "shared=true" >> pyo3.config
echo "abi3=true" >> pyo3.config
echo "lib_name=python3.10" >> pyo3.config
echo "lib_dir=$PYTHON_INSTALL_PATH_AMD64/lib" >> pyo3.config
echo "executable=$PYTHON_INSTALL_PATH_AMD64/bin/python3" >> pyo3.config
echo "pointer_width=64" >> pyo3.config
echo "build_flags=" >> pyo3.config
echo "suppress_build_script_link_lines=false" >> pyo3.config

cat pyo3.config
export PYO3_CONFIG_FILE=${PWD}/pyo3.config
alias python=$PYTHON_INSTALL_PATH_AMD64/bin/python3
alias pip=$PYTHON_INSTALL_PATH_AMD64/bin/python3-pip

cargo build --profile ${{ env.CARGO_PROFILE }} --locked --target ${{ matrix.arch }} ${{ matrix.opts }}

- name: Run cargo build
if: contains(matrix.arch, 'aarch64-unknown-linux-gnu') == false
if: contains(matrix.arch, 'darwin') || contains(matrix.opts, 'pyo3_backend') == false
run: cargo build --profile ${{ env.CARGO_PROFILE }} --locked --target ${{ matrix.arch }} ${{ matrix.opts }}

- name: Calculate checksum and rename binary
@@ -196,20 +247,20 @@ jobs:
- name: Download amd64 binary
uses: actions/download-artifact@v3
with:
name: greptime-linux-amd64
name: greptime-linux-amd64-pyo3
path: amd64

- name: Unzip the amd64 artifacts
run: |
cd amd64
tar xvf greptime-linux-amd64.tgz
rm greptime-linux-amd64.tgz
tar xvf greptime-linux-amd64-pyo3.tgz
rm greptime-linux-amd64-pyo3.tgz

- name: Download arm64 binary
id: download-arm64
uses: actions/download-artifact@v3
with:
name: greptime-linux-arm64
name: greptime-linux-arm64-pyo3
path: arm64

- name: Unzip the arm64 artifacts
@@ -217,8 +268,8 @@ jobs:
if: success() || steps.download-arm64.conclusion == 'success'
run: |
cd arm64
tar xvf greptime-linux-arm64.tgz
rm greptime-linux-arm64.tgz
tar xvf greptime-linux-arm64-pyo3.tgz
rm greptime-linux-arm64-pyo3.tgz

- name: Build and push all
uses: docker/build-push-action@v3
Cargo.lock (generated): 35 changes
@@ -1485,6 +1485,18 @@ dependencies = [
"tokio",
]

[[package]]
name = "common-datasource"
version = "0.1.1"
dependencies = [
"common-error",
"futures",
"object-store",
"regex",
"snafu",
"url",
]

[[package]]
name = "common-error"
version = "0.1.1"
@@ -2280,6 +2292,7 @@ dependencies = [
"client",
"common-base",
"common-catalog",
"common-datasource",
"common-error",
"common-grpc",
"common-grpc-expr",
@@ -2938,9 +2951,9 @@ dependencies = [

[[package]]
name = "futures-core"
version = "0.3.26"
version = "0.3.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ec90ff4d0fe1f57d600049061dc6bb68ed03c7d2fbd697274c41805dcb3f8608"
checksum = "86d7a0c1aa76363dac491de0ee99faf6941128376f1cf96f07db7603b7de69dd"

[[package]]
name = "futures-executor"
@@ -3084,7 +3097,7 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b"
[[package]]
name = "greptime-proto"
version = "0.1.0"
source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=3a715150563b89d5dfc81a5838eac1f66a5658a1#3a715150563b89d5dfc81a5838eac1f66a5658a1"
source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=eb760d219206c77dd3a105ecb6a3ba97d9d650ec#eb760d219206c77dd3a105ecb6a3ba97d9d650ec"
dependencies = [
"prost",
"tonic",
@@ -4023,6 +4036,7 @@ dependencies = [
"lazy_static",
"parking_lot",
"prost",
"rand",
"regex",
"serde",
"serde_json",
@@ -6927,6 +6941,7 @@ dependencies = [
"tokio-stream",
"tokio-test",
"tonic",
"tonic-reflection",
"tower",
"tower-http",
]
@@ -8116,6 +8131,20 @@ dependencies = [
"syn",
]

[[package]]
name = "tonic-reflection"
version = "0.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "67494bad4dda4c9bffae901dfe14e2b2c0f760adb4706dc10beeb81799f7f7b2"
dependencies = [
"bytes",
"prost",
"prost-types",
"tokio",
"tokio-stream",
"tonic",
]

[[package]]
name = "toolchain_find"
version = "0.2.0"
@@ -7,6 +7,7 @@ members = [
"src/cmd",
"src/common/base",
"src/common/catalog",
"src/common/datasource",
"src/common/error",
"src/common/function",
"src/common/function-macro",
@@ -68,6 +69,7 @@ futures-util = "0.3"
parquet = "34.0"
paste = "1.0"
prost = "0.11"
rand = "0.8"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
snafu = { version = "0.7", features = ["backtraces"] }
README.md: 12 changes
@@ -61,12 +61,12 @@ To compile GreptimeDB from source, you'll need:
find an installation instructions [here](https://grpc.io/docs/protoc-installation/).
**Note that `protoc` version needs to be >= 3.15** because we have used the `optional`
keyword. You can check it with `protoc --version`.
- python3-dev or python3-devel(Optional, only needed if you want to run scripts
in cpython): this install a Python shared library required for running python
- python3-dev or python3-devel(Optional feature, only needed if you want to run scripts
in CPython, and also need to enable `pyo3_backend` feature when compiling(by `cargo run -F pyo3_backend` or add `pyo3_backend` to src/script/Cargo.toml 's `features.default` like `default = ["python", "pyo3_backend]`)): this install a Python shared library required for running Python
scripting engine(In CPython Mode). This is available as `python3-dev` on
ubuntu, you can install it with `sudo apt install python3-dev`, or
`python3-devel` on RPM based distributions (e.g. Fedora, Red Hat, SuSE). Mac's
`Python3` package should have this shared library by default.
`Python3` package should have this shared library by default. More detail for compiling with PyO3 can be found in [PyO3](https://pyo3.rs/v0.18.1/building_and_distribution#configuring-the-python-version)'s documentation.

#### Build with Docker

@@ -147,9 +147,9 @@ You can always cleanup test database by removing `/tmp/greptimedb`.
### Installation

- [Pre-built Binaries](https://github.com/GreptimeTeam/greptimedb/releases):
downloadable pre-built binaries for Linux and MacOS
- [Docker Images](https://hub.docker.com/r/greptime/greptimedb): pre-built
Docker images
For Linux and macOS, you can easily download pre-built binaries that are ready to use. In most cases, downloading the version without PyO3 is sufficient. However, if you plan to run scripts in CPython (and use Python packages like NumPy and Pandas), you will need to download the version with PyO3 and install a Python with the same version as the Python in the PyO3 version. We recommend using virtualenv for the installation process to manage multiple Python versions.
- [Docker Images](https://hub.docker.com/r/greptime/greptimedb)(**recommended**): pre-built
Docker images, this is the easiest way to try GreptimeDB. By default it runs CPython script with `pyo3_backend` enabled.
- [`gtctl`](https://github.com/GreptimeTeam/gtctl): the command-line tool for
Kubernetes deployment
@@ -21,12 +21,12 @@ use std::collections::HashMap;
use std::path::{Path, PathBuf};
use std::time::Instant;

use arrow::array::{ArrayRef, PrimitiveArray, StringArray, TimestampNanosecondArray};
use arrow::array::{ArrayRef, PrimitiveArray, StringArray, TimestampMicrosecondArray};
use arrow::datatypes::{DataType, Float64Type, Int64Type};
use arrow::record_batch::RecordBatch;
use clap::Parser;
use client::api::v1::column::Values;
use client::api::v1::{Column, ColumnDataType, ColumnDef, CreateTableExpr, InsertRequest, TableId};
use client::api::v1::{Column, ColumnDataType, ColumnDef, CreateTableExpr, InsertRequest};
use client::{Client, Database, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use indicatif::{MultiProgress, ProgressBar, ProgressStyle};
use parquet::arrow::arrow_reader::ParquetRecordBatchReaderBuilder;
@@ -61,7 +61,7 @@ struct Args {
#[arg(long = "skip-read")]
skip_read: bool,

#[arg(short, long, default_value_t = String::from("127.0.0.1:3001"))]
#[arg(short, long, default_value_t = String::from("127.0.0.1:4001"))]
endpoint: String,
}

@@ -97,6 +97,9 @@ async fn write_data(

for record_batch in record_batch_reader {
let record_batch = record_batch.unwrap();
if !is_record_batch_full(&record_batch) {
continue;
}
let (columns, row_count) = convert_record_batch(record_batch);
let request = InsertRequest {
table_name: TABLE_NAME.to_string(),
@@ -122,11 +125,16 @@ fn convert_record_batch(record_batch: RecordBatch) -> (Vec<Column>, u32) {
let mut columns = vec![];

for (array, field) in record_batch.columns().iter().zip(fields.iter()) {
let values = build_values(array);
let (values, datatype) = build_values(array);
let column = Column {
column_name: field.name().to_owned(),
values: Some(values),
null_mask: vec![],
null_mask: array
.data()
.null_bitmap()
.map(|bitmap| bitmap.buffer().as_slice().to_vec())
.unwrap_or_default(),
datatype: datatype.into(),
// datatype and semantic_type are set to default
..Default::default()
};
@@ -136,7 +144,7 @@ fn convert_record_batch(record_batch: RecordBatch) -> (Vec<Column>, u32) {
(columns, row_count as _)
}

fn build_values(column: &ArrayRef) -> Values {
fn build_values(column: &ArrayRef) -> (Values, ColumnDataType) {
match column.data_type() {
DataType::Int64 => {
let array = column
@@ -144,10 +152,13 @@ fn build_values(column: &ArrayRef) -> Values {
.downcast_ref::<PrimitiveArray<Int64Type>>()
.unwrap();
let values = array.values();
Values {
i64_values: values.to_vec(),
..Default::default()
}
(
Values {
i64_values: values.to_vec(),
..Default::default()
},
ColumnDataType::Int64,
)
}
DataType::Float64 => {
let array = column
@@ -155,29 +166,38 @@ fn build_values(column: &ArrayRef) -> Values {
.downcast_ref::<PrimitiveArray<Float64Type>>()
.unwrap();
let values = array.values();
Values {
f64_values: values.to_vec(),
..Default::default()
}
(
Values {
f64_values: values.to_vec(),
..Default::default()
},
ColumnDataType::Float64,
)
}
DataType::Timestamp(_, _) => {
let array = column
.as_any()
.downcast_ref::<TimestampNanosecondArray>()
.downcast_ref::<TimestampMicrosecondArray>()
.unwrap();
let values = array.values();
Values {
i64_values: values.to_vec(),
..Default::default()
}
(
Values {
i64_values: values.to_vec(),
..Default::default()
},
ColumnDataType::Int64,
)
}
DataType::Utf8 => {
let array = column.as_any().downcast_ref::<StringArray>().unwrap();
let values = array.iter().filter_map(|s| s.map(String::from)).collect();
Values {
string_values: values,
..Default::default()
}
(
Values {
string_values: values,
..Default::default()
},
ColumnDataType::String,
)
}
DataType::Null
| DataType::Boolean
@@ -213,6 +233,10 @@ fn build_values(column: &ArrayRef) -> Values {
}
}

fn is_record_batch_full(batch: &RecordBatch) -> bool {
batch.columns().iter().all(|col| col.null_count() == 0)
}

fn create_table_expr() -> CreateTableExpr {
CreateTableExpr {
catalog_name: CATALOG_NAME.to_string(),
@@ -340,7 +364,7 @@ fn create_table_expr() -> CreateTableExpr {
create_if_not_exists: false,
table_options: Default::default(),
region_ids: vec![0],
table_id: Some(TableId { id: 0 }),
table_id: None,
}
}
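The benchmark change above stops sending an empty `null_mask` and instead copies the Arrow array's validity bitmap into the request. Arrow stores validity as one bit per row, least-significant bit first, with a set bit meaning the value is present. A small, self-contained sketch of that packing (plain Rust, independent of the `arrow` crate; `pack_validity` is a hypothetical helper, not part of the benchmark):

```rust
// Pack per-row validity flags into an LSB-first bitmap, the byte layout that
// the change above forwards from Arrow into the insert request's null_mask.
// Hypothetical helper for illustration only.
fn pack_validity(valid: &[bool]) -> Vec<u8> {
    let mut bitmap = vec![0u8; (valid.len() + 7) / 8];
    for (row, &is_valid) in valid.iter().enumerate() {
        if is_valid {
            bitmap[row / 8] |= 1 << (row % 8);
        }
    }
    bitmap
}

fn main() {
    // Rows 0, 2 and 3 are present, row 1 is null: bits 1101 -> 0b0000_1101.
    assert_eq!(pack_validity(&[true, false, true, true]), vec![0b0000_1101]);
}
```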
@@ -1,9 +1,36 @@
#!/usr/bin/env bash

set -e

# this script will download Python source code, compile it, and install it to /usr/local/lib
# then use this python to compile cross-compiled python for aarch64
ARCH=$1
PYTHON_VERSION=3.10.10
PYTHON_SOURCE_DIR=Python-${PYTHON_VERSION}
PYTHON_INSTALL_PATH_AMD64=${PWD}/python-${PYTHON_VERSION}/amd64
PYTHON_INSTALL_PATH_AARCH64=${PWD}/python-${PYTHON_VERSION}/aarch64

function download_python_source_code() {
wget https://www.python.org/ftp/python/$PYTHON_VERSION/Python-$PYTHON_VERSION.tgz
tar -xvf Python-$PYTHON_VERSION.tgz
}

function compile_for_amd64_platform() {
mkdir -p "$PYTHON_INSTALL_PATH_AMD64"

echo "Compiling for amd64 platform..."

./configure \
--prefix="$PYTHON_INSTALL_PATH_AMD64" \
--enable-shared \
ac_cv_pthread_is_default=no ac_cv_pthread=yes ac_cv_cxx_thread=yes \
ac_cv_have_long_long_format=yes \
--disable-ipv6 ac_cv_file__dev_ptmx=no ac_cv_file__dev_ptc=no

make
make install
}

wget https://www.python.org/ftp/python/3.10.10/Python-3.10.10.tgz
tar -xvf Python-3.10.10.tgz
cd Python-3.10.10
# explain Python compile options here a bit:s
# --enable-shared: enable building a shared Python library (default is no) but we do need it for calling from rust
# CC, CXX, AR, LD, RANLIB: set the compiler, archiver, linker, and ranlib programs to use
@@ -14,33 +41,47 @@ cd Python-3.10.10
# ac_cv_have_long_long_format=yes: target platform supports long long type
# disable-ipv6: disable ipv6 support, we don't need it in here
# ac_cv_file__dev_ptmx=no ac_cv_file__dev_ptc=no: disable pty support, we don't need it in here
function compile_for_aarch64_platform() {
export LD_LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LD_LIBRARY_PATH
export LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LIBRARY_PATH
export PATH=$PYTHON_INSTALL_PATH_AMD64/bin:$PATH

mkdir -p "$PYTHON_INSTALL_PATH_AARCH64"

echo "Compiling for aarch64 platform..."
echo "LD_LIBRARY_PATH: $LD_LIBRARY_PATH"
echo "LIBRARY_PATH: $LIBRARY_PATH"
echo "PATH: $PATH"

./configure --build=x86_64-linux-gnu --host=aarch64-linux-gnu \
--prefix="$PYTHON_INSTALL_PATH_AARCH64" --enable-optimizations \
CC=aarch64-linux-gnu-gcc \
CXX=aarch64-linux-gnu-g++ \
AR=aarch64-linux-gnu-ar \
LD=aarch64-linux-gnu-ld \
RANLIB=aarch64-linux-gnu-ranlib \
--enable-shared \
ac_cv_pthread_is_default=no ac_cv_pthread=yes ac_cv_cxx_thread=yes \
ac_cv_have_long_long_format=yes \
--disable-ipv6 ac_cv_file__dev_ptmx=no ac_cv_file__dev_ptc=no

make
make altinstall
}

# Main script starts here.
download_python_source_code

# Enter the python source code directory.
cd $PYTHON_SOURCE_DIR || exit 1

# Build local python first, then build cross-compiled python.
./configure \
--enable-shared \
ac_cv_pthread_is_default=no ac_cv_pthread=yes ac_cv_cxx_thread=yes \
ac_cv_have_long_long_format=yes \
--disable-ipv6 ac_cv_file__dev_ptmx=no ac_cv_file__dev_ptc=no && \
make
make install
cd ..
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/lib/
export LIBRARY_PATH=$LIBRARY_PATH:/usr/local/lib/
export PY_INSTALL_PATH=$(pwd)/python_arm64_build
cd Python-3.10.10 && \
make clean && \
make distclean && \
alias python=python3 && \
./configure --build=x86_64-linux-gnu --host=aarch64-linux-gnu \
--prefix=$PY_INSTALL_PATH --enable-optimizations \
CC=aarch64-linux-gnu-gcc \
CXX=aarch64-linux-gnu-g++ \
AR=aarch64-linux-gnu-ar \
LD=aarch64-linux-gnu-ld \
RANLIB=aarch64-linux-gnu-ranlib \
--enable-shared \
ac_cv_pthread_is_default=no ac_cv_pthread=yes ac_cv_cxx_thread=yes \
ac_cv_have_long_long_format=yes \
--disable-ipv6 ac_cv_file__dev_ptmx=no ac_cv_file__dev_ptc=no && \
make && make altinstall && \
cd ..
compile_for_amd64_platform

# Clean the build directory.
make clean && make distclean

# Cross compile python for aarch64.
if [ "$ARCH" = "aarch64-unknown-linux-gnu" ]; then
compile_for_aarch64_platform
fi
|
||||
@@ -1,6 +1,12 @@
FROM ubuntu:22.04

RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get -y install ca-certificates
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
ca-certificates \
python3.10 \
python3.10-dev \
python3-pip

RUN python3 -m pip install pyarrow

ARG TARGETARCH
||||
|
||||
|
||||
@@ -10,7 +10,7 @@ common-base = { path = "../common/base" }
common-error = { path = "../common/error" }
common-time = { path = "../common/time" }
datatypes = { path = "../datatypes" }
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "3a715150563b89d5dfc81a5838eac1f66a5658a1" }
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "eb760d219206c77dd3a105ecb6a3ba97d9d650ec" }
prost.workspace = true
snafu = { version = "0.7", features = ["backtraces"] }
tonic.workspace = true
||||
|
||||
@@ -19,7 +19,7 @@ use std::fmt::{Debug, Formatter};
use std::sync::Arc;

use api::v1::meta::{RegionStat, TableName};
use common_telemetry::info;
use common_telemetry::{info, warn};
use snafu::{OptionExt, ResultExt};
use table::engine::{EngineContext, TableEngineRef};
use table::metadata::TableId;
@@ -228,8 +228,10 @@ pub(crate) async fn handle_system_table_request<'a, M: CatalogManager>(

/// The stat of regions in the datanode node.
/// The number of regions can be got from len of vec.
pub async fn region_stats(catalog_manager: &CatalogManagerRef) -> Result<Vec<RegionStat>> {
pub async fn datanode_stat(catalog_manager: &CatalogManagerRef) -> Result<(u64, Vec<RegionStat>)> {
let mut region_number: u64 = 0;
let mut region_stats = Vec::new();

for catalog_name in catalog_manager.catalog_names()? {
let catalog =
catalog_manager
@@ -255,16 +257,12 @@ pub async fn region_stats(catalog_manager: &CatalogManagerRef) -> Result<Vec<Reg
table_info: &table_name,
})?;

region_stats.extend(
table
.region_stats()
.context(error::RegionStatsSnafu {
catalog: &catalog_name,
schema: &schema_name,
table: &table_name,
})?
.into_iter()
.map(|stat| RegionStat {
let region_numbers = &table.table_info().meta.region_numbers;
region_number += region_numbers.len() as u64;

match table.region_stats() {
Ok(stats) => {
let stats = stats.into_iter().map(|stat| RegionStat {
region_id: stat.region_id,
table_name: Some(TableName {
catalog_name: catalog_name.clone(),
@@ -273,10 +271,17 @@ pub async fn region_stats(catalog_manager: &CatalogManagerRef) -> Result<Vec<Reg
}),
approximate_bytes: stat.disk_usage_bytes as i64,
..Default::default()
}),
);
});

region_stats.extend(stats);
}
Err(e) => {
warn!("Failed to get region status, err: {:?}", e);
}
};
}
}
}
Ok(region_stats)

Ok((region_number, region_stats))
}
||||
|
||||
@@ -23,7 +23,7 @@ enum_dispatch = "0.3"
futures-util.workspace = true
parking_lot = "0.12"
prost.workspace = true
rand = "0.8"
rand.workspace = true
snafu.workspace = true
tonic.workspace = true
||||
|
||||
|
||||
@@ -14,6 +14,7 @@

use std::sync::Arc;

use api::v1::greptime_database_client::GreptimeDatabaseClient;
use arrow_flight::flight_service_client::FlightServiceClient;
use common_grpc::channel_manager::ChannelManager;
use parking_lot::RwLock;
@@ -23,6 +24,10 @@ use tonic::transport::Channel;
use crate::load_balance::{LoadBalance, Loadbalancer};
use crate::{error, Result};

pub(crate) struct DatabaseClient {
pub(crate) inner: GreptimeDatabaseClient<Channel>,
}

pub(crate) struct FlightClient {
addr: String,
client: FlightServiceClient<Channel>,
@@ -118,7 +123,7 @@ impl Client {
self.inner.set_peers(urls);
}

pub(crate) fn make_client(&self) -> Result<FlightClient> {
fn find_channel(&self) -> Result<(String, Channel)> {
let addr = self
.inner
.get_peer()
@@ -131,11 +136,23 @@ impl Client {
.channel_manager
.get(&addr)
.context(error::CreateChannelSnafu { addr: &addr })?;
Ok((addr, channel))
}

pub(crate) fn make_flight_client(&self) -> Result<FlightClient> {
let (addr, channel) = self.find_channel()?;
Ok(FlightClient {
addr,
client: FlightServiceClient::new(channel),
})
}

pub(crate) fn make_database_client(&self) -> Result<DatabaseClient> {
let (_, channel) = self.find_channel()?;
Ok(DatabaseClient {
inner: GreptimeDatabaseClient::new(channel),
})
}
}

#[cfg(test)]
||||
|
||||
@@ -12,15 +12,14 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::str::FromStr;

use api::v1::auth_header::AuthScheme;
use api::v1::ddl_request::Expr as DdlExpr;
use api::v1::greptime_request::Request;
use api::v1::query_request::Query;
use api::v1::{
AlterExpr, AuthHeader, CreateTableExpr, DdlRequest, DropTableExpr, FlushTableExpr,
GreptimeRequest, InsertRequest, PromRangeQuery, QueryRequest, RequestHeader,
greptime_response, AffectedRows, AlterExpr, AuthHeader, CreateTableExpr, DdlRequest,
DropTableExpr, FlushTableExpr, GreptimeRequest, InsertRequest, PromRangeQuery, QueryRequest,
RequestHeader,
};
use arrow_flight::{FlightData, Ticket};
use common_error::prelude::*;
@@ -31,7 +30,9 @@ use futures_util::{TryFutureExt, TryStreamExt};
use prost::Message;
use snafu::{ensure, ResultExt};

use crate::error::{ConvertFlightDataSnafu, IllegalFlightMessagesSnafu};
use crate::error::{
ConvertFlightDataSnafu, IllegalDatabaseResponseSnafu, IllegalFlightMessagesSnafu,
};
use crate::{error, Client, Result};

#[derive(Clone, Debug)]
@@ -78,8 +79,26 @@ impl Database {
});
}

pub async fn insert(&self, request: InsertRequest) -> Result<Output> {
self.do_get(Request::Insert(request)).await
pub async fn insert(&self, request: InsertRequest) -> Result<u32> {
let mut client = self.client.make_database_client()?.inner;
let request = GreptimeRequest {
header: Some(RequestHeader {
catalog: self.catalog.clone(),
schema: self.schema.clone(),
authorization: self.ctx.auth_header.clone(),
}),
request: Some(Request::Insert(request)),
};
let response = client
.handle(request)
.await?
.into_inner()
.response
.context(IllegalDatabaseResponseSnafu {
err_msg: "GreptimeResponse is empty",
})?;
let greptime_response::Response::AffectedRows(AffectedRows { value }) = response;
Ok(value)
}

pub async fn sql(&self, sql: &str) -> Result<Output> {
@@ -155,7 +174,7 @@ impl Database {
ticket: request.encode_to_vec().into(),
};

let mut client = self.client.make_client()?;
let mut client = self.client.make_flight_client()?;

// TODO(LFC): Streaming get flight data.
let flight_data: Vec<FlightData> = client
@@ -164,22 +183,22 @@ impl Database {
.and_then(|response| response.into_inner().try_collect())
.await
.map_err(|e| {
let code = get_metadata_value(&e, INNER_ERROR_CODE)
.and_then(|s| StatusCode::from_str(&s).ok())
.unwrap_or(StatusCode::Unknown);
let msg = get_metadata_value(&e, INNER_ERROR_MSG).unwrap_or(e.to_string());
error::ExternalSnafu { code, msg }
let tonic_code = e.code();
let e: error::Error = e.into();
let code = e.status_code();
let msg = e.to_string();
error::ServerSnafu { code, msg }
.fail::<()>()
.map_err(BoxedError::new)
.context(error::FlightGetSnafu {
tonic_code: e.code(),
tonic_code,
addr: client.addr(),
})
.map_err(|error| {
logging::error!(
"Failed to do Flight get, addr: {}, code: {}, source: {}",
client.addr(),
e.code(),
tonic_code,
error
);
error
@@ -210,12 +229,6 @@ impl Database {
}
}

fn get_metadata_value(e: &tonic::Status, key: &str) -> Option<String> {
e.metadata()
.get(key)
.and_then(|v| String::from_utf8(v.as_bytes().to_vec()).ok())
}

#[derive(Default, Debug, Clone)]
pub struct FlightContext {
auth_header: Option<AuthHeader>,
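A minimal usage sketch of the reworked `insert` shown above: it now travels over the `GreptimeDatabase` gRPC service and resolves to the number of affected rows rather than an `Output`. How the `Database` handle itself is constructed is not shown in this diff and is assumed here:

```rust
use client::api::v1::InsertRequest;
use client::{Database, Result};

// Sketch only: `db` is assumed to be an already-constructed Database handle.
async fn write(db: &Database, request: InsertRequest) -> Result<()> {
    // With this change the call returns the affected row count directly.
    let affected: u32 = db.insert(request).await?;
    println!("inserted {affected} rows");
    Ok(())
}
```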
||||
|
||||
@@ -13,9 +13,10 @@
// limitations under the License.

use std::any::Any;
use std::str::FromStr;

use common_error::prelude::*;
use tonic::Code;
use tonic::{Code, Status};

#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
@@ -65,9 +66,12 @@ pub enum Error {
source: common_grpc::error::Error,
},

/// Error deserialized from gRPC metadata
// Server error carried in Tonic Status's metadata.
#[snafu(display("{}", msg))]
ExternalError { code: StatusCode, msg: String },
Server { code: StatusCode, msg: String },

#[snafu(display("Illegal Database response: {err_msg}"))]
IllegalDatabaseResponse { err_msg: String },
}

pub type Result<T> = std::result::Result<T, Error>;
@@ -77,13 +81,15 @@ impl ErrorExt for Error {
match self {
Error::IllegalFlightMessages { .. }
| Error::ColumnDataType { .. }
| Error::MissingField { .. } => StatusCode::Internal,
| Error::MissingField { .. }
| Error::IllegalDatabaseResponse { .. } => StatusCode::Internal,

Error::Server { code, .. } => *code,
Error::FlightGet { source, .. } => source.status_code(),
Error::CreateChannel { source, .. } | Error::ConvertFlightData { source } => {
source.status_code()
}
Error::IllegalGrpcClientState { .. } => StatusCode::Unexpected,
Error::ExternalError { code, .. } => *code,
}
}

@@ -95,3 +101,21 @@ impl ErrorExt for Error {
self
}
}

impl From<Status> for Error {
fn from(e: Status) -> Self {
fn get_metadata_value(e: &Status, key: &str) -> Option<String> {
e.metadata()
.get(key)
.and_then(|v| String::from_utf8(v.as_bytes().to_vec()).ok())
}

let code = get_metadata_value(&e, INNER_ERROR_CODE)
.and_then(|s| StatusCode::from_str(&s).ok())
.unwrap_or(StatusCode::Unknown);

let msg = get_metadata_value(&e, INNER_ERROR_MSG).unwrap_or(e.to_string());

Self::Server { code, msg }
}
}
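With the new `From<Status>` conversion above, call sites inside the client crate no longer parse the gRPC metadata themselves: a failed tonic call can be funneled straight into `Error`, and `status_code()` then reports the server-carried code, falling back to `StatusCode::Unknown` when the metadata is absent. A hedged sketch of that call-site pattern, written as if inside the client crate:

```rust
use common_error::prelude::*; // brings ErrorExt::status_code into scope

// Convert a tonic failure into the crate's Error; for Error::Server the
// decoded INNER_ERROR_CODE is surfaced, otherwise the variant's own mapping.
fn on_grpc_failure(status: tonic::Status) -> crate::error::Error {
    let err: crate::error::Error = status.into();
    let _code = err.status_code();
    err
}
```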
||||
|
||||
@@ -31,7 +31,6 @@ impl Instance {
}

pub async fn stop(&self) -> Result<()> {
// TODO: handle cli shutdown
Ok(())
}
}
||||
|
||||
@@ -21,7 +21,7 @@ use meta_client::MetaClientOptions;
use servers::Mode;
use snafu::ResultExt;

use crate::error::{Error, MissingConfigSnafu, Result, StartDatanodeSnafu};
use crate::error::{Error, MissingConfigSnafu, Result, ShutdownDatanodeSnafu, StartDatanodeSnafu};
use crate::toml_loader;

pub struct Instance {
@@ -34,8 +34,10 @@ impl Instance {
}

pub async fn stop(&self) -> Result<()> {
// TODO: handle datanode shutdown
Ok(())
self.datanode
.shutdown()
.await
.context(ShutdownDatanodeSnafu)
}
}
||||
|
||||
|
||||
@@ -26,12 +26,24 @@ pub enum Error {
source: datanode::error::Error,
},

#[snafu(display("Failed to shutdown datanode, source: {}", source))]
ShutdownDatanode {
#[snafu(backtrace)]
source: datanode::error::Error,
},

#[snafu(display("Failed to start frontend, source: {}", source))]
StartFrontend {
#[snafu(backtrace)]
source: frontend::error::Error,
},

#[snafu(display("Failed to shutdown frontend, source: {}", source))]
ShutdownFrontend {
#[snafu(backtrace)]
source: frontend::error::Error,
},

#[snafu(display("Failed to build meta server, source: {}", source))]
BuildMetaServer {
#[snafu(backtrace)]
@@ -44,6 +56,12 @@ pub enum Error {
source: meta_srv::error::Error,
},

#[snafu(display("Failed to shutdown meta server, source: {}", source))]
ShutdownMetaServer {
#[snafu(backtrace)]
source: meta_srv::error::Error,
},

#[snafu(display("Failed to read config file: {}, source: {}", path, source))]
ReadConfig {
path: String,
@@ -143,7 +161,10 @@ impl ErrorExt for Error {
match self {
Error::StartDatanode { source } => source.status_code(),
Error::StartFrontend { source } => source.status_code(),
Error::ShutdownDatanode { source } => source.status_code(),
Error::ShutdownFrontend { source } => source.status_code(),
Error::StartMetaServer { source } => source.status_code(),
Error::ShutdownMetaServer { source } => source.status_code(),
Error::BuildMetaServer { source } => source.status_code(),
Error::UnsupportedSelectorType { source, .. } => source.status_code(),
Error::ReadConfig { .. } | Error::ParseConfig { .. } | Error::MissingConfig { .. } => {
||||
|
||||
@@ -47,8 +47,10 @@ impl Instance {
}

pub async fn stop(&self) -> Result<()> {
// TODO: handle frontend shutdown
Ok(())
self.frontend
.shutdown()
.await
.context(error::ShutdownFrontendSnafu)
}
}
||||
|
||||
|
||||
@@ -30,13 +30,14 @@ impl Instance {
self.instance
.start()
.await
.context(error::StartMetaServerSnafu)?;
Ok(())
.context(error::StartMetaServerSnafu)
}

pub async fn stop(&self) -> Result<()> {
// TODO: handle metasrv shutdown
Ok(())
self.instance
.shutdown()
.await
.context(error::ShutdownMetaServerSnafu)
}
}
||||
|
||||
|
||||
@@ -36,7 +36,10 @@ use servers::tls::{TlsMode, TlsOption};
use servers::Mode;
use snafu::ResultExt;

use crate::error::{Error, IllegalConfigSnafu, Result, StartDatanodeSnafu, StartFrontendSnafu};
use crate::error::{
Error, IllegalConfigSnafu, Result, ShutdownDatanodeSnafu, ShutdownFrontendSnafu,
StartDatanodeSnafu, StartFrontendSnafu,
};
use crate::frontend::load_frontend_plugins;
use crate::toml_loader;

@@ -152,7 +155,17 @@ impl Instance {
}

pub async fn stop(&self) -> Result<()> {
// TODO: handle standalone shutdown
self.frontend
.shutdown()
.await
.context(ShutdownFrontendSnafu)?;

self.datanode
.shutdown_instance()
.await
.context(ShutdownDatanodeSnafu)?;
info!("Datanode instance stopped.");

Ok(())
}
}
||||
|
||||
src/common/datasource/Cargo.toml (new file): 13 lines
@@ -0,0 +1,13 @@
|
||||
[package]
|
||||
name = "common-datasource"
|
||||
version.workspace = true
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[dependencies]
|
||||
common-error = { path = "../error" }
|
||||
futures.workspace = true
|
||||
object-store = { path = "../../object-store" }
|
||||
regex = "1.7"
|
||||
snafu.workspace = true
|
||||
url = "2.3"
|
||||
src/common/datasource/src/error.rs (new file): 75 lines
@@ -0,0 +1,75 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::any::Any;
|
||||
|
||||
use common_error::prelude::*;
|
||||
use url::ParseError;
|
||||
|
||||
#[derive(Debug, Snafu)]
|
||||
#[snafu(visibility(pub))]
|
||||
pub enum Error {
|
||||
#[snafu(display("Unsupported backend protocol: {}", protocol))]
|
||||
UnsupportedBackendProtocol { protocol: String },
|
||||
|
||||
#[snafu(display("empty host: {}", url))]
|
||||
EmptyHostPath { url: String },
|
||||
|
||||
#[snafu(display("Invalid path: {}", path))]
|
||||
InvalidPath { path: String },
|
||||
|
||||
#[snafu(display("Invalid url: {}, error :{}", url, source))]
|
||||
InvalidUrl { url: String, source: ParseError },
|
||||
|
||||
#[snafu(display("Failed to build backend, source: {}", source))]
|
||||
BuildBackend {
|
||||
source: object_store::Error,
|
||||
backtrace: Backtrace,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to list object in path: {}, source: {}", path, source))]
|
||||
ListObjects {
|
||||
path: String,
|
||||
backtrace: Backtrace,
|
||||
source: object_store::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Invalid connection: {}", msg))]
|
||||
InvalidConnection { msg: String },
|
||||
}
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
|
||||
impl ErrorExt for Error {
|
||||
fn status_code(&self) -> StatusCode {
|
||||
use Error::*;
|
||||
match self {
|
||||
BuildBackend { .. } | ListObjects { .. } => StatusCode::StorageUnavailable,
|
||||
|
||||
UnsupportedBackendProtocol { .. }
|
||||
| InvalidConnection { .. }
|
||||
| InvalidUrl { .. }
|
||||
| EmptyHostPath { .. }
|
||||
| InvalidPath { .. } => StatusCode::InvalidArguments,
|
||||
}
|
||||
}
|
||||
|
||||
fn backtrace_opt(&self) -> Option<&Backtrace> {
|
||||
ErrorCompat::backtrace(self)
|
||||
}
|
||||
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
}
|
||||
src/common/datasource/src/lib.rs (new file): 18 lines
@@ -0,0 +1,18 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
pub mod error;
|
||||
pub mod lister;
|
||||
pub mod object_store;
|
||||
pub mod util;
|
||||
src/common/datasource/src/lister.rs (new file): 81 lines
@@ -0,0 +1,81 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use futures::{future, TryStreamExt};
|
||||
use object_store::{Object, ObjectStore};
|
||||
use regex::Regex;
|
||||
use snafu::ResultExt;
|
||||
|
||||
use crate::error::{self, Result};
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub enum Source {
|
||||
Filename(String),
|
||||
Dir,
|
||||
}
|
||||
|
||||
pub struct Lister {
|
||||
object_store: ObjectStore,
|
||||
source: Source,
|
||||
path: String,
|
||||
regex: Option<Regex>,
|
||||
}
|
||||
|
||||
impl Lister {
|
||||
pub fn new(
|
||||
object_store: ObjectStore,
|
||||
source: Source,
|
||||
path: String,
|
||||
regex: Option<Regex>,
|
||||
) -> Self {
|
||||
Lister {
|
||||
object_store,
|
||||
source,
|
||||
path,
|
||||
regex,
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn list(&self) -> Result<Vec<Object>> {
|
||||
match &self.source {
|
||||
Source::Dir => {
|
||||
let streamer = self
|
||||
.object_store
|
||||
.object(&self.path)
|
||||
.list()
|
||||
.await
|
||||
.context(error::ListObjectsSnafu { path: &self.path })?;
|
||||
|
||||
streamer
|
||||
.try_filter(|f| {
|
||||
let res = self
|
||||
.regex
|
||||
.as_ref()
|
||||
.map(|x| x.is_match(f.name()))
|
||||
.unwrap_or(true);
|
||||
future::ready(res)
|
||||
})
|
||||
.try_collect::<Vec<_>>()
|
||||
.await
|
||||
.context(error::ListObjectsSnafu { path: &self.path })
|
||||
}
|
||||
Source::Filename(filename) => {
|
||||
let obj = self
|
||||
.object_store
|
||||
.object(&format!("{}{}", self.path, filename));
|
||||
|
||||
Ok(vec![obj])
|
||||
}
|
||||
}
|
||||
}
|
||||
}
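A short usage sketch for the `Lister` defined above, written as if inside this crate. The object-store construction and the `data/` prefix are assumptions for illustration; the regex keeps only `.csv` objects, mirroring the `try_filter` above:

```rust
use regex::Regex;

use crate::error::Result;
use crate::lister::{Lister, Source};

// Sketch: list every ".csv" object under an assumed "data/" prefix.
async fn list_csv(object_store: object_store::ObjectStore) -> Result<Vec<String>> {
    let lister = Lister::new(
        object_store,
        Source::Dir,
        "data/".to_string(),
        Some(Regex::new(r"\.csv$").unwrap()),
    );
    // Each entry is an object_store::Object; keep just the names here.
    Ok(lister
        .list()
        .await?
        .iter()
        .map(|object| object.name().to_string())
        .collect())
}
```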
|
||||
src/common/datasource/src/object_store.rs (new file): 60 lines
@@ -0,0 +1,60 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
pub mod fs;
|
||||
pub mod s3;
|
||||
use std::collections::HashMap;
|
||||
|
||||
use object_store::ObjectStore;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use url::{ParseError, Url};
|
||||
|
||||
use self::fs::build_fs_backend;
|
||||
use self::s3::build_s3_backend;
|
||||
use crate::error::{self, Result};
|
||||
|
||||
pub const FS_SCHEMA: &str = "FS";
|
||||
pub const S3_SCHEMA: &str = "S3";
|
||||
|
||||
/// parse url returns (schema,Option<host>,path)
|
||||
pub fn parse_url(url: &str) -> Result<(String, Option<String>, String)> {
|
||||
let parsed_url = Url::parse(url);
|
||||
match parsed_url {
|
||||
Ok(url) => Ok((
|
||||
url.scheme().to_string(),
|
||||
url.host_str().map(|s| s.to_string()),
|
||||
url.path().to_string(),
|
||||
)),
|
||||
Err(ParseError::RelativeUrlWithoutBase) => {
|
||||
Ok((FS_SCHEMA.to_string(), None, url.to_string()))
|
||||
}
|
||||
Err(err) => Err(err).context(error::InvalidUrlSnafu { url }),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn build_backend(url: &str, connection: HashMap<String, String>) -> Result<ObjectStore> {
|
||||
let (schema, host, _path) = parse_url(url)?;
|
||||
|
||||
match schema.to_uppercase().as_str() {
|
||||
S3_SCHEMA => {
|
||||
let host = host.context(error::EmptyHostPathSnafu {
|
||||
url: url.to_string(),
|
||||
})?;
|
||||
Ok(build_s3_backend(&host, "/", connection)?)
|
||||
}
|
||||
FS_SCHEMA => Ok(build_fs_backend("/")?),
|
||||
|
||||
_ => error::UnsupportedBackendProtocolSnafu { protocol: schema }.fail(),
|
||||
}
|
||||
}
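A sketch of how the two helpers above combine, written as if inside this crate. The bucket name, object path, and connection values are placeholders; the recognized S3 option keys (REGION, ACCESS_KEY_ID, ...) come from the sibling `s3.rs` module:

```rust
use std::collections::HashMap;

use crate::error::Result;
use crate::object_store::{build_backend, parse_url};

fn backend_example() -> Result<()> {
    // URLs with a scheme are split into (scheme, host, path)...
    let (scheme, host, path) = parse_url("s3://my-bucket/data/file.parquet")?;
    assert_eq!(scheme, "s3");
    assert_eq!(host.as_deref(), Some("my-bucket"));
    assert_eq!(path, "/data/file.parquet");

    // ...while a bare path falls back to the FS scheme.
    let (scheme, _, path) = parse_url("/tmp/data/file.parquet")?;
    assert_eq!(scheme, "FS");
    assert_eq!(path, "/tmp/data/file.parquet");

    // Connection options are plain string pairs; unknown schemes are rejected.
    let mut connection = HashMap::new();
    connection.insert("REGION".to_string(), "us-east-1".to_string());
    let _store = build_backend("s3://my-bucket/data/", connection)?;
    Ok(())
}
```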
|
||||
src/common/datasource/src/object_store/fs.rs (new file): 28 lines
@@ -0,0 +1,28 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use object_store::services::Fs;
|
||||
use object_store::{ObjectStore, ObjectStoreBuilder};
|
||||
use snafu::ResultExt;
|
||||
|
||||
use crate::error::{self, Result};
|
||||
|
||||
pub fn build_fs_backend(root: &str) -> Result<ObjectStore> {
|
||||
let accessor = Fs::default()
|
||||
.root(root)
|
||||
.build()
|
||||
.context(error::BuildBackendSnafu)?;
|
||||
|
||||
Ok(ObjectStore::new(accessor).finish())
|
||||
}
|
||||
src/common/datasource/src/object_store/s3.rs (new file): 79 lines
@@ -0,0 +1,79 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::HashMap;
|
||||
|
||||
use object_store::services::S3;
|
||||
use object_store::{ObjectStore, ObjectStoreBuilder};
|
||||
use snafu::ResultExt;
|
||||
|
||||
use crate::error::{self, Result};
|
||||
|
||||
const ENDPOINT_URL: &str = "ENDPOINT_URL";
|
||||
const ACCESS_KEY_ID: &str = "ACCESS_KEY_ID";
|
||||
const SECRET_ACCESS_KEY: &str = "SECRET_ACCESS_KEY";
|
||||
const SESSION_TOKEN: &str = "SESSION_TOKEN";
|
||||
const REGION: &str = "REGION";
|
||||
const ENABLE_VIRTUAL_HOST_STYLE: &str = "ENABLE_VIRTUAL_HOST_STYLE";
|
||||
|
||||
pub fn build_s3_backend(
|
||||
host: &str,
|
||||
path: &str,
|
||||
connection: HashMap<String, String>,
|
||||
) -> Result<ObjectStore> {
|
||||
let mut builder = S3::default();
|
||||
|
||||
builder.root(path);
|
||||
|
||||
builder.bucket(host);
|
||||
|
||||
if let Some(endpoint) = connection.get(ENDPOINT_URL) {
|
||||
builder.endpoint(endpoint);
|
||||
}
|
||||
|
||||
if let Some(region) = connection.get(REGION) {
|
||||
builder.region(region);
|
||||
}
|
||||
|
||||
if let Some(key_id) = connection.get(ACCESS_KEY_ID) {
|
||||
builder.access_key_id(key_id);
|
||||
}
|
||||
|
||||
if let Some(key) = connection.get(SECRET_ACCESS_KEY) {
|
||||
builder.secret_access_key(key);
|
||||
}
|
||||
|
||||
if let Some(session_token) = connection.get(SESSION_TOKEN) {
|
||||
builder.security_token(session_token);
|
||||
}
|
||||
|
||||
if let Some(enable_str) = connection.get(ENABLE_VIRTUAL_HOST_STYLE) {
|
||||
let enable = enable_str.as_str().parse::<bool>().map_err(|e| {
|
||||
error::InvalidConnectionSnafu {
|
||||
msg: format!(
|
||||
"failed to parse the option {}={}, {}",
|
||||
ENABLE_VIRTUAL_HOST_STYLE, enable_str, e
|
||||
),
|
||||
}
|
||||
.build()
|
||||
})?;
|
||||
if enable {
|
||||
builder.enable_virtual_host_style();
|
||||
}
|
||||
}
|
||||
|
||||
let accessor = builder.build().context(error::BuildBackendSnafu)?;
|
||||
|
||||
Ok(ObjectStore::new(accessor).finish())
|
||||
}
|
||||
src/common/datasource/src/util.rs (new file): 125 lines
@@ -0,0 +1,125 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
pub fn find_dir_and_filename(path: &str) -> (String, Option<String>) {
|
||||
if path.is_empty() {
|
||||
("/".to_string(), None)
|
||||
} else if path.ends_with('/') {
|
||||
(path.to_string(), None)
|
||||
} else if let Some(idx) = path.rfind('/') {
|
||||
(
|
||||
path[..idx + 1].to_string(),
|
||||
Some(path[idx + 1..].to_string()),
|
||||
)
|
||||
} else {
|
||||
("/".to_string(), Some(path.to_string()))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
use url::Url;
|
||||
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_parse_uri() {
|
||||
struct Test<'a> {
|
||||
uri: &'a str,
|
||||
expected_path: &'a str,
|
||||
expected_schema: &'a str,
|
||||
}
|
||||
|
||||
let tests = [
|
||||
Test {
|
||||
uri: "s3://bucket/to/path/",
|
||||
expected_path: "/to/path/",
|
||||
expected_schema: "s3",
|
||||
},
|
||||
Test {
|
||||
uri: "fs:///to/path/",
|
||||
expected_path: "/to/path/",
|
||||
expected_schema: "fs",
|
||||
},
|
||||
Test {
|
||||
uri: "fs:///to/path/file",
|
||||
expected_path: "/to/path/file",
|
||||
expected_schema: "fs",
|
||||
},
|
||||
];
|
||||
for test in tests {
|
||||
let parsed_uri = Url::parse(test.uri).unwrap();
|
||||
assert_eq!(parsed_uri.path(), test.expected_path);
|
||||
assert_eq!(parsed_uri.scheme(), test.expected_schema);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_path_and_dir() {
|
||||
let parsed = Url::from_file_path("/to/path/file").unwrap();
|
||||
assert_eq!(parsed.path(), "/to/path/file");
|
||||
|
||||
let parsed = Url::from_directory_path("/to/path/").unwrap();
|
||||
assert_eq!(parsed.path(), "/to/path/");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_find_dir_and_filename() {
|
||||
struct Test<'a> {
|
||||
path: &'a str,
|
||||
expected_dir: &'a str,
|
||||
expected_filename: Option<String>,
|
||||
}
|
||||
|
||||
let tests = [
|
||||
Test {
|
||||
path: "to/path/",
|
||||
expected_dir: "to/path/",
|
||||
expected_filename: None,
|
||||
},
|
||||
Test {
|
||||
path: "to/path/filename",
|
||||
expected_dir: "to/path/",
|
||||
expected_filename: Some("filename".into()),
|
||||
},
|
||||
Test {
|
||||
path: "/to/path/filename",
|
||||
expected_dir: "/to/path/",
|
||||
expected_filename: Some("filename".into()),
|
||||
},
|
||||
Test {
|
||||
path: "/",
|
||||
expected_dir: "/",
|
||||
expected_filename: None,
|
||||
},
|
||||
Test {
|
||||
path: "filename",
|
||||
expected_dir: "/",
|
||||
expected_filename: Some("filename".into()),
|
||||
},
|
||||
Test {
|
||||
path: "",
|
||||
expected_dir: "/",
|
||||
expected_filename: None,
|
||||
},
|
||||
];
|
||||
|
||||
for test in tests {
|
||||
let (path, filename) = find_dir_and_filename(test.path);
|
||||
assert_eq!(test.expected_dir, path);
|
||||
assert_eq!(test.expected_filename, filename)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -32,7 +32,7 @@ pub enum Error {
DecodeInsert { source: DecodeError },

#[snafu(display("Illegal insert data"))]
IllegalInsertData,
IllegalInsertData { backtrace: Backtrace },

#[snafu(display("Column datatype error, source: {}", source))]
ColumnDataType {
||||
|
||||
@@ -26,7 +26,7 @@ tower = "0.4"

[dev-dependencies]
criterion = "0.4"
rand = "0.8"
rand.workspace = true

[[bench]]
name = "bench_main"
||||
|
||||
@@ -12,4 +12,4 @@ serde_json = "1.0"
snafu = { version = "0.7", features = ["backtraces"] }

[dev-dependencies]
rand = "0.8"
rand.workspace = true
||||
|
||||
@@ -1,4 +1,3 @@
#![feature(int_roundings)]
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
||||
|
||||
@@ -26,6 +26,7 @@ use snafu::{OptionExt, ResultExt};

use crate::error;
use crate::error::{ArithmeticOverflowSnafu, Error, ParseTimestampSnafu, TimestampOverflowSnafu};
use crate::util::div_ceil;

#[derive(Debug, Clone, Default, Copy, Serialize, Deserialize)]
pub struct Timestamp {
@@ -143,7 +144,7 @@ impl Timestamp {
Some(Timestamp::new(value, unit))
} else {
let mul = unit.factor() / self.unit().factor();
Some(Timestamp::new(self.value.div_ceil(mul as i64), unit))
Some(Timestamp::new(div_ceil(self.value, mul as i64), unit))
}
}
||||
|
||||
|
||||
@@ -17,6 +17,17 @@ pub fn current_time_millis() -> i64 {
chrono::Utc::now().timestamp_millis()
}

/// Port of rust unstable features `int_roundings`.
pub(crate) fn div_ceil(this: i64, rhs: i64) -> i64 {
let d = this / rhs;
let r = this % rhs;
if r > 0 && rhs > 0 {
d + 1
} else {
d
}
}

#[cfg(test)]
mod tests {
use std::time::{self, SystemTime};
@@ -42,4 +53,10 @@ mod tests {
assert_eq!(datetime_std.hour(), datetime_now.hour());
assert_eq!(datetime_std.minute(), datetime_now.minute());
}

#[test]
fn test_div_ceil() {
let v0 = 9223372036854676001;
assert_eq!(9223372036854677, div_ceil(v0, 1000));
}
}
||||
|
||||
@@ -20,6 +20,7 @@ catalog = { path = "../catalog" }
|
||||
common-base = { path = "../common/base" }
|
||||
common-catalog = { path = "../common/catalog" }
|
||||
common-error = { path = "../common/error" }
|
||||
common-datasource = { path = "../common/datasource" }
|
||||
common-grpc = { path = "../common/grpc" }
|
||||
common-grpc-expr = { path = "../common/grpc-expr" }
|
||||
common-procedure = { path = "../common/procedure" }
|
||||
|
||||
@@ -255,7 +255,7 @@ impl Datanode {
|
||||
self.instance.clone()
|
||||
}
|
||||
|
||||
async fn shutdown_instance(&self) -> Result<()> {
|
||||
pub async fn shutdown_instance(&self) -> Result<()> {
|
||||
self.instance.shutdown().await
|
||||
}
|
||||
|
||||
|
||||
@@ -14,6 +14,7 @@
|
||||
|
||||
use std::any::Any;
|
||||
|
||||
use common_datasource::error::Error as DataSourceError;
|
||||
use common_error::prelude::*;
|
||||
use common_procedure::ProcedureId;
|
||||
use common_recordbatch::error::Error as RecordBatchError;
|
||||
@@ -218,7 +219,13 @@ pub enum Error {
|
||||
|
||||
#[snafu(display("Failed to build backend, source: {}", source))]
|
||||
BuildBackend {
|
||||
source: object_store::Error,
|
||||
#[snafu(backtrace)]
|
||||
source: DataSourceError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to parse url, source: {}", source))]
|
||||
ParseUrl {
|
||||
source: DataSourceError,
|
||||
backtrace: Backtrace,
|
||||
},
|
||||
|
||||
@@ -249,6 +256,12 @@ pub enum Error {
|
||||
source: regex::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to list objects, source: {}", source))]
|
||||
ListObjects {
|
||||
#[snafu(backtrace)]
|
||||
source: DataSourceError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to parse the data, source: {}", source))]
|
||||
ParseDataTypes {
|
||||
#[snafu(backtrace)]
|
||||
@@ -475,13 +488,6 @@ pub enum Error {
|
||||
source: object_store::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to lists object in path: {}, source: {}", path, source))]
|
||||
ListObjects {
|
||||
path: String,
|
||||
backtrace: Backtrace,
|
||||
source: object_store::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Unrecognized table option: {}", source))]
|
||||
UnrecognizedTableOption {
|
||||
#[snafu(backtrace)]
|
||||
@@ -584,7 +590,8 @@ impl ErrorExt for Error {
|
||||
| DatabaseNotFound { .. }
|
||||
| MissingNodeId { .. }
|
||||
| MissingMetasrvOpts { .. }
|
||||
| ColumnNoneDefaultValue { .. } => StatusCode::InvalidArguments,
|
||||
| ColumnNoneDefaultValue { .. }
|
||||
| ParseUrl { .. } => StatusCode::InvalidArguments,
|
||||
|
||||
// TODO(yingwen): Further categorize http error.
|
||||
StartServer { .. }
|
||||
|
||||
@@ -17,7 +17,7 @@ use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use api::v1::meta::{HeartbeatRequest, HeartbeatResponse, NodeStat, Peer};
|
||||
use catalog::{region_stats, CatalogManagerRef};
|
||||
use catalog::{datanode_stat, CatalogManagerRef};
|
||||
use common_telemetry::{error, info, warn};
|
||||
use meta_client::client::{HeartbeatSender, MetaClient};
|
||||
use snafu::ResultExt;
|
||||
@@ -106,8 +106,8 @@ impl HeartbeatTask {
|
||||
let mut tx = Self::create_streams(&meta_client, running.clone()).await?;
|
||||
common_runtime::spawn_bg(async move {
|
||||
while running.load(Ordering::Acquire) {
|
||||
let (region_num, region_stats) = match region_stats(&catalog_manager_clone).await {
|
||||
Ok(region_stats) => (region_stats.len() as i64, region_stats),
|
||||
let (region_num, region_stats) = match datanode_stat(&catalog_manager_clone).await {
|
||||
Ok(datanode_stat) => (datanode_stat.0 as i64, datanode_stat.1),
|
||||
Err(e) => {
|
||||
error!("failed to get region status, err: {e:?}");
|
||||
(-1, vec![])
|
||||
|
||||
@@ -28,12 +28,10 @@ use servers::prom::PromHandler;
|
||||
use session::context::{QueryContext, QueryContextRef};
|
||||
use snafu::prelude::*;
|
||||
use sql::ast::ObjectName;
|
||||
use sql::statements::copy::CopyTable;
|
||||
use sql::statements::copy::{CopyTable, CopyTableArgument};
|
||||
use sql::statements::statement::Statement;
|
||||
use table::engine::TableReference;
|
||||
use table::requests::{
|
||||
CopyTableFromRequest, CopyTableRequest, CreateDatabaseRequest, DropTableRequest,
|
||||
};
|
||||
use table::requests::{CopyDirection, CopyTableRequest, CreateDatabaseRequest, DropTableRequest};
|
||||
|
||||
use crate::error::{
|
||||
self, BumpTableIdSnafu, ExecuteSqlSnafu, ExecuteStatementSnafu, PlanStatementSnafu, Result,
|
||||
@@ -160,39 +158,54 @@ impl Instance {
|
||||
QueryStatement::Sql(Statement::ShowCreateTable(_show_create_table)) => {
|
||||
unimplemented!("SHOW CREATE TABLE is unimplemented yet");
|
||||
}
|
||||
QueryStatement::Sql(Statement::Copy(copy_table)) => match copy_table {
|
||||
CopyTable::To(copy_table) => {
|
||||
let (catalog_name, schema_name, table_name) =
|
||||
table_idents_to_full_name(©_table.table_name, query_ctx.clone())?;
|
||||
let file_name = copy_table.file_name;
|
||||
let req = CopyTableRequest {
|
||||
catalog_name,
|
||||
schema_name,
|
||||
table_name,
|
||||
file_name,
|
||||
connection: copy_table.connection,
|
||||
};
|
||||
QueryStatement::Sql(Statement::Copy(copy_table)) => {
|
||||
let req = match copy_table {
|
||||
CopyTable::To(copy_table) => {
|
||||
let CopyTableArgument {
|
||||
location,
|
||||
connection,
|
||||
pattern,
|
||||
table_name,
|
||||
..
|
||||
} = copy_table;
|
||||
let (catalog_name, schema_name, table_name) =
|
||||
table_idents_to_full_name(&table_name, query_ctx.clone())?;
|
||||
CopyTableRequest {
|
||||
catalog_name,
|
||||
schema_name,
|
||||
table_name,
|
||||
location,
|
||||
connection,
|
||||
pattern,
|
||||
direction: CopyDirection::Export,
|
||||
}
|
||||
}
|
||||
CopyTable::From(copy_table) => {
|
||||
let CopyTableArgument {
|
||||
location,
|
||||
connection,
|
||||
pattern,
|
||||
table_name,
|
||||
..
|
||||
} = copy_table;
|
||||
let (catalog_name, schema_name, table_name) =
|
||||
table_idents_to_full_name(&table_name, query_ctx.clone())?;
|
||||
CopyTableRequest {
|
||||
catalog_name,
|
||||
schema_name,
|
||||
table_name,
|
||||
location,
|
||||
connection,
|
||||
pattern,
|
||||
direction: CopyDirection::Import,
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
self.sql_handler
|
||||
.execute(SqlRequest::CopyTable(req), query_ctx)
|
||||
.await
|
||||
}
|
||||
CopyTable::From(copy_table) => {
|
||||
let (catalog_name, schema_name, table_name) =
|
||||
table_idents_to_full_name(©_table.table_name, query_ctx.clone())?;
|
||||
let req = CopyTableFromRequest {
|
||||
catalog_name,
|
||||
schema_name,
|
||||
table_name,
|
||||
connection: copy_table.connection,
|
||||
pattern: copy_table.pattern,
|
||||
from: copy_table.from,
|
||||
};
|
||||
self.sql_handler
|
||||
.execute(SqlRequest::CopyTableFrom(req), query_ctx)
|
||||
.await
|
||||
}
|
||||
},
|
||||
self.sql_handler
|
||||
.execute(SqlRequest::CopyTable(req), query_ctx)
|
||||
.await
|
||||
}
|
||||
QueryStatement::Sql(Statement::Query(_))
|
||||
| QueryStatement::Sql(Statement::Explain(_))
|
||||
| QueryStatement::Sql(Statement::Use(_))
|
||||
|
||||
@@ -95,6 +95,7 @@ impl Instance {
|
||||
schema_name: expr.schema_name,
|
||||
table_name,
|
||||
region_number: expr.region_id,
|
||||
wait: None,
|
||||
};
|
||||
self.sql_handler()
|
||||
.execute(SqlRequest::FlushTable(req), QueryContext::arc())
|
||||
|
||||
@@ -34,8 +34,8 @@ use crate::error::{
|
||||
use crate::instance::sql::table_idents_to_full_name;
|
||||
|
||||
mod alter;
|
||||
mod copy_table;
|
||||
mod copy_table_from;
|
||||
mod copy_table_to;
|
||||
mod create;
|
||||
mod delete;
|
||||
mod drop_table;
|
||||
@@ -55,7 +55,6 @@ pub enum SqlRequest {
|
||||
DescribeTable(DescribeTable),
|
||||
Delete(Delete),
|
||||
CopyTable(CopyTableRequest),
|
||||
CopyTableFrom(CopyTableFromRequest),
|
||||
}
|
||||
|
||||
// Handler to execute SQL except query
|
||||
@@ -96,8 +95,10 @@ impl SqlHandler {
|
||||
SqlRequest::Alter(req) => self.alter(req).await,
|
||||
SqlRequest::DropTable(req) => self.drop_table(req).await,
|
||||
SqlRequest::Delete(req) => self.delete(query_ctx.clone(), req).await,
|
||||
SqlRequest::CopyTable(req) => self.copy_table(req).await,
|
||||
SqlRequest::CopyTableFrom(req) => self.copy_table_from(req).await,
|
||||
SqlRequest::CopyTable(req) => match req.direction {
|
||||
CopyDirection::Export => self.copy_table_to(req).await,
|
||||
CopyDirection::Import => self.copy_table_from(req).await,
|
||||
},
|
||||
SqlRequest::ShowDatabases(req) => {
|
||||
show_databases(req, self.catalog_manager.clone()).context(ExecuteSqlSnafu)
|
||||
}
|
||||
|
||||
@@ -15,35 +15,26 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use async_compat::CompatExt;
|
||||
use common_datasource::lister::{Lister, Source};
|
||||
use common_datasource::object_store::{build_backend, parse_url};
|
||||
use common_datasource::util::find_dir_and_filename;
|
||||
use common_query::Output;
|
||||
use common_recordbatch::error::DataTypesSnafu;
|
||||
use datafusion::parquet::arrow::ParquetRecordBatchStreamBuilder;
|
||||
use datatypes::arrow::record_batch::RecordBatch;
|
||||
use datatypes::vectors::{Helper, VectorRef};
|
||||
use futures::future;
|
||||
use futures_util::TryStreamExt;
|
||||
use object_store::services::{Fs, S3};
|
||||
use object_store::{Object, ObjectStore, ObjectStoreBuilder};
|
||||
use regex::Regex;
|
||||
use snafu::{ensure, ResultExt};
|
||||
use table::engine::TableReference;
|
||||
use table::requests::{CopyTableFromRequest, InsertRequest};
|
||||
use table::requests::{CopyTableRequest, InsertRequest};
|
||||
use tokio::io::BufReader;
|
||||
use url::{ParseError, Url};
|
||||
|
||||
use crate::error::{self, Result};
|
||||
use crate::sql::SqlHandler;
|
||||
|
||||
pub const S3_SCHEMA: &str = "S3";
|
||||
const ENDPOINT_URL: &str = "ENDPOINT_URL";
|
||||
const ACCESS_KEY_ID: &str = "ACCESS_KEY_ID";
|
||||
const SECRET_ACCESS_KEY: &str = "SECRET_ACCESS_KEY";
|
||||
const SESSION_TOKEN: &str = "SESSION_TOKEN";
|
||||
const REGION: &str = "REGION";
|
||||
const ENABLE_VIRTUAL_HOST_STYLE: &str = "ENABLE_VIRTUAL_HOST_STYLE";
|
||||
|
||||
impl SqlHandler {
|
||||
pub(crate) async fn copy_table_from(&self, req: CopyTableFromRequest) -> Result<Output> {
|
||||
pub(crate) async fn copy_table_from(&self, req: CopyTableRequest) -> Result<Output> {
|
||||
let table_ref = TableReference {
|
||||
catalog: &req.catalog_name,
|
||||
schema: &req.schema_name,
|
||||
@@ -51,9 +42,29 @@ impl SqlHandler {
|
||||
};
|
||||
let table = self.get_table(&table_ref)?;
|
||||
|
||||
let datasource = DataSource::new(&req.from, req.pattern, req.connection)?;
|
||||
let (_schema, _host, path) = parse_url(&req.location).context(error::ParseUrlSnafu)?;
|
||||
|
||||
let objects = datasource.list().await?;
|
||||
let object_store =
|
||||
build_backend(&req.location, req.connection).context(error::BuildBackendSnafu)?;
|
||||
|
||||
let (dir, filename) = find_dir_and_filename(&path);
|
||||
|
||||
let regex = req
|
||||
.pattern
|
||||
.as_ref()
|
||||
.map(|x| Regex::new(x))
|
||||
.transpose()
|
||||
.context(error::BuildRegexSnafu)?;
|
||||
|
||||
let source = if let Some(filename) = filename {
|
||||
Source::Filename(filename)
|
||||
} else {
|
||||
Source::Dir
|
||||
};
|
||||
|
||||
let lister = Lister::new(object_store, source, dir, regex);
|
||||
|
||||
let objects = lister.list().await.context(error::ListObjectsSnafu)?;
|
||||
|
||||
let mut buf: Vec<RecordBatch> = Vec::new();
|
||||
|
||||
@@ -131,321 +142,3 @@ impl SqlHandler {
|
||||
Ok(Output::AffectedRows(result.iter().sum()))
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
enum Source {
|
||||
Filename(String),
|
||||
Dir,
|
||||
}
|
||||
|
||||
struct DataSource {
|
||||
object_store: ObjectStore,
|
||||
source: Source,
|
||||
path: String,
|
||||
regex: Option<Regex>,
|
||||
}
|
||||
|
||||
impl DataSource {
|
||||
fn from_path(url: &str, regex: Option<Regex>) -> Result<DataSource> {
|
||||
let result = if url.ends_with('/') {
|
||||
Url::from_directory_path(url)
|
||||
} else {
|
||||
Url::from_file_path(url)
|
||||
};
|
||||
|
||||
match result {
|
||||
Ok(url) => {
|
||||
let path = url.path();
|
||||
|
||||
let (path, filename) = DataSource::find_dir_and_filename(path);
|
||||
|
||||
let source = if let Some(filename) = filename {
|
||||
Source::Filename(filename)
|
||||
} else {
|
||||
Source::Dir
|
||||
};
|
||||
|
||||
let object_store = build_fs_backend(&path)?;
|
||||
|
||||
Ok(DataSource {
|
||||
object_store,
|
||||
source,
|
||||
path,
|
||||
regex,
|
||||
})
|
||||
}
|
||||
Err(()) => error::InvalidPathSnafu {
|
||||
path: url.to_string(),
|
||||
}
|
||||
.fail(),
|
||||
}
|
||||
}
|
||||
|
||||
fn from_url(
|
||||
url: Url,
|
||||
regex: Option<Regex>,
|
||||
connection: HashMap<String, String>,
|
||||
) -> Result<DataSource> {
|
||||
let host = url.host_str();
|
||||
|
||||
let path = url.path();
|
||||
|
||||
let schema = url.scheme();
|
||||
|
||||
let (dir, filename) = DataSource::find_dir_and_filename(path);
|
||||
|
||||
let source = if let Some(filename) = filename {
|
||||
Source::Filename(filename)
|
||||
} else {
|
||||
Source::Dir
|
||||
};
|
||||
|
||||
let object_store = match schema.to_uppercase().as_str() {
|
||||
S3_SCHEMA => build_s3_backend(host, &dir, connection)?,
|
||||
_ => {
|
||||
return error::UnsupportedBackendProtocolSnafu {
|
||||
protocol: schema.to_string(),
|
||||
}
|
||||
.fail()
|
||||
}
|
||||
};
|
||||
|
||||
Ok(DataSource {
|
||||
object_store,
|
||||
source,
|
||||
path: dir,
|
||||
regex,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn new(
|
||||
url: &str,
|
||||
pattern: Option<String>,
|
||||
connection: HashMap<String, String>,
|
||||
) -> Result<DataSource> {
|
||||
let regex = if let Some(pattern) = pattern {
|
||||
let regex = Regex::new(&pattern).context(error::BuildRegexSnafu)?;
|
||||
Some(regex)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
let result = Url::parse(url);
|
||||
|
||||
match result {
|
||||
Ok(url) => DataSource::from_url(url, regex, connection),
|
||||
Err(err) => {
|
||||
if ParseError::RelativeUrlWithoutBase == err {
|
||||
DataSource::from_path(url, regex)
|
||||
} else {
|
||||
Err(error::Error::InvalidUrl {
|
||||
url: url.to_string(),
|
||||
source: err,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn list(&self) -> Result<Vec<Object>> {
|
||||
match &self.source {
|
||||
Source::Dir => {
|
||||
let streamer = self
|
||||
.object_store
|
||||
.object("/")
|
||||
.list()
|
||||
.await
|
||||
.context(error::ListObjectsSnafu { path: &self.path })?;
|
||||
streamer
|
||||
.try_filter(|f| {
|
||||
let res = if let Some(regex) = &self.regex {
|
||||
regex.is_match(f.name())
|
||||
} else {
|
||||
true
|
||||
};
|
||||
future::ready(res)
|
||||
})
|
||||
.try_collect::<Vec<_>>()
|
||||
.await
|
||||
.context(error::ListObjectsSnafu { path: &self.path })
|
||||
}
|
||||
Source::Filename(filename) => {
|
||||
let obj = self.object_store.object(filename);
|
||||
|
||||
Ok(vec![obj])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn find_dir_and_filename(path: &str) -> (String, Option<String>) {
|
||||
if path.is_empty() {
|
||||
("/".to_string(), None)
|
||||
} else if path.ends_with('/') {
|
||||
(path.to_string(), None)
|
||||
} else if let Some(idx) = path.rfind('/') {
|
||||
(
|
||||
path[..idx + 1].to_string(),
|
||||
Some(path[idx + 1..].to_string()),
|
||||
)
|
||||
} else {
|
||||
("/".to_string(), Some(path.to_string()))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn build_s3_backend(
|
||||
host: Option<&str>,
|
||||
path: &str,
|
||||
connection: HashMap<String, String>,
|
||||
) -> Result<ObjectStore> {
|
||||
let mut builder = S3::default();
|
||||
|
||||
builder.root(path);
|
||||
|
||||
if let Some(bucket) = host {
|
||||
builder.bucket(bucket);
|
||||
}
|
||||
|
||||
if let Some(endpoint) = connection.get(ENDPOINT_URL) {
|
||||
builder.endpoint(endpoint);
|
||||
}
|
||||
|
||||
if let Some(region) = connection.get(REGION) {
|
||||
builder.region(region);
|
||||
}
|
||||
|
||||
if let Some(key_id) = connection.get(ACCESS_KEY_ID) {
|
||||
builder.access_key_id(key_id);
|
||||
}
|
||||
|
||||
if let Some(key) = connection.get(SECRET_ACCESS_KEY) {
|
||||
builder.secret_access_key(key);
|
||||
}
|
||||
|
||||
if let Some(session_token) = connection.get(SESSION_TOKEN) {
|
||||
builder.security_token(session_token);
|
||||
}
|
||||
|
||||
if let Some(enable_str) = connection.get(ENABLE_VIRTUAL_HOST_STYLE) {
|
||||
let enable = enable_str.as_str().parse::<bool>().map_err(|e| {
|
||||
error::InvalidConnectionSnafu {
|
||||
msg: format!(
|
||||
"failed to parse the option {}={}, {}",
|
||||
ENABLE_VIRTUAL_HOST_STYLE, enable_str, e
|
||||
),
|
||||
}
|
||||
.build()
|
||||
})?;
|
||||
if enable {
|
||||
builder.enable_virtual_host_style();
|
||||
}
|
||||
}
|
||||
|
||||
    let accessor = builder.build().context(error::BuildBackendSnafu)?;

    Ok(ObjectStore::new(accessor).finish())
}
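
// A minimal sketch of the connection options this backend builder consumes; the keys are
// the constants defined above, and every value below is a placeholder rather than a real
// endpoint or credential:
//
//     let mut connection = HashMap::new();
//     connection.insert(ENDPOINT_URL.to_string(), "http://127.0.0.1:9000".to_string());
//     connection.insert(ACCESS_KEY_ID.to_string(), "<access key id>".to_string());
//     connection.insert(SECRET_ACCESS_KEY.to_string(), "<secret access key>".to_string());
//     connection.insert(REGION.to_string(), "us-west-2".to_string());
//     let store = build_s3_backend(Some("my-bucket"), "/data/", connection)?;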
|
||||
|
||||
pub fn build_fs_backend(root: &str) -> Result<ObjectStore> {
|
||||
let accessor = Fs::default()
|
||||
.root(root)
|
||||
.build()
|
||||
.context(error::BuildBackendSnafu)?;
|
||||
|
||||
Ok(ObjectStore::new(accessor).finish())
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
use url::Url;
|
||||
|
||||
use super::*;
|
||||
#[test]
|
||||
fn test_parse_uri() {
|
||||
struct Test<'a> {
|
||||
uri: &'a str,
|
||||
expected_path: &'a str,
|
||||
expected_schema: &'a str,
|
||||
}
|
||||
|
||||
let tests = [
|
||||
Test {
|
||||
uri: "s3://bucket/to/path/",
|
||||
expected_path: "/to/path/",
|
||||
expected_schema: "s3",
|
||||
},
|
||||
Test {
|
||||
uri: "fs:///to/path/",
|
||||
expected_path: "/to/path/",
|
||||
expected_schema: "fs",
|
||||
},
|
||||
Test {
|
||||
uri: "fs:///to/path/file",
|
||||
expected_path: "/to/path/file",
|
||||
expected_schema: "fs",
|
||||
},
|
||||
];
|
||||
for test in tests {
|
||||
let parsed_uri = Url::parse(test.uri).unwrap();
|
||||
assert_eq!(parsed_uri.path(), test.expected_path);
|
||||
assert_eq!(parsed_uri.scheme(), test.expected_schema);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_path_and_dir() {
|
||||
let parsed = Url::from_file_path("/to/path/file").unwrap();
|
||||
assert_eq!(parsed.path(), "/to/path/file");
|
||||
|
||||
let parsed = Url::from_directory_path("/to/path/").unwrap();
|
||||
assert_eq!(parsed.path(), "/to/path/");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_find_dir_and_filename() {
|
||||
struct Test<'a> {
|
||||
path: &'a str,
|
||||
expected_dir: &'a str,
|
||||
expected_filename: Option<String>,
|
||||
}
|
||||
|
||||
let tests = [
|
||||
Test {
|
||||
path: "to/path/",
|
||||
expected_dir: "to/path/",
|
||||
expected_filename: None,
|
||||
},
|
||||
Test {
|
||||
path: "to/path/filename",
|
||||
expected_dir: "to/path/",
|
||||
expected_filename: Some("filename".into()),
|
||||
},
|
||||
Test {
|
||||
path: "/to/path/filename",
|
||||
expected_dir: "/to/path/",
|
||||
expected_filename: Some("filename".into()),
|
||||
},
|
||||
Test {
|
||||
path: "/",
|
||||
expected_dir: "/",
|
||||
expected_filename: None,
|
||||
},
|
||||
Test {
|
||||
path: "filename",
|
||||
expected_dir: "/",
|
||||
expected_filename: Some("filename".into()),
|
||||
},
|
||||
Test {
|
||||
path: "",
|
||||
expected_dir: "/",
|
||||
expected_filename: None,
|
||||
},
|
||||
];
|
||||
|
||||
for test in tests {
|
||||
let (path, filename) = DataSource::find_dir_and_filename(test.path);
|
||||
assert_eq!(test.expected_dir, path);
|
||||
assert_eq!(test.expected_filename, filename)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -12,9 +12,10 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::pin::Pin;
|
||||
|
||||
use common_datasource;
|
||||
use common_datasource::object_store::{build_backend, parse_url};
|
||||
use common_query::physical_plan::SessionContext;
|
||||
use common_query::Output;
|
||||
use common_recordbatch::adapter::DfRecordBatchStreamAdapter;
|
||||
@@ -27,51 +28,12 @@ use object_store::ObjectStore;
|
||||
use snafu::ResultExt;
|
||||
use table::engine::TableReference;
|
||||
use table::requests::CopyTableRequest;
|
||||
use url::{ParseError, Url};
|
||||
|
||||
use super::copy_table_from::{build_fs_backend, build_s3_backend, S3_SCHEMA};
|
||||
use crate::error::{self, Result};
|
||||
use crate::sql::SqlHandler;
|
||||
|
||||
impl SqlHandler {
|
||||
fn build_backend(
|
||||
&self,
|
||||
url: &str,
|
||||
connection: HashMap<String, String>,
|
||||
) -> Result<(ObjectStore, String)> {
|
||||
let result = Url::parse(url);
|
||||
|
||||
match result {
|
||||
Ok(url) => {
|
||||
let host = url.host_str();
|
||||
|
||||
let schema = url.scheme();
|
||||
|
||||
let path = url.path();
|
||||
|
||||
match schema.to_uppercase().as_str() {
|
||||
S3_SCHEMA => {
|
||||
let object_store = build_s3_backend(host, "/", connection)?;
|
||||
Ok((object_store, path.to_string()))
|
||||
}
|
||||
|
||||
_ => error::UnsupportedBackendProtocolSnafu {
|
||||
protocol: schema.to_string(),
|
||||
}
|
||||
.fail(),
|
||||
}
|
||||
}
|
||||
Err(ParseError::RelativeUrlWithoutBase) => {
|
||||
let object_store = build_fs_backend("/")?;
|
||||
Ok((object_store, url.to_string()))
|
||||
}
|
||||
Err(err) => Err(error::Error::InvalidUrl {
|
||||
url: url.to_string(),
|
||||
source: err,
|
||||
}),
|
||||
}
|
||||
}
|
||||
pub(crate) async fn copy_table(&self, req: CopyTableRequest) -> Result<Output> {
|
||||
pub(crate) async fn copy_table_to(&self, req: CopyTableRequest) -> Result<Output> {
|
||||
let table_ref = TableReference {
|
||||
catalog: &req.catalog_name,
|
||||
schema: &req.schema_name,
|
||||
@@ -91,9 +53,11 @@ impl SqlHandler {
|
||||
.context(error::TableScanExecSnafu)?;
|
||||
let stream = Box::pin(DfRecordBatchStreamAdapter::new(stream));
|
||||
|
||||
let (object_store, file_name) = self.build_backend(&req.file_name, req.connection)?;
|
||||
let (_schema, _host, path) = parse_url(&req.location).context(error::ParseUrlSnafu)?;
|
||||
let object_store =
|
||||
build_backend(&req.location, req.connection).context(error::BuildBackendSnafu)?;
|
||||
|
||||
let mut parquet_writer = ParquetWriter::new(file_name, stream, object_store);
|
||||
let mut parquet_writer = ParquetWriter::new(path.to_string(), stream, object_store);
|
||||
// TODO(jiachun):
|
||||
// For now, COPY is implemented synchronously.
|
||||
// When copying large table, it will be blocked for a long time.
|
||||
@@ -28,6 +28,7 @@ impl SqlHandler {
|
||||
&req.schema_name,
|
||||
table,
|
||||
req.region_number,
|
||||
req.wait,
|
||||
)
|
||||
.await?;
|
||||
} else {
|
||||
@@ -47,6 +48,7 @@ impl SqlHandler {
|
||||
&req.schema_name,
|
||||
table,
|
||||
req.region_number,
|
||||
req.wait,
|
||||
)
|
||||
}))
|
||||
.await
|
||||
@@ -62,6 +64,7 @@ impl SqlHandler {
|
||||
schema: &str,
|
||||
table: &str,
|
||||
region: Option<u32>,
|
||||
wait: Option<bool>,
|
||||
) -> Result<()> {
|
||||
let table_ref = TableReference {
|
||||
catalog,
|
||||
@@ -71,8 +74,11 @@ impl SqlHandler {
|
||||
|
||||
let full_table_name = table_ref.to_string();
|
||||
let table = self.get_table(&table_ref)?;
|
||||
table.flush(region).await.context(error::FlushTableSnafu {
|
||||
table_name: full_table_name,
|
||||
})
|
||||
table
|
||||
.flush(region, wait)
|
||||
.await
|
||||
.context(error::FlushTableSnafu {
|
||||
table_name: full_table_name,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -769,36 +769,6 @@ async fn test_delete() {
|
||||
check_output_stream(output, expect).await;
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread")]
|
||||
async fn test_execute_copy_to() {
|
||||
let instance = setup_test_instance("test_execute_copy_to").await;
|
||||
|
||||
// setups
|
||||
execute_sql(
|
||||
&instance,
|
||||
"create table demo(host string, cpu double, memory double, ts timestamp time index);",
|
||||
)
|
||||
.await;
|
||||
|
||||
let output = execute_sql(
|
||||
&instance,
|
||||
r#"insert into demo(host, cpu, memory, ts) values
|
||||
('host1', 66.6, 1024, 1655276557000),
|
||||
('host2', 88.8, 333.3, 1655276558000)
|
||||
"#,
|
||||
)
|
||||
.await;
|
||||
assert!(matches!(output, Output::AffectedRows(2)));
|
||||
|
||||
// exports
|
||||
let data_dir = instance.data_tmp_dir().path();
|
||||
|
||||
let copy_to_stmt = format!("Copy demo TO '{}/export/demo.parquet'", data_dir.display());
|
||||
|
||||
let output = execute_sql(&instance, ©_to_stmt).await;
|
||||
assert!(matches!(output, Output::AffectedRows(2)));
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread")]
|
||||
async fn test_execute_copy_to_s3() {
|
||||
logging::init_default_ut_logging();
|
||||
@@ -838,91 +808,6 @@ async fn test_execute_copy_to_s3() {
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread")]
|
||||
async fn test_execute_copy_from() {
|
||||
let instance = setup_test_instance("test_execute_copy_from").await;
|
||||
|
||||
// setups
|
||||
execute_sql(
|
||||
&instance,
|
||||
"create table demo(host string, cpu double, memory double, ts timestamp time index);",
|
||||
)
|
||||
.await;
|
||||
|
||||
let output = execute_sql(
|
||||
&instance,
|
||||
r#"insert into demo(host, cpu, memory, ts) values
|
||||
('host1', 66.6, 1024, 1655276557000),
|
||||
('host2', 88.8, 333.3, 1655276558000)
|
||||
"#,
|
||||
)
|
||||
.await;
|
||||
assert!(matches!(output, Output::AffectedRows(2)));
|
||||
|
||||
// export
|
||||
let data_dir = instance.data_tmp_dir().path();
|
||||
|
||||
let copy_to_stmt = format!("Copy demo TO '{}/export/demo.parquet'", data_dir.display());
|
||||
|
||||
let output = execute_sql(&instance, ©_to_stmt).await;
|
||||
assert!(matches!(output, Output::AffectedRows(2)));
|
||||
|
||||
struct Test<'a> {
|
||||
sql: &'a str,
|
||||
table_name: &'a str,
|
||||
}
|
||||
let tests = [
|
||||
Test {
|
||||
sql: &format!(
|
||||
"Copy with_filename FROM '{}/export/demo.parquet_1_2'",
|
||||
data_dir.display()
|
||||
),
|
||||
table_name: "with_filename",
|
||||
},
|
||||
Test {
|
||||
sql: &format!("Copy with_path FROM '{}/export/'", data_dir.display()),
|
||||
table_name: "with_path",
|
||||
},
|
||||
Test {
|
||||
sql: &format!(
|
||||
"Copy with_pattern FROM '{}/export/' WITH (PATTERN = 'demo.*')",
|
||||
data_dir.display()
|
||||
),
|
||||
table_name: "with_pattern",
|
||||
},
|
||||
];
|
||||
|
||||
for test in tests {
|
||||
// import
|
||||
execute_sql(
|
||||
&instance,
|
||||
&format!(
|
||||
"create table {}(host string, cpu double, memory double, ts timestamp time index);",
|
||||
test.table_name
|
||||
),
|
||||
)
|
||||
.await;
|
||||
|
||||
let output = execute_sql(&instance, test.sql).await;
|
||||
assert!(matches!(output, Output::AffectedRows(2)));
|
||||
|
||||
let output = execute_sql(
|
||||
&instance,
|
||||
&format!("select * from {} order by ts", test.table_name),
|
||||
)
|
||||
.await;
|
||||
let expected = "\
|
||||
+-------+------+--------+---------------------+
|
||||
| host | cpu | memory | ts |
|
||||
+-------+------+--------+---------------------+
|
||||
| host1 | 66.6 | 1024.0 | 2022-06-15T07:02:37 |
|
||||
| host2 | 88.8 | 333.3 | 2022-06-15T07:02:38 |
|
||||
+-------+------+--------+---------------------+"
|
||||
.to_string();
|
||||
check_output_stream(output, expected).await;
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread")]
|
||||
async fn test_execute_copy_from_s3() {
|
||||
logging::init_default_ut_logging();
|
||||
|
||||
@@ -116,10 +116,6 @@ impl MockInstance {
|
||||
pub(crate) fn inner(&self) -> &Instance {
|
||||
&self.instance
|
||||
}
|
||||
|
||||
pub(crate) fn data_tmp_dir(&self) -> &TempDir {
|
||||
&self._guard._data_tmp_dir
|
||||
}
|
||||
}
|
||||
|
||||
struct TestGuard {
|
||||
|
||||
@@ -74,8 +74,7 @@ impl DistTable {
|
||||
|
||||
let mut success = 0;
|
||||
for join in joins {
|
||||
let object_result = join.await.context(error::JoinTaskSnafu)??;
|
||||
let Output::AffectedRows(rows) = object_result else { unreachable!() };
|
||||
let rows = join.await.context(error::JoinTaskSnafu)?? as usize;
|
||||
success += rows;
|
||||
}
|
||||
Ok(Output::AffectedRows(success))
|
||||
|
||||
@@ -47,7 +47,7 @@ impl DatanodeInstance {
|
||||
Self { table, db }
|
||||
}
|
||||
|
||||
pub(crate) async fn grpc_insert(&self, request: InsertRequest) -> client::Result<Output> {
|
||||
pub(crate) async fn grpc_insert(&self, request: InsertRequest) -> client::Result<u32> {
|
||||
self.db.insert(request).await
|
||||
}
|
||||
|
||||
|
||||
@@ -125,15 +125,15 @@ pub(crate) async fn create_datanode_client(
|
||||
|
||||
// create a mock datanode grpc service, see example here:
|
||||
// https://github.com/hyperium/tonic/blob/master/examples/src/mock/mock.rs
|
||||
let datanode_service = GrpcServer::new(
|
||||
let grpc_server = GrpcServer::new(
|
||||
ServerGrpcQueryHandlerAdaptor::arc(datanode_instance),
|
||||
None,
|
||||
runtime,
|
||||
)
|
||||
.create_service();
|
||||
);
|
||||
tokio::spawn(async move {
|
||||
Server::builder()
|
||||
.add_service(datanode_service)
|
||||
.add_service(grpc_server.create_flight_service())
|
||||
.add_service(grpc_server.create_database_service())
|
||||
.serve_with_incoming(futures::stream::iter(vec![Ok::<_, std::io::Error>(server)]))
|
||||
.await
|
||||
});
|
||||
|
||||
@@ -33,4 +33,4 @@ tokio-util.workspace = true
|
||||
|
||||
[dev-dependencies]
|
||||
common-test-util = { path = "../common/test-util" }
|
||||
rand = "0.8"
|
||||
rand.workspace = true
|
||||
|
||||
@@ -12,7 +12,7 @@ common-error = { path = "../common/error" }
|
||||
common-grpc = { path = "../common/grpc" }
|
||||
common-telemetry = { path = "../common/telemetry" }
|
||||
etcd-client = "0.10"
|
||||
rand = "0.8"
|
||||
rand.workspace = true
|
||||
serde.workspace = true
|
||||
serde_json.workspace = true
|
||||
snafu.workspace = true
|
||||
|
||||
@@ -28,6 +28,7 @@ http-body = "0.4"
|
||||
lazy_static = "1.4"
|
||||
parking_lot = "0.12"
|
||||
prost.workspace = true
|
||||
rand.workspace = true
|
||||
regex = "1.6"
|
||||
serde = "1.0"
|
||||
serde_json = "1.0"
|
||||
|
||||
@@ -22,6 +22,7 @@ use api::v1::meta::store_server::StoreServer;
|
||||
use etcd_client::Client;
|
||||
use snafu::ResultExt;
|
||||
use tokio::net::TcpListener;
|
||||
use tokio::sync::mpsc::{self, Receiver, Sender};
|
||||
use tokio_stream::wrappers::TcpListenerStream;
|
||||
use tonic::transport::server::Router;
|
||||
|
||||
@@ -44,44 +45,65 @@ pub struct MetaSrvInstance {
|
||||
meta_srv: MetaSrv,
|
||||
|
||||
opts: MetaSrvOptions,
|
||||
|
||||
signal_sender: Option<Sender<()>>,
|
||||
}
|
||||
|
||||
impl MetaSrvInstance {
|
||||
pub async fn new(opts: MetaSrvOptions) -> Result<MetaSrvInstance> {
|
||||
let meta_srv = build_meta_srv(&opts).await?;
|
||||
|
||||
Ok(MetaSrvInstance { meta_srv, opts })
|
||||
Ok(MetaSrvInstance {
|
||||
meta_srv,
|
||||
opts,
|
||||
signal_sender: None,
|
||||
})
|
||||
}
|
||||
|
||||
pub async fn start(&self) -> Result<()> {
|
||||
pub async fn start(&mut self) -> Result<()> {
|
||||
self.meta_srv.start().await;
|
||||
bootstrap_meta_srv_with_router(&self.opts.bind_addr, router(self.meta_srv.clone())).await?;
|
||||
let (tx, mut rx) = mpsc::channel::<()>(1);
|
||||
|
||||
self.signal_sender = Some(tx);
|
||||
|
||||
bootstrap_meta_srv_with_router(
|
||||
&self.opts.bind_addr,
|
||||
router(self.meta_srv.clone()),
|
||||
&mut rx,
|
||||
)
|
||||
.await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn close(&self) -> Result<()> {
|
||||
// TODO: shutdown the router
|
||||
pub async fn shutdown(&self) -> Result<()> {
|
||||
if let Some(signal) = &self.signal_sender {
|
||||
signal
|
||||
.send(())
|
||||
.await
|
||||
.context(error::SendShutdownSignalSnafu)?;
|
||||
}
|
||||
|
||||
self.meta_srv.shutdown();
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
// Bootstrap the rpc server to serve incoming requests
|
||||
pub async fn bootstrap_meta_srv(opts: MetaSrvOptions) -> Result<()> {
|
||||
let meta_srv = make_meta_srv(&opts).await?;
|
||||
bootstrap_meta_srv_with_router(&opts.bind_addr, router(meta_srv)).await
|
||||
}
|
||||
|
||||
pub async fn bootstrap_meta_srv_with_router(bind_addr: &str, router: Router) -> Result<()> {
|
||||
pub async fn bootstrap_meta_srv_with_router(
|
||||
bind_addr: &str,
|
||||
router: Router,
|
||||
signal: &mut Receiver<()>,
|
||||
) -> Result<()> {
|
||||
let listener = TcpListener::bind(bind_addr)
|
||||
.await
|
||||
.context(error::TcpBindSnafu { addr: bind_addr })?;
|
||||
let listener = TcpListenerStream::new(listener);
|
||||
|
||||
router
|
||||
.serve_with_incoming(listener)
|
||||
.serve_with_incoming_shutdown(listener, async {
|
||||
signal.recv().await;
|
||||
})
|
||||
.await
|
||||
.context(error::StartGrpcSnafu)?;
|
||||
|
||||
|
||||
@@ -15,12 +15,16 @@
|
||||
use std::string::FromUtf8Error;
|
||||
|
||||
use common_error::prelude::*;
|
||||
use tokio::sync::mpsc::error::SendError;
|
||||
use tonic::codegen::http;
|
||||
use tonic::{Code, Status};
|
||||
|
||||
#[derive(Debug, Snafu)]
|
||||
#[snafu(visibility(pub))]
|
||||
pub enum Error {
|
||||
#[snafu(display("Failed to send shutdown signal"))]
|
||||
SendShutdownSignal { source: SendError<()> },
|
||||
|
||||
#[snafu(display("Error stream request next is None"))]
|
||||
StreamNone { backtrace: Backtrace },
|
||||
|
||||
@@ -312,6 +316,7 @@ impl ErrorExt for Error {
|
||||
| Error::LeaseGrant { .. }
|
||||
| Error::LockNotConfig { .. }
|
||||
| Error::ExceededRetryLimit { .. }
|
||||
| Error::SendShutdownSignal { .. }
|
||||
| Error::StartGrpc { .. } => StatusCode::Internal,
|
||||
Error::EmptyKey { .. }
|
||||
| Error::MissingRequiredParameter { .. }
|
||||
|
||||
575 src/meta-srv/src/failure_detector.rs Normal file
@@ -0,0 +1,575 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::VecDeque;
|
||||
|
||||
/// This is our port of Akka's "[PhiAccrualFailureDetector](https://github.com/akka/akka/blob/main/akka-remote/src/main/scala/akka/remote/PhiAccrualFailureDetector.scala)"
|
||||
/// You can find its documentation here:
|
||||
/// https://doc.akka.io/docs/akka/current/typed/failure-detector.html
|
||||
///
|
||||
/// Implementation of 'The Phi Accrual Failure Detector' by Hayashibara et al. as defined in their
|
||||
/// paper: [https://oneofus.la/have-emacs-will-hack/files/HDY04.pdf]
|
||||
///
|
||||
/// The suspicion level of failure is given by a value called φ (phi).
|
||||
/// The basic idea of the φ failure detector is to express the value of φ on a scale that
|
||||
/// is dynamically adjusted to reflect current network conditions. A configurable
|
||||
/// threshold is used to decide if φ is considered to be a failure.
|
||||
///
|
||||
/// The value of φ is calculated as:
|
||||
///
|
||||
/// φ = -log10(1 - F(timeSinceLastHeartbeat))
|
||||
///
|
||||
/// where F is the cumulative distribution function of a normal distribution with mean
|
||||
/// and standard deviation estimated from historical heartbeat inter-arrival times.
|
||||
pub(crate) struct PhiAccrualFailureDetector {
|
||||
/// A low threshold is prone to generate many wrong suspicions but ensures a quick detection
|
||||
/// in the event of a real crash. Conversely, a high threshold generates fewer mistakes but
|
||||
/// needs more time to detect actual crashes.
|
||||
threshold: f64,
|
||||
|
||||
/// Number of samples to use for calculation of mean and standard deviation of inter-arrival
|
||||
/// times.
|
||||
max_sample_size: u32,
|
||||
|
||||
/// Minimum standard deviation to use for the normal distribution used when calculating phi.
|
||||
/// Too low standard deviation might result in too much sensitivity for sudden, but normal,
|
||||
/// deviations in heartbeat inter arrival times.
|
||||
min_std_deviation_millis: f64,
|
||||
|
||||
/// Duration corresponding to number of potentially lost/delayed heartbeats that will be
|
||||
/// accepted before considering it to be an anomaly.
|
||||
/// This margin is important to be able to survive sudden, occasional, pauses in heartbeat
|
||||
/// arrivals, due to for example network drop.
|
||||
acceptable_heartbeat_pause_millis: i64,
|
||||
|
||||
/// Bootstrap the stats with heartbeats that corresponds to this duration, with a rather high
|
||||
/// standard deviation (since environment is unknown in the beginning).
|
||||
first_heartbeat_estimate_millis: i64,
|
||||
|
||||
heartbeat_history: HeartbeatHistory,
|
||||
last_heartbeat_millis: Option<i64>,
|
||||
}
|
||||
|
||||
impl Default for PhiAccrualFailureDetector {
|
||||
fn default() -> Self {
|
||||
// default configuration is the same as of Akka:
|
||||
// https://github.com/akka/akka/blob/main/akka-cluster/src/main/resources/reference.conf#L181
|
||||
let max_sample_size = 1000;
|
||||
Self {
|
||||
threshold: 8_f64,
|
||||
max_sample_size,
|
||||
min_std_deviation_millis: 100_f64,
|
||||
acceptable_heartbeat_pause_millis: 3000,
|
||||
first_heartbeat_estimate_millis: 1000,
|
||||
heartbeat_history: HeartbeatHistory::new(max_sample_size),
|
||||
last_heartbeat_millis: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl PhiAccrualFailureDetector {
    pub(crate) fn heartbeat(&mut self, ts_millis: i64) {
        if let Some(last_heartbeat_millis) = self.last_heartbeat_millis {
            if ts_millis < last_heartbeat_millis {
                return;
            }

            if self.is_available(ts_millis) {
                let interval = ts_millis - last_heartbeat_millis;
                self.heartbeat_history.add(interval)
            }
        } else {
            // guess statistics for first heartbeat,
            // important so that connections with only one heartbeat become unavailable
            // bootstrap with 2 entries with rather high standard deviation
            let std_deviation = self.first_heartbeat_estimate_millis / 4;
            self.heartbeat_history
                .add(self.first_heartbeat_estimate_millis - std_deviation);
            self.heartbeat_history
                .add(self.first_heartbeat_estimate_millis + std_deviation);
        }
        let _ = self.last_heartbeat_millis.insert(ts_millis);
    }
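
    // A small worked example of the first-heartbeat bootstrap above, assuming the default
    // `first_heartbeat_estimate_millis` of 1000: std_deviation = 1000 / 4 = 250, so the
    // history is seeded with the two intervals 750 and 1250. That gives a mean of 1000 ms
    // with a deliberately wide spread, so phi stays computable (and can grow past the
    // threshold) even before any real inter-arrival history exists.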
|
||||
|
||||
pub(crate) fn is_available(&self, ts_millis: i64) -> bool {
|
||||
self.phi(ts_millis) < self.threshold
|
||||
}
|
||||
|
||||
/// The suspicion level of the accrual failure detector.
|
||||
///
|
||||
/// If a connection does not have any records in the failure detector, it is considered healthy.
|
||||
fn phi(&self, ts_millis: i64) -> f64 {
|
||||
if let Some(last_heartbeat_millis) = self.last_heartbeat_millis {
|
||||
let time_diff = ts_millis - last_heartbeat_millis;
|
||||
let mean = self.heartbeat_history.mean();
|
||||
let std_deviation = self
|
||||
.heartbeat_history
|
||||
.std_deviation()
|
||||
.max(self.min_std_deviation_millis);
|
||||
|
||||
phi(
|
||||
time_diff,
|
||||
mean + self.acceptable_heartbeat_pause_millis as f64,
|
||||
std_deviation,
|
||||
)
|
||||
} else {
|
||||
// treat unmanaged connections, e.g. with zero heartbeats, as healthy connections
|
||||
0.0
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Calculation of phi, derived from the Cumulative distribution function for
/// N(mean, stdDeviation) normal distribution, given by
/// 1.0 / (1.0 + math.exp(-y * (1.5976 + 0.070566 * y * y)))
/// where y = (x - mean) / standard_deviation
/// This is an approximation defined in β Mathematics Handbook (Logistic approximation).
/// Error is 0.00014 at +- 3.16
/// The calculated value is equivalent to -log10(1 - CDF(y))
///
/// Usually phi = 1 means the likeliness that we will make a mistake is about 10%.
/// The likeliness is about 1% with phi = 2, 0.1% with phi = 3 and so on.
fn phi(time_diff: i64, mean: f64, std_deviation: f64) -> f64 {
    let time_diff = time_diff as f64;
    let y = (time_diff - mean) / std_deviation;
    let e = (-y * (1.5976 + 0.070566 * y * y)).exp();
    if time_diff > mean {
        -(e / (1.0 + e)).log10()
    } else {
        -(1.0 - 1.0 / (1.0 + e)).log10()
    }
}
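
// A rough numeric sketch of how phi maps onto suspicion, using the values exercised by the
// tests below (mean = 1000.0 ms, std_deviation = 100.0 ms); treat the numbers as approximate
// outputs of the logistic approximation above:
//
//     phi(1000, 1000.0, 100.0) ≈ 0.3    // arrival right at the expected time
//     phi(1200, 1000.0, 100.0) ≈ 1.6    // two std deviations late, ~2.5% chance of a mistake
//     phi(1600, 1000.0, 100.0) ≈ 10.8   // six std deviations late, effectively certain failure
//
// With the default threshold of 8.0, the connection would be reported unavailable somewhere
// between the 1200 ms and 1600 ms samples.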
|
||||
|
||||
/// Holds the heartbeat statistics.
|
||||
/// It is capped by the number of samples specified in `max_sample_size`.
|
||||
///
|
||||
/// The stats (mean, variance, std_deviation) are not defined for empty HeartbeatHistory.
|
||||
struct HeartbeatHistory {
|
||||
max_sample_size: u32,
|
||||
intervals: VecDeque<i64>,
|
||||
interval_sum: i64,
|
||||
squared_interval_sum: i64,
|
||||
}
|
||||
|
||||
impl HeartbeatHistory {
|
||||
fn new(max_sample_size: u32) -> Self {
|
||||
Self {
|
||||
max_sample_size,
|
||||
intervals: VecDeque::with_capacity(max_sample_size as usize),
|
||||
interval_sum: 0,
|
||||
squared_interval_sum: 0,
|
||||
}
|
||||
}
|
||||
|
||||
fn mean(&self) -> f64 {
|
||||
self.interval_sum as f64 / self.intervals.len() as f64
|
||||
}
|
||||
|
||||
fn variance(&self) -> f64 {
|
||||
let mean = self.mean();
|
||||
self.squared_interval_sum as f64 / self.intervals.len() as f64 - mean * mean
|
||||
}
|
||||
|
||||
fn std_deviation(&self) -> f64 {
|
||||
self.variance().sqrt()
|
||||
}
|
||||
|
||||
fn add(&mut self, interval: i64) {
|
||||
if self.intervals.len() as u32 >= self.max_sample_size {
|
||||
self.drop_oldest();
|
||||
}
|
||||
self.intervals.push_back(interval);
|
||||
self.interval_sum += interval;
|
||||
self.squared_interval_sum += interval * interval;
|
||||
}
|
||||
|
||||
fn drop_oldest(&mut self) {
|
||||
let oldest = self
|
||||
.intervals
|
||||
.pop_front()
|
||||
.expect("intervals must not empty here");
|
||||
self.interval_sum -= oldest;
|
||||
self.squared_interval_sum -= oldest * oldest;
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use common_time::util::current_time_millis;
|
||||
use rand::Rng;
|
||||
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_heartbeat() {
|
||||
// Generate 2000 heartbeats start from now. Heartbeat interval is one second, plus some
|
||||
// random millis.
|
||||
fn generate_heartbeats() -> Vec<i64> {
|
||||
let mut rng = rand::thread_rng();
|
||||
let start = current_time_millis();
|
||||
(0..2000)
|
||||
.map(|i| start + i * 1000 + rng.gen_range(0..100))
|
||||
.collect::<Vec<i64>>()
|
||||
}
|
||||
let heartbeats = generate_heartbeats();
|
||||
|
||||
let mut fd = PhiAccrualFailureDetector::default();
|
||||
// feed the failure detector with these heartbeats
|
||||
heartbeats.iter().for_each(|x| fd.heartbeat(*x));
|
||||
|
||||
let start = *heartbeats.last().unwrap();
|
||||
// Within the "acceptable_heartbeat_pause_millis" period, phi is zero ...
|
||||
for i in 1..=fd.acceptable_heartbeat_pause_millis / 1000 {
|
||||
let now = start + i * 1000;
|
||||
assert_eq!(fd.phi(now), 0.0);
|
||||
}
|
||||
|
||||
// ... then in less than two seconds, phi is above the threshold.
|
||||
// The same effect can be seen in the diagrams in Akka's document.
|
||||
let now = start + fd.acceptable_heartbeat_pause_millis + 1000;
|
||||
assert!(fd.phi(now) < fd.threshold);
|
||||
let now = start + fd.acceptable_heartbeat_pause_millis + 2000;
|
||||
assert!(fd.phi(now) > fd.threshold);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_is_available() {
|
||||
let ts_millis = current_time_millis();
|
||||
|
||||
let mut fd = PhiAccrualFailureDetector::default();
|
||||
|
||||
// is available before first heartbeat
|
||||
assert!(fd.is_available(ts_millis));
|
||||
|
||||
fd.heartbeat(ts_millis);
|
||||
|
||||
// is available when heartbeat
|
||||
assert!(fd.is_available(ts_millis));
|
||||
// is available before heartbeat timeout
|
||||
assert!(fd.is_available(ts_millis + fd.acceptable_heartbeat_pause_millis / 2));
|
||||
// is not available after heartbeat timeout
|
||||
assert!(!fd.is_available(ts_millis + fd.acceptable_heartbeat_pause_millis * 2));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_last_heartbeat() {
|
||||
let ts_millis = current_time_millis();
|
||||
|
||||
let mut fd = PhiAccrualFailureDetector::default();
|
||||
|
||||
// no heartbeat yet
|
||||
assert!(fd.last_heartbeat_millis.is_none());
|
||||
|
||||
fd.heartbeat(ts_millis);
|
||||
assert_eq!(fd.last_heartbeat_millis, Some(ts_millis));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_phi() {
|
||||
let ts_millis = current_time_millis();
|
||||
|
||||
let mut fd = PhiAccrualFailureDetector::default();
|
||||
|
||||
// phi == 0 before first heartbeat
|
||||
assert_eq!(fd.phi(ts_millis), 0.0);
|
||||
|
||||
fd.heartbeat(ts_millis);
|
||||
|
||||
// phi == 0 when heartbeat
|
||||
assert_eq!(fd.phi(ts_millis), 0.0);
|
||||
// phi < threshold before heartbeat timeout
|
||||
let now = ts_millis + fd.acceptable_heartbeat_pause_millis / 2;
|
||||
assert!(fd.phi(now) < fd.threshold);
|
||||
// phi >= threshold after heartbeat timeout
|
||||
let now = ts_millis + fd.acceptable_heartbeat_pause_millis * 2;
|
||||
assert!(fd.phi(now) >= fd.threshold);
|
||||
}
|
||||
|
||||
// The following test cases are port from Akka's test:
|
||||
// [AccrualFailureDetectorSpec.scala](https://github.com/akka/akka/blob/main/akka-remote/src/test/scala/akka/remote/AccrualFailureDetectorSpec.scala).
|
||||
|
||||
#[test]
|
||||
fn test_use_good_enough_cumulative_distribution_function() {
|
||||
fn cdf(phi: f64) -> f64 {
|
||||
1.0 - 10.0_f64.powf(-phi)
|
||||
}
|
||||
|
||||
assert!((cdf(phi(0, 0.0, 10.0)) - 0.5).abs() < 0.001);
|
||||
assert!((cdf(phi(6, 0.0, 10.0)) - 0.7257).abs() < 0.001);
|
||||
assert!((cdf(phi(15, 0.0, 10.0)) - 0.9332).abs() < 0.001);
|
||||
assert!((cdf(phi(20, 0.0, 10.0)) - 0.97725).abs() < 0.001);
|
||||
assert!((cdf(phi(25, 0.0, 10.0)) - 0.99379).abs() < 0.001);
|
||||
assert!((cdf(phi(35, 0.0, 10.0)) - 0.99977).abs() < 0.001);
|
||||
assert!((cdf(phi(40, 0.0, 10.0)) - 0.99997).abs() < 0.0001);
|
||||
|
||||
for w in (0..40).collect::<Vec<i64>>().windows(2) {
|
||||
assert!(phi(w[0], 0.0, 10.0) < phi(w[1], 0.0, 10.0));
|
||||
}
|
||||
|
||||
assert!((cdf(phi(22, 20.0, 3.0)) - 0.7475).abs() < 0.001);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_handle_outliers_without_losing_precision_or_hitting_exceptions() {
|
||||
assert!((phi(10, 0.0, 1.0) - 38.0).abs() < 1.0);
|
||||
assert_eq!(phi(-25, 0.0, 1.0), 0.0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_return_realistic_phi_values() {
|
||||
let test = vec![
|
||||
(0, 0.0),
|
||||
(500, 0.1),
|
||||
(1000, 0.3),
|
||||
(1200, 1.6),
|
||||
(1400, 4.7),
|
||||
(1600, 10.8),
|
||||
(1700, 15.3),
|
||||
];
|
||||
for (time_diff, expected_phi) in test {
|
||||
assert!((phi(time_diff, 1000.0, 100.0) - expected_phi).abs() < 0.1);
|
||||
}
|
||||
|
||||
// larger std_deviation results => lower phi
|
||||
assert!(phi(1100, 1000.0, 500.0) < phi(1100, 1000.0, 100.0));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_return_phi_of_0_on_startup_when_no_heartbeats() {
|
||||
let fd = PhiAccrualFailureDetector {
|
||||
threshold: 8.0,
|
||||
max_sample_size: 1000,
|
||||
min_std_deviation_millis: 100.0,
|
||||
acceptable_heartbeat_pause_millis: 0,
|
||||
first_heartbeat_estimate_millis: 1000,
|
||||
heartbeat_history: HeartbeatHistory::new(1000),
|
||||
last_heartbeat_millis: None,
|
||||
};
|
||||
assert_eq!(fd.phi(current_time_millis()), 0.0);
|
||||
assert_eq!(fd.phi(current_time_millis()), 0.0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_return_phi_based_on_guess_when_only_one_heartbeat() {
|
||||
let mut fd = PhiAccrualFailureDetector {
|
||||
threshold: 8.0,
|
||||
max_sample_size: 1000,
|
||||
min_std_deviation_millis: 100.0,
|
||||
acceptable_heartbeat_pause_millis: 0,
|
||||
first_heartbeat_estimate_millis: 1000,
|
||||
heartbeat_history: HeartbeatHistory::new(1000),
|
||||
last_heartbeat_millis: None,
|
||||
};
|
||||
fd.heartbeat(0);
|
||||
assert!((fd.phi(1000)).abs() - 0.3 < 0.2);
|
||||
assert!((fd.phi(2000)).abs() - 4.5 < 0.3);
|
||||
assert!((fd.phi(3000)).abs() > 15.0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_return_phi_using_first_interval_after_second_heartbeat() {
|
||||
let mut fd = PhiAccrualFailureDetector {
|
||||
threshold: 8.0,
|
||||
max_sample_size: 1000,
|
||||
min_std_deviation_millis: 100.0,
|
||||
acceptable_heartbeat_pause_millis: 0,
|
||||
first_heartbeat_estimate_millis: 1000,
|
||||
heartbeat_history: HeartbeatHistory::new(1000),
|
||||
last_heartbeat_millis: None,
|
||||
};
|
||||
fd.heartbeat(0);
|
||||
assert!(fd.phi(100) > 0.0);
|
||||
fd.heartbeat(200);
|
||||
assert!(fd.phi(300) > 0.0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_is_available_after_a_series_of_successful_heartbeats() {
|
||||
let mut fd = PhiAccrualFailureDetector {
|
||||
threshold: 8.0,
|
||||
max_sample_size: 1000,
|
||||
min_std_deviation_millis: 100.0,
|
||||
acceptable_heartbeat_pause_millis: 0,
|
||||
first_heartbeat_estimate_millis: 1000,
|
||||
heartbeat_history: HeartbeatHistory::new(1000),
|
||||
last_heartbeat_millis: None,
|
||||
};
|
||||
assert!(fd.last_heartbeat_millis.is_none());
|
||||
fd.heartbeat(0);
|
||||
fd.heartbeat(1000);
|
||||
fd.heartbeat(1100);
|
||||
assert!(fd.last_heartbeat_millis.is_some());
|
||||
assert!(fd.is_available(1200));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_is_not_available_if_heartbeat_are_missed() {
|
||||
let mut fd = PhiAccrualFailureDetector {
|
||||
threshold: 3.0,
|
||||
max_sample_size: 1000,
|
||||
min_std_deviation_millis: 100.0,
|
||||
acceptable_heartbeat_pause_millis: 0,
|
||||
first_heartbeat_estimate_millis: 1000,
|
||||
heartbeat_history: HeartbeatHistory::new(1000),
|
||||
last_heartbeat_millis: None,
|
||||
};
|
||||
fd.heartbeat(0);
|
||||
fd.heartbeat(1000);
|
||||
fd.heartbeat(1100);
|
||||
assert!(fd.is_available(1200));
|
||||
assert!(!fd.is_available(8200));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_is_available_if_it_starts_heartbeat_again_after_being_marked_dead_due_to_detection_of_failure(
|
||||
) {
|
||||
let mut fd = PhiAccrualFailureDetector {
|
||||
threshold: 8.0,
|
||||
max_sample_size: 1000,
|
||||
min_std_deviation_millis: 100.0,
|
||||
acceptable_heartbeat_pause_millis: 3000,
|
||||
first_heartbeat_estimate_millis: 1000,
|
||||
heartbeat_history: HeartbeatHistory::new(1000),
|
||||
last_heartbeat_millis: None,
|
||||
};
|
||||
|
||||
// 1000 regular intervals, 5 minute pause, and then a short pause again that should trigger
|
||||
// unreachable again
|
||||
|
||||
let mut now = 0;
|
||||
for _ in 0..1000 {
|
||||
fd.heartbeat(now);
|
||||
now += 1000;
|
||||
}
|
||||
now += 5 * 60 * 1000;
|
||||
assert!(!fd.is_available(now)); // after the long pause
|
||||
now += 100;
|
||||
fd.heartbeat(now);
|
||||
now += 900;
|
||||
assert!(fd.is_available(now));
|
||||
now += 100;
|
||||
fd.heartbeat(now);
|
||||
now += 7000;
|
||||
assert!(!fd.is_available(now)); // after the 7 seconds pause
|
||||
now += 100;
|
||||
fd.heartbeat(now);
|
||||
now += 900;
|
||||
assert!(fd.is_available(now));
|
||||
now += 100;
|
||||
fd.heartbeat(now);
|
||||
now += 900;
|
||||
assert!(fd.is_available(now));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_accept_some_configured_missing_heartbeats() {
|
||||
let mut fd = PhiAccrualFailureDetector {
|
||||
threshold: 8.0,
|
||||
max_sample_size: 1000,
|
||||
min_std_deviation_millis: 100.0,
|
||||
acceptable_heartbeat_pause_millis: 3000,
|
||||
first_heartbeat_estimate_millis: 1000,
|
||||
heartbeat_history: HeartbeatHistory::new(1000),
|
||||
last_heartbeat_millis: None,
|
||||
};
|
||||
fd.heartbeat(0);
|
||||
fd.heartbeat(1000);
|
||||
fd.heartbeat(2000);
|
||||
fd.heartbeat(3000);
|
||||
assert!(fd.is_available(7000));
|
||||
fd.heartbeat(8000);
|
||||
assert!(fd.is_available(9000));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_fail_after_configured_acceptable_missing_heartbeats() {
|
||||
let mut fd = PhiAccrualFailureDetector {
|
||||
threshold: 8.0,
|
||||
max_sample_size: 1000,
|
||||
min_std_deviation_millis: 100.0,
|
||||
acceptable_heartbeat_pause_millis: 3000,
|
||||
first_heartbeat_estimate_millis: 1000,
|
||||
heartbeat_history: HeartbeatHistory::new(1000),
|
||||
last_heartbeat_millis: None,
|
||||
};
|
||||
fd.heartbeat(0);
|
||||
fd.heartbeat(1000);
|
||||
fd.heartbeat(2000);
|
||||
fd.heartbeat(3000);
|
||||
fd.heartbeat(4000);
|
||||
fd.heartbeat(5000);
|
||||
assert!(fd.is_available(5500));
|
||||
fd.heartbeat(6000);
|
||||
assert!(!fd.is_available(11000));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_use_max_sample_size_heartbeats() {
|
||||
let mut fd = PhiAccrualFailureDetector {
|
||||
threshold: 8.0,
|
||||
max_sample_size: 3,
|
||||
min_std_deviation_millis: 100.0,
|
||||
acceptable_heartbeat_pause_millis: 0,
|
||||
first_heartbeat_estimate_millis: 1000,
|
||||
heartbeat_history: HeartbeatHistory::new(3),
|
||||
last_heartbeat_millis: None,
|
||||
};
|
||||
// 100 ms interval
|
||||
fd.heartbeat(0);
|
||||
fd.heartbeat(100);
|
||||
fd.heartbeat(200);
|
||||
fd.heartbeat(300);
|
||||
let phi1 = fd.phi(400);
|
||||
// 500 ms interval, should become same phi when 100 ms intervals have been dropped
|
||||
fd.heartbeat(1000);
|
||||
fd.heartbeat(1500);
|
||||
fd.heartbeat(2000);
|
||||
fd.heartbeat(2500);
|
||||
let phi2 = fd.phi(3000);
|
||||
assert_eq!(phi1, phi2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heartbeat_history_calculate_correct_mean_and_variance() {
|
||||
let mut history = HeartbeatHistory::new(20);
|
||||
for i in [100, 200, 125, 340, 130] {
|
||||
history.add(i);
|
||||
}
|
||||
assert!((history.mean() - 179.0).abs() < 0.00001);
|
||||
assert!((history.variance() - 7584.0).abs() < 0.00001);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heartbeat_history_have_0_variance_for_one_sample() {
|
||||
let mut history = HeartbeatHistory::new(600);
|
||||
history.add(1000);
|
||||
assert!((history.variance() - 0.0).abs() < 0.00001);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heartbeat_history_be_capped_by_the_specified_max_sample_size() {
|
||||
let mut history = HeartbeatHistory::new(3);
|
||||
history.add(100);
|
||||
history.add(110);
|
||||
history.add(90);
|
||||
assert!((history.mean() - 100.0).abs() < 0.00001);
|
||||
assert!((history.variance() - 66.6666667).abs() < 0.00001);
|
||||
history.add(140);
|
||||
assert!((history.mean() - 113.333333).abs() < 0.00001);
|
||||
assert!((history.variance() - 422.222222).abs() < 0.00001);
|
||||
history.add(80);
|
||||
assert!((history.mean() - 103.333333).abs() < 0.00001);
|
||||
assert!((history.variance() - 688.88888889).abs() < 0.00001);
|
||||
}
|
||||
}
|
||||
@@ -17,6 +17,9 @@ pub mod bootstrap;
|
||||
pub mod cluster;
|
||||
pub mod election;
|
||||
pub mod error;
|
||||
// TODO(LFC): TBC
|
||||
#[allow(dead_code)]
|
||||
mod failure_detector;
|
||||
pub mod handler;
|
||||
pub mod keys;
|
||||
pub mod lease;
|
||||
|
||||
@@ -523,6 +523,7 @@ async fn test_alter_table_add_column() {
assert_eq!(new_schema.timestamp_column(), old_schema.timestamp_column());
assert_eq!(new_schema.version(), old_schema.version() + 1);
assert_eq!(new_meta.next_column_id, old_meta.next_column_id + 2);
assert_eq!(new_meta.region_numbers, old_meta.region_numbers);
}

#[tokio::test]
@@ -572,6 +573,7 @@ async fn test_alter_table_remove_column() {
assert_eq!(&[1, 2], &new_meta.value_indices[..]);
assert_eq!(new_schema.timestamp_column(), old_schema.timestamp_column());
assert_eq!(new_schema.version(), old_schema.version() + 1);
assert_eq!(new_meta.region_numbers, old_meta.region_numbers);
}

#[tokio::test]
@@ -793,10 +795,10 @@ async fn test_flush_table_all_regions() {
assert!(!has_parquet_file(&region_dir));

// Trigger flush all region
table.flush(None).await.unwrap();
table.flush(None, None).await.unwrap();

// Trigger again, wait for the previous task finished
table.flush(None).await.unwrap();
table.flush(None, None).await.unwrap();

assert!(has_parquet_file(&region_dir));
}
@@ -832,10 +834,10 @@ async fn test_flush_table_with_region_id() {
};

// Trigger flush all region
table.flush(req.region_number).await.unwrap();
table.flush(req.region_number, Some(false)).await.unwrap();

// Trigger again, wait for the previous task finished
table.flush(req.region_number).await.unwrap();
table.flush(req.region_number, Some(true)).await.unwrap();

assert!(has_parquet_file(&region_dir));
}

@@ -35,8 +35,8 @@ use object_store::ObjectStore;
use snafu::{ensure, OptionExt, ResultExt};
use store_api::manifest::{self, Manifest, ManifestVersion, MetaActionIterator};
use store_api::storage::{
AddColumn, AlterOperation, AlterRequest, ChunkReader, ReadContext, Region, RegionMeta,
RegionNumber, ScanRequest, SchemaRef, Snapshot, WriteContext, WriteRequest,
AddColumn, AlterOperation, AlterRequest, ChunkReader, FlushContext, ReadContext, Region,
RegionMeta, RegionNumber, ScanRequest, SchemaRef, Snapshot, WriteContext, WriteRequest,
};
use table::error as table_error;
use table::error::{RegionSchemaMismatchSnafu, Result as TableResult, TableOperationSnafu};
@@ -323,20 +323,27 @@ impl<R: Region> Table for MitoTable<R> {
Ok(rows_deleted)
}

async fn flush(&self, region_number: Option<RegionNumber>) -> TableResult<()> {
async fn flush(
&self,
region_number: Option<RegionNumber>,
wait: Option<bool>,
) -> TableResult<()> {
let flush_ctx = wait.map(|wait| FlushContext { wait }).unwrap_or_default();
if let Some(region_number) = region_number {
if let Some(region) = self.regions.get(&region_number) {
region
.flush()
.flush(&flush_ctx)
.await
.map_err(BoxedError::new)
.context(table_error::TableOperationSnafu)?;
}
} else {
futures::future::try_join_all(self.regions.values().map(|region| region.flush()))
.await
.map_err(BoxedError::new)
.context(table_error::TableOperationSnafu)?;
futures::future::try_join_all(
self.regions.values().map(|region| region.flush(&flush_ctx)),
)
.await
.map_err(BoxedError::new)
.context(table_error::TableOperationSnafu)?;
}

Ok(())

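Note on the mapping above: when the caller passes no preference, the table layer falls back to FlushContext::default(), which (as defined later in this diff) waits for the flush to complete. A minimal illustrative sketch, assuming only the types shown in this change:

// Hypothetical snippet: how Option<bool> maps onto FlushContext.
// None        -> FlushContext::default()       (wait == true)
// Some(false) -> FlushContext { wait: false }  (fire-and-forget)
let wait: Option<bool> = None;
let flush_ctx = wait.map(|wait| FlushContext { wait }).unwrap_or_default();
assert!(flush_ctx.wait);
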
@@ -26,9 +26,9 @@ use datatypes::schema::{ColumnSchema, Schema};
use storage::metadata::{RegionMetaImpl, RegionMetadata};
use storage::write_batch::WriteBatch;
use store_api::storage::{
AlterRequest, Chunk, ChunkReader, CreateOptions, EngineContext, GetRequest, GetResponse,
OpenOptions, ReadContext, Region, RegionDescriptor, RegionId, ScanRequest, ScanResponse,
SchemaRef, Snapshot, StorageEngine, WriteContext, WriteResponse,
AlterRequest, Chunk, ChunkReader, CreateOptions, EngineContext, FlushContext, GetRequest,
GetResponse, OpenOptions, ReadContext, Region, RegionDescriptor, RegionId, ScanRequest,
ScanResponse, SchemaRef, Snapshot, StorageEngine, WriteContext, WriteResponse,
};

pub type Result<T> = std::result::Result<T, MockError>;
@@ -201,7 +201,7 @@ impl Region for MockRegion {
0
}

async fn flush(&self) -> Result<()> {
async fn flush(&self, _ctx: &FlushContext) -> Result<()> {
unimplemented!()
}
}

@@ -46,7 +46,7 @@ format_num = "0.1"
num = "0.4"
num-traits = "0.2"
paste = "1.0"
rand = "0.8"
rand.workspace = true
statrs = "0.16"
stats-cli = "3.0"
streaming-stats = "0.2"

@@ -50,7 +50,7 @@ postgres-types = { version = "0.2", features = ["with-chrono-0_4"] }
promql-parser = "0.1.0"
prost.workspace = true
query = { path = "../query" }
rand = "0.8"
rand.workspace = true
regex = "1.6"
rustls = "0.20"
rustls-pemfile = "1.0"
@@ -68,6 +68,7 @@ tokio-rustls = "0.23"
tokio-stream = { version = "0.1", features = ["net"] }
tokio.workspace = true
tonic.workspace = true
tonic-reflection = "0.6"
tower = { version = "0.4", features = ["full"] }
tower-http = { version = "0.3", features = ["full"] }

@@ -80,7 +81,7 @@ common-test-util = { path = "../common/test-util" }
mysql_async = { version = "0.31", default-features = false, features = [
"default-rustls",
] }
rand = "0.8"
rand.workspace = true
script = { path = "../script", features = ["python"] }
serde_json = "1.0"
table = { path = "../table" }

@@ -269,6 +269,12 @@ pub enum Error {

#[snafu(display("Invalid flush argument: {}", err_msg))]
InvalidFlushArgument { err_msg: String },

#[snafu(display("Failed to build gRPC reflection service, source: {}", source))]
GrpcReflectionService {
source: tonic_reflection::server::Error,
backtrace: Backtrace,
},
}

pub type Result<T> = std::result::Result<T, Error>;
@@ -287,6 +293,7 @@ impl ErrorExt for Error {
| InvalidPromRemoteReadQueryResult { .. }
| TcpBind { .. }
| CatalogError { .. }
| GrpcReflectionService { .. }
| BuildingContext { .. } => StatusCode::Internal,

InsertScript { source, .. }

@@ -12,11 +12,14 @@
// See the License for the specific language governing permissions and
// limitations under the License.

mod database;
pub mod flight;
pub mod handler;

use std::net::SocketAddr;
use std::sync::Arc;

use api::v1::greptime_database_server::{GreptimeDatabase, GreptimeDatabaseServer};
use arrow_flight::flight_service_server::{FlightService, FlightServiceServer};
use async_trait::async_trait;
use common_runtime::Runtime;
@@ -27,18 +30,23 @@ use tokio::net::TcpListener;
use tokio::sync::oneshot::{self, Sender};
use tokio::sync::Mutex;
use tokio_stream::wrappers::TcpListenerStream;
use tonic::Status;

use crate::auth::UserProviderRef;
use crate::error::{AlreadyStartedSnafu, Result, StartGrpcSnafu, TcpBindSnafu};
use crate::error::{
AlreadyStartedSnafu, GrpcReflectionServiceSnafu, Result, StartGrpcSnafu, TcpBindSnafu,
};
use crate::grpc::database::DatabaseService;
use crate::grpc::flight::FlightHandler;
use crate::grpc::handler::GreptimeRequestHandler;
use crate::query_handler::grpc::ServerGrpcQueryHandlerRef;
use crate::server::Server;

type TonicResult<T> = std::result::Result<T, Status>;

pub struct GrpcServer {
query_handler: ServerGrpcQueryHandlerRef,
user_provider: Option<UserProviderRef>,
shutdown_tx: Mutex<Option<Sender<()>>>,
runtime: Arc<Runtime>,
request_handler: Arc<GreptimeRequestHandler>,
}

impl GrpcServer {
@@ -47,21 +55,23 @@ impl GrpcServer {
user_provider: Option<UserProviderRef>,
runtime: Arc<Runtime>,
) -> Self {
Self {
let request_handler = Arc::new(GreptimeRequestHandler::new(
query_handler,
user_provider,
shutdown_tx: Mutex::new(None),
runtime,
));
Self {
shutdown_tx: Mutex::new(None),
request_handler,
}
}

pub fn create_service(&self) -> FlightServiceServer<impl FlightService> {
let service = FlightHandler::new(
self.query_handler.clone(),
self.user_provider.clone(),
self.runtime.clone(),
);
FlightServiceServer::new(service)
pub fn create_flight_service(&self) -> FlightServiceServer<impl FlightService> {
FlightServiceServer::new(FlightHandler::new(self.request_handler.clone()))
}

pub fn create_database_service(&self) -> GreptimeDatabaseServer<impl GreptimeDatabase> {
GreptimeDatabaseServer::new(DatabaseService::new(self.request_handler.clone()))
}
}

@@ -101,9 +111,17 @@ impl Server for GrpcServer {
(listener, addr)
};

let reflection_service = tonic_reflection::server::Builder::configure()
.register_encoded_file_descriptor_set(api::v1::GREPTIME_GRPC_DESC)
.with_service_name("greptime.v1.GreptimeDatabase")
.build()
.context(GrpcReflectionServiceSnafu)?;

// Would block to serve requests.
tonic::transport::Server::builder()
.add_service(self.create_service())
.add_service(self.create_flight_service())
.add_service(self.create_database_service())
.add_service(reflection_service)
.serve_with_incoming_shutdown(TcpListenerStream::new(listener), rx.map(drop))
.await
.context(StartGrpcSnafu)?;

86 src/servers/src/grpc/database.rs Normal file
@@ -0,0 +1,86 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Arc;

use api::v1::greptime_database_server::GreptimeDatabase;
use api::v1::greptime_response::Response as RawResponse;
use api::v1::{AffectedRows, GreptimeRequest, GreptimeResponse};
use async_trait::async_trait;
use common_query::Output;
use futures::StreamExt;
use tonic::{Request, Response, Status, Streaming};

use crate::grpc::handler::GreptimeRequestHandler;
use crate::grpc::TonicResult;

pub(crate) struct DatabaseService {
handler: Arc<GreptimeRequestHandler>,
}

impl DatabaseService {
pub(crate) fn new(handler: Arc<GreptimeRequestHandler>) -> Self {
Self { handler }
}
}

#[async_trait]
impl GreptimeDatabase for DatabaseService {
async fn handle(
&self,
request: Request<GreptimeRequest>,
) -> TonicResult<Response<GreptimeResponse>> {
let request = request.into_inner();
let output = self.handler.handle_request(request).await?;
let response = match output {
Output::AffectedRows(rows) => GreptimeResponse {
header: None,
response: Some(RawResponse::AffectedRows(AffectedRows { value: rows as _ })),
},
Output::Stream(_) | Output::RecordBatches(_) => {
return Err(Status::unimplemented("GreptimeDatabase::Handle for query"));
}
};
Ok(Response::new(response))
}

async fn handle_requests(
&self,
request: Request<Streaming<GreptimeRequest>>,
) -> Result<Response<GreptimeResponse>, Status> {
let mut affected_rows = 0;

let mut stream = request.into_inner();
while let Some(request) = stream.next().await {
let request = request?;
let output = self.handler.handle_request(request).await?;
match output {
Output::AffectedRows(rows) => affected_rows += rows,
Output::Stream(_) | Output::RecordBatches(_) => {
return Err(Status::unimplemented(
"GreptimeDatabase::HandleRequests for query",
));
}
}
}

let response = GreptimeResponse {
header: None,
response: Some(RawResponse::AffectedRows(AffectedRows {
value: affected_rows as u32,
})),
};
Ok(Response::new(response))
}
}
@@ -17,8 +17,7 @@ mod stream;
use std::pin::Pin;
use std::sync::Arc;

use api::v1::auth_header::AuthScheme;
use api::v1::{Basic, GreptimeRequest, RequestHeader};
use api::v1::GreptimeRequest;
use arrow_flight::flight_service_server::FlightService;
use arrow_flight::{
Action, ActionType, Criteria, Empty, FlightData, FlightDescriptor, FlightInfo,
@@ -27,40 +26,25 @@ use arrow_flight::{
use async_trait::async_trait;
use common_grpc::flight::{FlightEncoder, FlightMessage};
use common_query::Output;
use common_runtime::Runtime;
use futures::Stream;
use prost::Message;
use session::context::{QueryContext, QueryContextRef};
use snafu::{OptionExt, ResultExt};
use snafu::ResultExt;
use tonic::{Request, Response, Status, Streaming};

use crate::auth::{Identity, UserProviderRef};
use crate::error;
use crate::error::Error::Auth;
use crate::error::{NotFoundAuthHeaderSnafu, UnsupportedAuthSchemeSnafu};
use crate::grpc::flight::stream::FlightRecordBatchStream;
use crate::query_handler::grpc::ServerGrpcQueryHandlerRef;
use crate::grpc::handler::GreptimeRequestHandler;
use crate::grpc::TonicResult;

type TonicResult<T> = Result<T, Status>;
type TonicStream<T> = Pin<Box<dyn Stream<Item = TonicResult<T>> + Send + Sync + 'static>>;

pub struct FlightHandler {
handler: ServerGrpcQueryHandlerRef,
user_provider: Option<UserProviderRef>,
runtime: Arc<Runtime>,
handler: Arc<GreptimeRequestHandler>,
}

impl FlightHandler {
pub fn new(
handler: ServerGrpcQueryHandlerRef,
user_provider: Option<UserProviderRef>,
runtime: Arc<Runtime>,
) -> Self {
Self {
handler,
user_provider,
runtime,
}
pub fn new(handler: Arc<GreptimeRequestHandler>) -> Self {
Self { handler }
}
}

@@ -105,40 +89,8 @@ impl FlightService for FlightHandler {
let request =
GreptimeRequest::decode(ticket.as_ref()).context(error::InvalidFlightTicketSnafu)?;

let query = request.request.context(error::InvalidQuerySnafu {
reason: "Expecting non-empty GreptimeRequest.",
})?;
let query_ctx = create_query_context(request.header.as_ref());
let output = self.handler.handle_request(request).await?;

auth(
self.user_provider.as_ref(),
request.header.as_ref(),
&query_ctx,
)
.await?;

let handler = self.handler.clone();

// Executes requests in another runtime to
// 1. prevent the execution from being cancelled unexpected by Tonic runtime;
// - Refer to our blog for the rational behind it:
// https://www.greptime.com/blogs/2023-01-12-hidden-control-flow.html
// - Obtaining a `JoinHandle` to get the panic message (if there's any).
// From its docs, `JoinHandle` is cancel safe. The task keeps running even it's handle been dropped.
// 2. avoid the handler blocks the gRPC runtime incidentally.
let handle = self
.runtime
.spawn(async move { handler.do_query(query, query_ctx).await });

let output = handle.await.map_err(|e| {
if e.is_cancelled() {
Status::cancelled(e.to_string())
} else if e.is_panic() {
Status::internal(format!("{:?}", e.into_panic()))
} else {
Status::unknown(e.to_string())
}
})??;
let stream = to_flight_data_stream(output);
Ok(Response::new(stream))
}
@@ -195,56 +147,3 @@ fn to_flight_data_stream(output: Output) -> TonicStream<FlightData> {
}
}
}

fn create_query_context(header: Option<&RequestHeader>) -> QueryContextRef {
let ctx = QueryContext::arc();
if let Some(header) = header {
if !header.catalog.is_empty() {
ctx.set_current_catalog(&header.catalog);
}

if !header.schema.is_empty() {
ctx.set_current_schema(&header.schema);
}
};
ctx
}

async fn auth(
user_provider: Option<&UserProviderRef>,
request_header: Option<&RequestHeader>,
query_ctx: &QueryContextRef,
) -> TonicResult<()> {
let Some(user_provider) = user_provider else { return Ok(()) };

let user_info = match request_header
.context(NotFoundAuthHeaderSnafu)?
.clone()
.authorization
.context(NotFoundAuthHeaderSnafu)?
.auth_scheme
.context(NotFoundAuthHeaderSnafu)?
{
AuthScheme::Basic(Basic { username, password }) => user_provider
.authenticate(
Identity::UserId(&username, None),
crate::auth::Password::PlainText(&password),
)
.await
.map_err(|e| Auth { source: e }),
AuthScheme::Token(_) => UnsupportedAuthSchemeSnafu {
name: "Token AuthScheme",
}
.fail(),
}
.map_err(|e| Status::unauthenticated(e.to_string()))?;

user_provider
.authorize(
&query_ctx.current_catalog(),
&query_ctx.current_schema(),
&user_info,
)
.await
.map_err(|e| Status::permission_denied(e.to_string()))
}

137 src/servers/src/grpc/handler.rs Normal file
@@ -0,0 +1,137 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Arc;

use api::v1::auth_header::AuthScheme;
use api::v1::{Basic, GreptimeRequest, RequestHeader};
use common_query::Output;
use common_runtime::Runtime;
use session::context::{QueryContext, QueryContextRef};
use snafu::OptionExt;
use tonic::Status;

use crate::auth::{Identity, Password, UserProviderRef};
use crate::error::Error::{Auth, UnsupportedAuthScheme};
use crate::error::{InvalidQuerySnafu, NotFoundAuthHeaderSnafu};
use crate::grpc::TonicResult;
use crate::query_handler::grpc::ServerGrpcQueryHandlerRef;

pub struct GreptimeRequestHandler {
handler: ServerGrpcQueryHandlerRef,
user_provider: Option<UserProviderRef>,
runtime: Arc<Runtime>,
}

impl GreptimeRequestHandler {
pub fn new(
handler: ServerGrpcQueryHandlerRef,
user_provider: Option<UserProviderRef>,
runtime: Arc<Runtime>,
) -> Self {
Self {
handler,
user_provider,
runtime,
}
}

pub(crate) async fn handle_request(&self, request: GreptimeRequest) -> TonicResult<Output> {
let query = request.request.context(InvalidQuerySnafu {
reason: "Expecting non-empty GreptimeRequest.",
})?;

let header = request.header.as_ref();
let query_ctx = create_query_context(header);

self.auth(header, &query_ctx).await?;

let handler = self.handler.clone();

// Executes requests in another runtime to
// 1. prevent the execution from being cancelled unexpected by Tonic runtime;
// - Refer to our blog for the rational behind it:
// https://www.greptime.com/blogs/2023-01-12-hidden-control-flow.html
// - Obtaining a `JoinHandle` to get the panic message (if there's any).
// From its docs, `JoinHandle` is cancel safe. The task keeps running even it's handle been dropped.
// 2. avoid the handler blocks the gRPC runtime incidentally.
let handle = self
.runtime
.spawn(async move { handler.do_query(query, query_ctx).await });

let output = handle.await.map_err(|e| {
if e.is_cancelled() {
Status::cancelled(e.to_string())
} else if e.is_panic() {
Status::internal(format!("{:?}", e.into_panic()))
} else {
Status::unknown(e.to_string())
}
})??;
Ok(output)
}

async fn auth(
&self,
header: Option<&RequestHeader>,
query_ctx: &QueryContextRef,
) -> TonicResult<()> {
let Some(user_provider) = self.user_provider.as_ref() else { return Ok(()) };

let auth_scheme = header
.and_then(|header| {
header
.authorization
.as_ref()
.and_then(|x| x.auth_scheme.clone())
})
.context(NotFoundAuthHeaderSnafu)?;

let user_info = match auth_scheme {
AuthScheme::Basic(Basic { username, password }) => user_provider
.authenticate(
Identity::UserId(&username, None),
Password::PlainText(&password),
)
.await
.map_err(|e| Auth { source: e }),
AuthScheme::Token(_) => Err(UnsupportedAuthScheme {
name: "Token AuthScheme".to_string(),
}),
}
.map_err(|e| Status::unauthenticated(e.to_string()))?;

user_provider
.authorize(
&query_ctx.current_catalog(),
&query_ctx.current_schema(),
&user_info,
)
.await
.map_err(|e| Status::permission_denied(e.to_string()))
}
}

fn create_query_context(header: Option<&RequestHeader>) -> QueryContextRef {
let ctx = QueryContext::arc();
if let Some(header) = header {
if !header.catalog.is_empty() {
ctx.set_current_catalog(&header.catalog);
}
if !header.schema.is_empty() {
ctx.set_current_schema(&header.schema);
}
};
ctx
}
@@ -24,6 +24,7 @@ use common_runtime::{Builder as RuntimeBuilder, Runtime};
use servers::auth::UserProviderRef;
use servers::error::{Result, StartGrpcSnafu, TcpBindSnafu};
use servers::grpc::flight::FlightHandler;
use servers::grpc::handler::GreptimeRequestHandler;
use servers::query_handler::grpc::ServerGrpcQueryHandlerRef;
use servers::server::Server;
use snafu::ResultExt;
@@ -54,11 +55,11 @@ impl MockGrpcServer {
}

fn create_service(&self) -> FlightServiceServer<impl FlightService> {
let service = FlightHandler::new(
let service = FlightHandler::new(Arc::new(GreptimeRequestHandler::new(
self.query_handler.clone(),
self.user_provider.clone(),
self.runtime.clone(),
);
)));
FlightServiceServer::new(service)
}
}

@@ -18,7 +18,7 @@ use sqlparser::keywords::Keyword;

use crate::error::{self, Result};
use crate::parser::ParserContext;
use crate::statements::copy::{CopyTable, CopyTableFrom, CopyTableTo, Format};
use crate::statements::copy::{CopyTable, CopyTableArgument, Format};
use crate::statements::statement::Statement;

// COPY tbl TO 'output.parquet';
@@ -40,24 +40,24 @@ impl<'a> ParserContext<'a> {
})?;

if self.parser.parse_keyword(Keyword::TO) {
self.parse_copy_table_to(table_name)
Ok(CopyTable::To(self.parse_copy_table_to(table_name)?))
} else {
self.parser
.expect_keyword(Keyword::FROM)
.context(error::SyntaxSnafu { sql: self.sql })?;
self.parse_copy_table_from(table_name)
Ok(CopyTable::From(self.parse_copy_table_from(table_name)?))
}
}

fn parse_copy_table_from(&mut self, table_name: ObjectName) -> Result<CopyTable> {
let uri = self
.parser
.parse_literal_string()
.with_context(|_| error::UnexpectedSnafu {
sql: self.sql,
expected: "a uri",
actual: self.peek_token_as_string(),
})?;
fn parse_copy_table_from(&mut self, table_name: ObjectName) -> Result<CopyTableArgument> {
let location =
self.parser
.parse_literal_string()
.with_context(|_| error::UnexpectedSnafu {
sql: self.sql,
expected: "a uri",
actual: self.peek_token_as_string(),
})?;

let options = self
.parser
@@ -99,14 +99,17 @@ impl<'a> ParserContext<'a> {
}
})
.collect();

Ok(CopyTable::From(CopyTableFrom::new(
table_name, uri, format, pattern, connection,
)))
Ok(CopyTableArgument {
table_name,
format,
pattern,
connection,
location,
})
}

fn parse_copy_table_to(&mut self, table_name: ObjectName) -> Result<CopyTable> {
let file_name =
fn parse_copy_table_to(&mut self, table_name: ObjectName) -> Result<CopyTableArgument> {
let location =
self.parser
.parse_literal_string()
.with_context(|_| error::UnexpectedSnafu {
@@ -146,9 +149,13 @@ impl<'a> ParserContext<'a> {
})
.collect();

Ok(CopyTable::To(CopyTableTo::new(
table_name, file_name, format, connection,
)))
Ok(CopyTableArgument {
table_name,
format,
connection,
pattern: None,
location,
})
}

fn parse_option_string(value: Value) -> Option<String> {
@@ -197,7 +204,7 @@ mod tests {
assert_eq!("schema0", schema);
assert_eq!("tbl", table);

let file_name = copy_table.file_name;
let file_name = copy_table.location;
assert_eq!("tbl_file.parquet", file_name);

let format = copy_table.format;
@@ -240,7 +247,7 @@ mod tests {
assert_eq!("schema0", schema);
assert_eq!("tbl", table);

let file_name = copy_table.from;
let file_name = copy_table.location;
assert_eq!("tbl_file.parquet", file_name);

let format = copy_table.format;

@@ -20,60 +20,18 @@ use crate::error::{self, Result};

#[derive(Debug, Clone, PartialEq, Eq)]
pub enum CopyTable {
To(CopyTableTo),
From(CopyTableFrom),
To(CopyTableArgument),
From(CopyTableArgument),
}

#[derive(Debug, Clone, PartialEq, Eq)]
pub struct CopyTableTo {
pub table_name: ObjectName,
pub file_name: String,
pub format: Format,
pub connection: HashMap<String, String>,
}

impl CopyTableTo {
pub(crate) fn new(
table_name: ObjectName,
file_name: String,
format: Format,
connection: HashMap<String, String>,
) -> Self {
Self {
table_name,
file_name,
format,
connection,
}
}
}

// TODO: To combine struct CopyTableFrom and CopyTableTo
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct CopyTableFrom {
pub struct CopyTableArgument {
pub table_name: ObjectName,
pub format: Format,
pub connection: HashMap<String, String>,
pub pattern: Option<String>,
pub from: String,
}

impl CopyTableFrom {
pub(crate) fn new(
table_name: ObjectName,
from: String,
format: Format,
pattern: Option<String>,
connection: HashMap<String, String>,
) -> Self {
CopyTableFrom {
table_name,
format,
connection,
pattern,
from,
}
}
/// Copy tbl [To|From] 'location'.
pub location: String,
}

#[derive(Debug, Clone, PartialEq, Eq)]

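With both COPY directions now sharing CopyTableArgument, downstream code can treat them uniformly. A hedged caller-side sketch, using only the fields shown above (the describe helper is made up for illustration; ObjectName implements Display in sqlparser):

// Hypothetical helper: both enum variants carry the same argument type.
fn describe(stmt: &CopyTable) -> String {
    match stmt {
        CopyTable::To(arg) => format!("export {} to {}", arg.table_name, arg.location),
        CopyTable::From(arg) => format!("import {} from {}", arg.table_name, arg.location),
    }
}
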
@@ -48,7 +48,7 @@ criterion = "0.3"
common-test-util = { path = "../common/test-util" }
datatypes = { path = "../datatypes", features = ["test"] }
log-store = { path = "../log-store" }
rand = "0.8"
rand.workspace = true

[build-dependencies]
tonic-build = "0.8"

@@ -27,8 +27,8 @@ use snafu::ResultExt;
use store_api::logstore::LogStore;
use store_api::manifest::{self, Manifest, ManifestVersion, MetaActionIterator};
use store_api::storage::{
AlterRequest, OpenOptions, ReadContext, Region, RegionId, SequenceNumber, WriteContext,
WriteResponse,
AlterRequest, FlushContext, OpenOptions, ReadContext, Region, RegionId, SequenceNumber,
WriteContext, WriteResponse,
};

use crate::compaction::CompactionSchedulerRef;
@@ -136,8 +136,8 @@ impl<S: LogStore> Region for RegionImpl<S> {
.sum()
}

async fn flush(&self) -> Result<()> {
self.inner.flush().await
async fn flush(&self, ctx: &FlushContext) -> Result<()> {
self.inner.flush(ctx).await
}
}

@@ -436,10 +436,6 @@ impl<S: LogStore> RegionImpl<S> {
self.inner.version_control().current_manifest_version()
}

async fn wait_flush_done(&self) -> Result<()> {
self.inner.writer.wait_flush_done().await
}

/// Write to inner, also the `RegionWriter` directly.
async fn write_inner(&self, ctx: &WriteContext, request: WriteBatch) -> Result<WriteResponse> {
self.inner.write(ctx, request).await
@@ -565,7 +561,7 @@ impl<S: LogStore> RegionInner<S> {
self.writer.close().await
}

async fn flush(&self) -> Result<()> {
async fn flush(&self, ctx: &FlushContext) -> Result<()> {
let writer_ctx = WriterContext {
shared: &self.shared,
flush_strategy: &self.flush_strategy,
@@ -576,6 +572,6 @@ impl<S: LogStore> RegionInner<S> {
writer: &self.writer,
manifest: &self.manifest,
};
self.writer.flush(writer_ctx).await
self.writer.flush(writer_ctx, ctx).await
}
}

@@ -18,7 +18,7 @@ use std::sync::Arc;

use common_test_util::temp_dir::create_temp_dir;
use log_store::raft_engine::log_store::RaftEngineLogStore;
use store_api::storage::{OpenOptions, Region, WriteResponse};
use store_api::storage::{FlushContext, OpenOptions, Region, WriteResponse};

use crate::engine;
use crate::flush::FlushStrategyRef;
@@ -91,12 +91,9 @@ impl FlushTester {
self.base().full_scan().await
}

async fn wait_flush_done(&self) {
self.base().region.wait_flush_done().await.unwrap();
}

async fn flush(&self) {
self.base().region.flush().await.unwrap();
async fn flush(&self, wait: Option<bool>) {
let ctx = wait.map(|wait| FlushContext { wait }).unwrap_or_default();
self.base().region.flush(&ctx).await.unwrap();
}
}

@@ -146,8 +143,7 @@ async fn test_manual_flush() {
let sst_dir = format!("{}/{}", store_dir, engine::region_sst_dir("", REGION_NAME));
assert!(!has_parquet_file(&sst_dir));

tester.flush().await;
tester.wait_flush_done().await;
tester.flush(None).await;

assert!(has_parquet_file(&sst_dir));
}
@@ -160,15 +156,12 @@ async fn test_flush_empty() {
let flush_switch = Arc::new(FlushSwitch::default());
let tester = FlushTester::new(store_dir, flush_switch.clone()).await;

// Now set should flush to true to trigger flush.
flush_switch.set_should_flush(true);
// Flush empty table.
tester.flush(None).await;
let data = [(1000, Some(100))];
// Put element to trigger flush.
tester.put(&data).await;
tester.wait_flush_done().await;

// Disable flush.
flush_switch.set_should_flush(false);
// Put again.
let data = [(2000, Some(200))];
tester.put(&data).await;
@@ -197,12 +190,11 @@ async fn test_read_after_flush() {
tester.put(&[(1000, Some(100))]).await;
tester.put(&[(2000, Some(200))]).await;

// Now set should flush to true to trigger flush.
flush_switch.set_should_flush(true);
// Flush.
tester.flush(None).await;

// Put element to trigger flush.
// Put element again.
tester.put(&[(3000, Some(300))]).await;
tester.wait_flush_done().await;

let expect = vec![(1000, Some(100)), (2000, Some(200)), (3000, Some(300))];

@@ -230,24 +222,21 @@ async fn test_merge_read_after_flush() {
tester.put(&[(3000, Some(300))]).await;
tester.put(&[(2000, Some(200))]).await;

// Now set should flush to true to trigger flush.
flush_switch.set_should_flush(true);
// Flush content to SST1.
tester.flush(None).await;

// Put element to trigger flush (In SST2).
// Put element (In SST2).
tester.put(&[(2000, Some(201))]).await;
tester.wait_flush_done().await;

// Disable flush.
flush_switch.set_should_flush(false);
// In SST2.
tester.put(&[(2000, Some(202))]).await;
tester.put(&[(1000, Some(100))]).await;

// Enable flush.
flush_switch.set_should_flush(true);
// Trigger flush and overwrite row (In memtable).
// Trigger flush.
tester.flush(None).await;

// Overwrite row (In memtable).
tester.put(&[(2000, Some(203))]).await;
tester.wait_flush_done().await;

let expect = vec![(1000, Some(100)), (2000, Some(203)), (3000, Some(300))];

@@ -22,7 +22,7 @@ use futures::TryStreamExt;
use snafu::{ensure, ResultExt};
use store_api::logstore::LogStore;
use store_api::manifest::{Manifest, ManifestVersion, MetaAction};
use store_api::storage::{AlterRequest, SequenceNumber, WriteContext, WriteResponse};
use store_api::storage::{AlterRequest, FlushContext, SequenceNumber, WriteContext, WriteResponse};
use tokio::sync::Mutex;

use crate::background::JobHandle;
@@ -261,12 +261,24 @@ impl RegionWriter {
}

/// Flush task manually
pub async fn flush<S: LogStore>(&self, writer_ctx: WriterContext<'_, S>) -> Result<()> {
pub async fn flush<S: LogStore>(
&self,
writer_ctx: WriterContext<'_, S>,
ctx: &FlushContext,
) -> Result<()> {
let mut inner = self.inner.lock().await;

ensure!(!inner.is_closed(), error::ClosedRegionSnafu);

inner.manual_flush(writer_ctx).await
inner.manual_flush(writer_ctx).await?;

if ctx.wait {
if let Some(handle) = inner.flush_handle.take() {
handle.join().await?;
}
}

Ok(())
}

/// Cancel flush task if any
@@ -284,19 +296,6 @@ impl RegionWriter {
}
}

// Private methods for tests.
#[cfg(test)]
impl RegionWriter {
pub async fn wait_flush_done(&self) -> Result<()> {
let mut inner = self.inner.lock().await;
if let Some(handle) = inner.flush_handle.take() {
handle.join().await?;
}

Ok(())
}
}

pub struct WriterContext<'a, S: LogStore> {
pub shared: &'a SharedDataRef,
pub flush_strategy: &'a FlushStrategyRef,

@@ -34,7 +34,7 @@ pub use self::chunk::{Chunk, ChunkReader};
pub use self::descriptors::*;
pub use self::engine::{CreateOptions, EngineContext, OpenOptions, StorageEngine};
pub use self::metadata::RegionMeta;
pub use self::region::{Region, WriteContext};
pub use self::region::{FlushContext, Region, WriteContext};
pub use self::requests::{
AddColumn, AlterOperation, AlterRequest, GetRequest, ScanRequest, WriteRequest,
};

@@ -77,7 +77,8 @@ pub trait Region: Send + Sync + Clone + std::fmt::Debug + 'static {

fn disk_usage_bytes(&self) -> u64;

async fn flush(&self) -> Result<(), Self::Error>;
/// Flush memtable of the region to disk.
async fn flush(&self, ctx: &FlushContext) -> Result<(), Self::Error>;
}

/// Context for write operations.
@@ -89,3 +90,17 @@ impl From<&OpenOptions> for WriteContext {
WriteContext::default()
}
}

/// Context for flush operations.
#[derive(Debug, Clone)]
pub struct FlushContext {
/// If true, the flush will wait until the flush is done.
/// Default: true
pub wait: bool,
}

impl Default for FlushContext {
fn default() -> FlushContext {
FlushContext { wait: true }
}
}

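Combined with the new Region::flush(&self, ctx: &FlushContext) signature above, callers can choose between waiting for the flush and fire-and-forget. An illustrative sketch, where region stands for any Region implementation:

// Block until the flush finishes (the default behaviour, wait == true).
region.flush(&FlushContext::default()).await?;
// Schedule the flush and return immediately.
region.flush(&FlushContext { wait: false }).await?;
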
@@ -89,6 +89,9 @@ pub struct TableIdent {
pub version: TableVersion,
}

/// The table medatadata
/// Note: if you add new fields to this struct, please ensure 'new_meta_builder' function works.
/// TODO(dennis): find a better way to ensure 'new_meta_builder' works when adding new fields.
#[derive(Clone, Debug, Builder, PartialEq, Eq)]
#[builder(pattern = "mutable")]
pub struct TableMeta {
@@ -197,6 +200,7 @@ impl TableMeta {
.engine_options(self.engine_options.clone())
.options(self.options.clone())
.created_on(self.created_on)
.region_numbers(self.region_numbers.clone())
.next_column_id(self.next_column_id);

builder
@@ -572,6 +576,7 @@ mod tests {
.unwrap();

let new_meta = add_columns_to_meta(&meta);
assert_eq!(meta.region_numbers, new_meta.region_numbers);

let names: Vec<String> = new_meta
.schema
@@ -606,6 +611,8 @@ mod tests {
.build()
.unwrap();

assert_eq!(meta.region_numbers, new_meta.region_numbers);

let names: Vec<String> = new_meta
.schema
.column_schemas()

@@ -190,24 +190,22 @@ pub struct DeleteRequest {
pub key_column_values: HashMap<String, VectorRef>,
}

#[derive(Debug)]
pub enum CopyDirection {
Export,
Import,
}

/// Copy table request
#[derive(Debug)]
pub struct CopyTableRequest {
pub catalog_name: String,
pub schema_name: String,
pub table_name: String,
pub file_name: String,
pub connection: HashMap<String, String>,
}

#[derive(Debug)]
pub struct CopyTableFromRequest {
pub catalog_name: String,
pub schema_name: String,
pub table_name: String,
pub location: String,
pub connection: HashMap<String, String>,
pub pattern: Option<String>,
pub from: String,
pub direction: CopyDirection,
}

#[derive(Debug, Clone, Default)]
@@ -216,6 +214,8 @@ pub struct FlushTableRequest {
pub schema_name: String,
pub table_name: Option<String>,
pub region_number: Option<RegionNumber>,
/// Wait until the flush is done.
pub wait: Option<bool>,
}

#[cfg(test)]

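Because FlushTableRequest derives Default, the new wait field can be set without spelling out every other field. A hedged sketch; the schema and table names below are made up for illustration:

let req = FlushTableRequest {
    schema_name: "public".to_string(),
    table_name: Some("demo".to_string()),
    wait: Some(true), // block until the flush is done
    ..Default::default()
};
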
@@ -96,8 +96,12 @@ pub trait Table: Send + Sync {
}

/// Flush table.
async fn flush(&self, region_number: Option<RegionNumber>) -> Result<()> {
let _ = region_number;
///
/// Options:
/// - region_number: specify region to flush.
/// - wait: Whether to wait until flush is done.
async fn flush(&self, region_number: Option<RegionNumber>, wait: Option<bool>) -> Result<()> {
let _ = (region_number, wait);
UnsupportedSnafu { operation: "FLUSH" }.fail()?
}

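A short caller-side sketch of the extended trait method, mirroring the test changes earlier in this diff (the table handle and region number are illustrative):

// Flush every region and wait for completion (both defaults).
table.flush(None, None).await?;
// Flush only region 0 without waiting.
table.flush(Some(0), Some(false)).await?;
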
@@ -24,7 +24,7 @@ frontend = { path = "../src/frontend" }
mito = { path = "../src/mito", features = ["test"] }
object-store = { path = "../src/object-store" }
once_cell = "1.16"
rand = "0.8"
rand.workspace = true
serde.workspace = true
serde_json = "1.0"
servers = { path = "../src/servers" }

@@ -183,7 +183,7 @@ async fn insert_and_assert(db: &Database) {
row_count: 4,
};
let result = db.insert(request).await;
result.unwrap();
assert_eq!(result.unwrap(), 4);

let result = db
.sql(

79 tests/cases/standalone/copy/copy_from_fs.result Normal file
@@ -0,0 +1,79 @@
CREATE TABLE demo(host string, cpu double, memory double, ts TIMESTAMP time index);

Affected Rows: 0

insert into demo(host, cpu, memory, ts) values ('host1', 66.6, 1024, 1655276557000), ('host2', 88.8, 333.3, 1655276558000);

Affected Rows: 2

Copy demo TO '/tmp/demo/export/demo.parquet';

Affected Rows: 2

CREATE TABLE with_filename(host string, cpu double, memory double, ts timestamp time index);

Affected Rows: 0

Copy with_filename FROM '/tmp/demo/export/demo.parquet_1_2';

Affected Rows: 2

select * from with_filename order by ts;

+-------+------+--------+---------------------+
| host  | cpu  | memory | ts                  |
+-------+------+--------+---------------------+
| host1 | 66.6 | 1024.0 | 2022-06-15T07:02:37 |
| host2 | 88.8 | 333.3  | 2022-06-15T07:02:38 |
+-------+------+--------+---------------------+

CREATE TABLE with_path(host string, cpu double, memory double, ts timestamp time index);

Affected Rows: 0

Copy with_path FROM '/tmp/demo/export/';

Affected Rows: 2

select * from with_path order by ts;

+-------+------+--------+---------------------+
| host  | cpu  | memory | ts                  |
+-------+------+--------+---------------------+
| host1 | 66.6 | 1024.0 | 2022-06-15T07:02:37 |
| host2 | 88.8 | 333.3  | 2022-06-15T07:02:38 |
+-------+------+--------+---------------------+

CREATE TABLE with_pattern(host string, cpu double, memory double, ts timestamp time index);

Affected Rows: 0

Copy with_pattern FROM '/tmp/demo/export/' WITH (PATTERN = 'demo.*');

Affected Rows: 2

select * from with_pattern order by ts;

+-------+------+--------+---------------------+
| host  | cpu  | memory | ts                  |
+-------+------+--------+---------------------+
| host1 | 66.6 | 1024.0 | 2022-06-15T07:02:37 |
| host2 | 88.8 | 333.3  | 2022-06-15T07:02:38 |
+-------+------+--------+---------------------+

drop table demo;

Affected Rows: 1

drop table with_filename;

Affected Rows: 1

drop table with_path;

Affected Rows: 1

drop table with_pattern;

Affected Rows: 1

31 tests/cases/standalone/copy/copy_from_fs.sql Normal file
@@ -0,0 +1,31 @@
CREATE TABLE demo(host string, cpu double, memory double, ts TIMESTAMP time index);

insert into demo(host, cpu, memory, ts) values ('host1', 66.6, 1024, 1655276557000), ('host2', 88.8, 333.3, 1655276558000);

Copy demo TO '/tmp/demo/export/demo.parquet';

CREATE TABLE with_filename(host string, cpu double, memory double, ts timestamp time index);

Copy with_filename FROM '/tmp/demo/export/demo.parquet_1_2';

select * from with_filename order by ts;

CREATE TABLE with_path(host string, cpu double, memory double, ts timestamp time index);

Copy with_path FROM '/tmp/demo/export/';

select * from with_path order by ts;

CREATE TABLE with_pattern(host string, cpu double, memory double, ts timestamp time index);

Copy with_pattern FROM '/tmp/demo/export/' WITH (PATTERN = 'demo.*');

select * from with_pattern order by ts;

drop table demo;

drop table with_filename;

drop table with_path;

drop table with_pattern;
16 tests/cases/standalone/copy/copy_to_fs.result Normal file
@@ -0,0 +1,16 @@
CREATE TABLE demo(host string, cpu double, memory double, ts TIMESTAMP time index);

Affected Rows: 0

insert into demo(host, cpu, memory, ts) values ('host1', 66.6, 1024, 1655276557000), ('host2', 88.8, 333.3, 1655276558000);

Affected Rows: 2

Copy demo TO '/tmp/export/demo.parquet';

Affected Rows: 2

drop table demo;

Affected Rows: 1

7 tests/cases/standalone/copy/copy_to_fs.sql Normal file
@@ -0,0 +1,7 @@
CREATE TABLE demo(host string, cpu double, memory double, ts TIMESTAMP time index);

insert into demo(host, cpu, memory, ts) values ('host1', 66.6, 1024, 1655276557000), ('host2', 88.8, 333.3, 1655276558000);

Copy demo TO '/tmp/export/demo.parquet';

drop table demo;