Mirror of https://github.com/GreptimeTeam/greptimedb.git, synced 2025-12-24 23:19:57 +00:00

Compare commits: v0.5.0-nig ... script_wra (110 commits)
SHA1:

24f5e56196, c85d569797, e95a8e070c, b71bf11772, ee0a3972fc, 8fb40c66a4, e855f6370e, fb5dcbc40c,
0d109436b8, cbae03af07, 902e6ead60, f9e7762c5b, 0b421b5177, aa89d9deef, b3ffe5cd1e, d6ef7a75de,
6344b1e0db, 7d506b3c5f, 96e12e9ee5, a9db80ab1a, 5f5dbe0172, dac7a41cbd, de416465a6, 58c13739f0,
806400caff, f78dab078c, 7a14db68a6, c26f2f94c0, 781f2422b3, 7e68ecc498, 9ce9421850, c0df2b9086,
29d344ccd2, fe2fc723bc, 2332305b90, 9ccd182109, ae8153515b, cce5edc88e, 616eb04914, 7c53f92e4b,
445bd92c7a, 92a9802343, abbac46c05, d0d0f091f0, 707a0d5626, e42767d500, ca18ccf7d4, b1d8812806,
7547e7ebdf, 6100cb335a, 0badb3715e, bd9c2f2666, b3edbef1f3, 9e58bba363, 3a4c9f2b45, 64a36e9b36,
33566ea0f0, ff8ab6763b, 00e4bd45f0, 85eebcb16f, 102e43aace, 56fc77e573, 4c76d4d97e, 9e5cdf47d9,
bdb677dc52, 99dbb7401c, a7bbd61f28, efc5abfc02, 43a7457e15, 20f01219e9, dc351a6de9, 5f87b1f714,
b9146c88ff, 9558b3c201, da68d8ce4b, 01867adaa7, d9eeeee06e, 4fcda272fb, ce959ddd3f, 730a3faa02,
91820a8006, 500e299e40, ac4b6cd7f0, 3ab494764f, 5608035074, e083b8011c, 06327fba1e, 06da33b1ed,
2aa6ac5731, b28af9443b, 142035340d, d2cf72e0f1, ae27fbc7f2, 9bd10134dd, 3329da5b72, a24f8c96b3,
a691cff0c4, f92b55c745, a9e5b902fd, 5b978269cc, 3dffc7b62c, 968c872d15, e2a770f8de, dc46e96879,
8f3b299a45, 506e6887f3, 1757061272, 6599bb5a46, 3f981ef2b3, 5cff735e02
@@ -12,9 +12,4 @@ rustflags = [
     "-Wclippy::print_stdout",
     "-Wclippy::print_stderr",
     "-Wclippy::implicit_clone",
-
-    # It seems clippy has made a false positive decision here when upgrading rust toolchain to
-    # nightly-2023-08-07, we do need it to be borrowed mutably.
-    # Allow it for now; try disallow it when the toolchain is upgraded in the future.
-    "-Aclippy::needless_pass_by_ref_mut",
 ]
35  .github/ISSUE_TEMPLATE/bug_report.yml  vendored
@@ -41,13 +41,27 @@ body:
       required: true
 
   - type: textarea
-    id: what-happened
+    id: reproduce
     attributes:
-      label: What happened?
+      label: Minimal reproduce step
       description: |
-        Tell us what happened and also what you would have expected to
-        happen instead.
-      placeholder: "Describe the bug"
+        Please walk us through and provide steps and details on how
+        to reproduce the issue. If possible, provide scripts that we
+        can run to trigger the bug.
     validations:
       required: true
 
+  - type: textarea
+    id: expected-manner
+    attributes:
+      label: What did you expect to see?
+    validations:
+      required: true
+
+  - type: textarea
+    id: actual-manner
+    attributes:
+      label: What did you see instead?
+    validations:
+      required: true
@@ -72,14 +86,3 @@ body:
         trace. This will be automatically formatted into code, so no
         need for backticks.
       render: bash
-
-  - type: textarea
-    id: reproduce
-    attributes:
-      label: How can we reproduce the bug?
-      description: |
-        Please walk us through and provide steps and details on how
-        to reproduce the issue. If possible, provide scripts that we
-        can run to trigger the bug.
-    validations:
-      required: true
@@ -31,10 +31,12 @@ runs:
           echo "prerelease=false" >> $GITHUB_ENV
           echo "makeLatest=true" >> $GITHUB_ENV
           echo "generateReleaseNotes=false" >> $GITHUB_ENV
+          echo "omitBody=true" >> $GITHUB_ENV
         else
           echo "prerelease=true" >> $GITHUB_ENV
           echo "makeLatest=false" >> $GITHUB_ENV
           echo "generateReleaseNotes=true" >> $GITHUB_ENV
+          echo "omitBody=false" >> $GITHUB_ENV
         fi
 
     - name: Publish release
@@ -45,6 +47,7 @@ runs:
         makeLatest: ${{ env.makeLatest }}
         tag: ${{ inputs.version }}
         generateReleaseNotes: ${{ env.generateReleaseNotes }}
+        omitBody: ${{ env.omitBody }} # omitBody is true when the release is a official release.
         allowUpdates: true
         artifacts: |
           **/greptime-*/*
2  .github/workflows/apidoc.yml  vendored
@@ -13,7 +13,7 @@ on:
 name: Build API docs
 
 env:
-  RUST_TOOLCHAIN: nightly-2023-08-07
+  RUST_TOOLCHAIN: nightly-2023-10-21
 
 jobs:
   apidoc:
17  .github/workflows/develop.yml  vendored
@@ -29,7 +29,7 @@ concurrency:
   cancel-in-progress: true
 
 env:
-  RUST_TOOLCHAIN: nightly-2023-08-07
+  RUST_TOOLCHAIN: nightly-2023-10-21
 
 jobs:
   typos:
@@ -42,7 +42,10 @@ jobs:
   check:
     name: Check
     if: github.event.pull_request.draft == false
-    runs-on: ubuntu-20.04
+    runs-on: ${{ matrix.os }}
+    strategy:
+      matrix:
+        os: [ windows-latest-8-cores, ubuntu-20.04 ]
     timeout-minutes: 60
     steps:
       - uses: actions/checkout@v3
@@ -161,15 +164,18 @@
         uses: Swatinem/rust-cache@v2
       - name: Install latest nextest release
         uses: taiki-e/install-action@nextest
+      - name: Install cargo-llvm-cov
+        uses: taiki-e/install-action@cargo-llvm-cov
      - name: Install Python
         uses: actions/setup-python@v4
         with:
           python-version: '3.10'
       - name: Install PyArrow Package
         run: pip install pyarrow
-      - name: Install cargo-llvm-cov
-        uses: taiki-e/install-action@cargo-llvm-cov
-      - name: Collect coverage data
+      - name: Setup etcd server
+        working-directory: tests-integration/fixtures/etcd
+        run: docker compose -f docker-compose-standalone.yml up -d --wait
+      - name: Run nextest cases
         run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F pyo3_backend -F dashboard
         env:
           CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=lld"
@@ -179,6 +185,7 @@
           GT_S3_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
           GT_S3_ACCESS_KEY: ${{ secrets.S3_ACCESS_KEY }}
           GT_S3_REGION: ${{ secrets.S3_REGION }}
+          GT_ETCD_ENDPOINTS: http://127.0.0.1:2379
           UNITTEST_LOG_DIR: "__unittest_logs"
       - name: Codecov upload
         uses: codecov/codecov-action@v2
2  .github/workflows/nightly-ci.yml  vendored
@@ -12,7 +12,7 @@ concurrency:
   cancel-in-progress: true
 
 env:
-  RUST_TOOLCHAIN: nightly-2023-08-07
+  RUST_TOOLCHAIN: nightly-2023-10-21
 
 jobs:
   sqlness:
2  .github/workflows/release.yml  vendored
@@ -82,7 +82,7 @@ on:
 # Use env variables to control all the release process.
 env:
   # The arguments of building greptime.
-  RUST_TOOLCHAIN: nightly-2023-08-07
+  RUST_TOOLCHAIN: nightly-2023-10-21
   CARGO_PROFILE: nightly
 
 # Controls whether to run tests, include unit-test, integration-test and sqlness.
1470  Cargo.lock  generated
File diff suppressed because it is too large
20  Cargo.toml
@@ -43,25 +43,27 @@ members = [
     "src/partition",
     "src/plugins",
     "src/promql",
+    "src/puffin",
     "src/query",
     "src/script",
     "src/servers",
     "src/session",
     "src/sql",
     "src/storage",
     "src/store-api",
     "src/table",
+    "src/index",
     "tests-integration",
     "tests/runner",
 ]
 resolver = "2"
 
 [workspace.package]
-version = "0.4.2"
+version = "0.4.4"
 edition = "2021"
 license = "Apache-2.0"
 
 [workspace.dependencies]
 ahash = { version = "0.8", features = ["compile-time-rng"] }
 aquamarine = "0.3"
 arrow = { version = "47.0" }
 arrow-array = "47.0"
@@ -71,6 +73,8 @@ async-stream = "0.3"
 async-trait = "0.1"
 base64 = "0.21"
+bigdecimal = "0.4.2"
+bitflags = "2.4.1"
 bytemuck = "1.12"
 chrono = { version = "0.4", features = ["serde"] }
 datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
 datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
@@ -81,13 +85,15 @@ datafusion-sql = { git = "https://github.com/apache/arrow-datafusion.git", rev =
 datafusion-substrait = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
 derive_builder = "0.12"
 etcd-client = "0.12"
+fst = "0.4.7"
 futures = "0.3"
 futures-util = "0.3"
-greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "7eb2e78be7a104d2582fbea0bcb1e019407da702" }
+greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "b1d403088f02136bcebde53d604f491c260ca8e2" }
 humantime-serde = "1.1"
 itertools = "0.10"
 lazy_static = "1.4"
 meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "abbd357c1e193cd270ea65ee7652334a150b628f" }
 mockall = "0.11.4"
 moka = "0.12"
 once_cell = "1.18"
 opentelemetry-proto = { git = "https://github.com/waynexia/opentelemetry-rust.git", rev = "33841b38dda79b15f2024952be5f32533325ca02", features = [
@@ -97,23 +103,25 @@ opentelemetry-proto = { git = "https://github.com/waynexia/opentelemetry-rust.gi
 ] }
 parquet = "47.0"
 paste = "1.0"
 pin-project = "1.0"
 prometheus = { version = "0.13.3", features = ["process"] }
 prost = "0.12"
 raft-engine = { git = "https://github.com/tikv/raft-engine.git", rev = "22dfb426cd994602b57725ef080287d3e53db479" }
 rand = "0.8"
 regex = "1.8"
+regex-automata = { version = "0.1", features = ["transducer"] }
 reqwest = { version = "0.11", default-features = false, features = [
     "json",
     "rustls-tls-native-roots",
     "stream",
 ] }
-rust_decimal = "1.32.0"
+rust_decimal = "1.33"
 serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
 smallvec = "1"
 snafu = "0.7"
 # on branch v0.38.x
-sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "0fbae07d0c46dc18e3381c406d8b9b8abef6b1fd", features = [
+sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "6a93567ae38d42be5c8d08b13c8ff4dde26502ef", features = [
     "visitor",
 ] }
 strum = { version = "0.25", features = ["derive"] }
@@ -134,6 +142,7 @@ common-base = { path = "src/common/base" }
 common-catalog = { path = "src/common/catalog" }
 common-config = { path = "src/common/config" }
 common-datasource = { path = "src/common/datasource" }
+common-decimal = { path = "src/common/decimal" }
 common-error = { path = "src/common/error" }
 common-function = { path = "src/common/function" }
 common-greptimedb-telemetry = { path = "src/common/greptimedb-telemetry" }
@@ -171,7 +180,6 @@ script = { path = "src/script" }
 servers = { path = "src/servers" }
 session = { path = "src/session" }
 sql = { path = "src/sql" }
 storage = { path = "src/storage" }
 store-api = { path = "src/store-api" }
 substrait = { path = "src/common/substrait" }
 table = { path = "src/table" }
4  Makefile
@@ -157,11 +157,11 @@ sqlness-test: ## Run sqlness test.
 
 .PHONY: check
 check: ## Cargo check all the targets.
-	cargo check --workspace --all-targets
+	cargo check --workspace --all-targets --all-features
 
 .PHONY: clippy
 clippy: ## Check clippy rules.
-	cargo clippy --workspace --all-targets -F pyo3_backend -- -D warnings
+	cargo clippy --workspace --all-targets --all-features -- -D warnings
 
 .PHONY: fmt-check
 fmt-check: ## Check code format.
13  README.md
@@ -27,14 +27,6 @@
   <a href="https://greptime.com/slack"><img src="https://img.shields.io/badge/slack-GreptimeDB-0abd59?logo=slack" alt="slack" /></a>
 </p>
 
-## Upcoming Event
-Come and meet us in **KubeCon + CloudNativeCon North America 2023!**
-<p align="center">
-  <picture>
-    <img alt="KubeCon + CloudNativeCon North Logo" src="./docs/banner/KCCNC_NA_2023_1000x200_Email Banner.png" width="800px">
-  </picture>
-</p>
-
 ## What is GreptimeDB
 
 GreptimeDB is an open-source time-series database with a special focus on
@@ -108,7 +100,7 @@ Please see the online document site for more installation options and [operation
 
 ### Get started
 
-Read the [complete getting started guide](https://docs.greptime.com/getting-started/try-out-greptimedb) on our [official document site](https://docs.greptime.com/).
+Read the [complete getting started guide](https://docs.greptime.com/getting-started/overview) on our [official document site](https://docs.greptime.com/).
 
 To write and query data, GreptimeDB is compatible with multiple [protocols and clients](https://docs.greptime.com/user-guide/clients/overview).
 
@@ -117,7 +109,7 @@ To write and query data, GreptimeDB is compatible with multiple [protocols and c
 ### Installation
 
 - [Pre-built Binaries](https://greptime.com/download):
   For Linux and macOS, you can easily download pre-built binaries including official releases and nightly builds that are ready to use.
+  In most cases, downloading the version without PyO3 is sufficient. However, if you plan to run scripts in CPython (and use Python packages like NumPy and Pandas), you will need to download the version with PyO3 and install a Python with the same version as the Python in the PyO3 version.
+  We recommend using virtualenv for the installation process to manage multiple Python versions.
 - [Docker Images](https://hub.docker.com/r/greptime/greptimedb)(**recommended**): pre-built
@@ -143,6 +135,7 @@ To write and query data, GreptimeDB is compatible with multiple [protocols and c
 - [GreptimeDB Java Client](https://github.com/GreptimeTeam/greptimedb-client-java)
 - [GreptimeDB Python Client](https://github.com/GreptimeTeam/greptimedb-client-py) (WIP)
 - [GreptimeDB Rust Client](https://github.com/GreptimeTeam/greptimedb-client-rust)
+- [GreptimeDB JavaScript Client](https://github.com/GreptimeTeam/greptime-js-sdk)
 
 ## Project Status
@@ -152,6 +152,7 @@ fn convert_record_batch(record_batch: RecordBatch) -> (Vec<Column>, u32) {
             .unwrap_or_default(),
         datatype: datatype.into(),
         semantic_type: semantic_type as i32,
+        ..Default::default()
     };
     columns.push(column);
 }
@@ -266,6 +267,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
             default_constraint: vec![],
             semantic_type: SemanticType::Tag as i32,
             comment: String::new(),
+            ..Default::default()
         },
         ColumnDef {
             name: "tpep_pickup_datetime".to_string(),
@@ -274,6 +276,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
             default_constraint: vec![],
             semantic_type: SemanticType::Timestamp as i32,
             comment: String::new(),
+            ..Default::default()
         },
         ColumnDef {
             name: "tpep_dropoff_datetime".to_string(),
@@ -282,6 +285,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
             default_constraint: vec![],
             semantic_type: SemanticType::Field as i32,
             comment: String::new(),
+            ..Default::default()
         },
         ColumnDef {
             name: "passenger_count".to_string(),
@@ -290,6 +294,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
             default_constraint: vec![],
             semantic_type: SemanticType::Field as i32,
             comment: String::new(),
+            ..Default::default()
         },
         ColumnDef {
             name: "trip_distance".to_string(),
@@ -298,6 +303,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
             default_constraint: vec![],
             semantic_type: SemanticType::Field as i32,
             comment: String::new(),
+            ..Default::default()
         },
         ColumnDef {
             name: "RatecodeID".to_string(),
@@ -306,6 +312,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
             default_constraint: vec![],
             semantic_type: SemanticType::Field as i32,
             comment: String::new(),
+            ..Default::default()
         },
         ColumnDef {
             name: "store_and_fwd_flag".to_string(),
@@ -314,6 +321,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
             default_constraint: vec![],
             semantic_type: SemanticType::Field as i32,
             comment: String::new(),
+            ..Default::default()
         },
         ColumnDef {
             name: "PULocationID".to_string(),
@@ -322,6 +330,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
             default_constraint: vec![],
             semantic_type: SemanticType::Field as i32,
             comment: String::new(),
+            ..Default::default()
         },
         ColumnDef {
             name: "DOLocationID".to_string(),
@@ -330,6 +339,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
             default_constraint: vec![],
             semantic_type: SemanticType::Field as i32,
             comment: String::new(),
+            ..Default::default()
         },
         ColumnDef {
             name: "payment_type".to_string(),
@@ -338,6 +348,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
             default_constraint: vec![],
             semantic_type: SemanticType::Field as i32,
             comment: String::new(),
+            ..Default::default()
         },
         ColumnDef {
             name: "fare_amount".to_string(),
@@ -346,6 +357,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
             default_constraint: vec![],
             semantic_type: SemanticType::Field as i32,
             comment: String::new(),
+            ..Default::default()
         },
         ColumnDef {
             name: "extra".to_string(),
@@ -354,6 +366,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
             default_constraint: vec![],
             semantic_type: SemanticType::Field as i32,
             comment: String::new(),
+            ..Default::default()
         },
         ColumnDef {
             name: "mta_tax".to_string(),
@@ -362,6 +375,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
             default_constraint: vec![],
             semantic_type: SemanticType::Field as i32,
             comment: String::new(),
+            ..Default::default()
         },
         ColumnDef {
             name: "tip_amount".to_string(),
@@ -370,6 +384,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
             default_constraint: vec![],
             semantic_type: SemanticType::Field as i32,
             comment: String::new(),
+            ..Default::default()
         },
         ColumnDef {
             name: "tolls_amount".to_string(),
@@ -378,6 +393,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
             default_constraint: vec![],
             semantic_type: SemanticType::Field as i32,
             comment: String::new(),
+            ..Default::default()
         },
         ColumnDef {
             name: "improvement_surcharge".to_string(),
@@ -386,6 +402,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
             default_constraint: vec![],
             semantic_type: SemanticType::Field as i32,
             comment: String::new(),
+            ..Default::default()
         },
         ColumnDef {
             name: "total_amount".to_string(),
@@ -394,6 +411,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
             default_constraint: vec![],
             semantic_type: SemanticType::Field as i32,
             comment: String::new(),
+            ..Default::default()
         },
         ColumnDef {
             name: "congestion_surcharge".to_string(),
@@ -402,6 +420,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
             default_constraint: vec![],
             semantic_type: SemanticType::Field as i32,
             comment: String::new(),
+            ..Default::default()
         },
         ColumnDef {
             name: "airport_fee".to_string(),
@@ -410,6 +429,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
             default_constraint: vec![],
             semantic_type: SemanticType::Field as i32,
             comment: String::new(),
+            ..Default::default()
         },
     ],
     time_index: "tpep_pickup_datetime".to_string(),
@@ -53,33 +53,6 @@ type = "File"
 # The local file cache capacity in bytes.
 # cache_capacity = "256MB"
 
-# Compaction options, see `standalone.example.toml`.
-[storage.compaction]
-max_inflight_tasks = 4
-max_files_in_level0 = 8
-max_purge_tasks = 32
-
-# Storage manifest options
-[storage.manifest]
-# Region checkpoint actions margin.
-# Create a checkpoint every <checkpoint_margin> actions.
-checkpoint_margin = 10
-# Region manifest logs and checkpoints gc execution duration
-gc_duration = '10m'
-
-# Storage flush options
-[storage.flush]
-# Max inflight flush tasks.
-max_flush_tasks = 8
-# Default write buffer size for a region.
-region_write_buffer_size = "32MB"
-# Interval to check whether a region needs flush.
-picker_schedule_interval = "5m"
-# Interval to auto flush a region if it has not flushed yet.
-auto_flush_interval = "1h"
-# Global write buffer size for all regions.
-global_write_buffer_size = "1GB"
-
 # Mito engine options
 [[region_engine]]
 [region_engine.mito]
@@ -91,8 +64,8 @@ worker_channel_size = 128
 worker_request_batch_size = 64
 # Number of meta action updated to trigger a new checkpoint for the manifest
 manifest_checkpoint_distance = 10
-# Manifest compression type
-manifest_compress_type = "Uncompressed"
+# Whether to compress manifest and checkpoint file by gzip (default false).
+compress_manifest = false
 # Max number of running background jobs
 max_background_jobs = 4
 # Interval to auto flush a region if it has not flushed yet.
@@ -105,13 +78,12 @@ global_write_buffer_reject_size = "2GB"
 sst_meta_cache_size = "128MB"
 # Cache size for vectors and arrow arrays (default 512MB). Setting it to 0 to disable the cache.
 vector_cache_size = "512MB"
 # Cache size for pages of SST row groups (default 512MB). Setting it to 0 to disable the cache.
 page_cache_size = "512MB"
 # Buffer size for SST writing.
 sst_write_buffer_size = "8MB"
 
-
-# Log options
+# Log options, see `standalone.example.toml`
 # [logging]
 # Specify logs directory.
 # dir = "/tmp/greptimedb/logs"
 # Specify the log level [info | debug | error | warn]
 # level = "info"
@@ -122,35 +122,35 @@ type = "File"
 # The local file cache capacity in bytes.
 # cache_capacity = "256MB"
 
 # Compaction options.
 [storage.compaction]
 # Max task number that can concurrently run.
 max_inflight_tasks = 4
 # Max files in level 0 to trigger compaction.
 max_files_in_level0 = 8
 # Max task number for SST purge task after compaction.
 max_purge_tasks = 32
 
 # Storage manifest options
 [storage.manifest]
 # Region checkpoint actions margin.
 # Create a checkpoint every <checkpoint_margin> actions.
 checkpoint_margin = 10
 # Region manifest logs and checkpoints gc execution duration
 gc_duration = '10m'
 
 # Storage flush options
 [storage.flush]
 # Max inflight flush tasks.
 max_flush_tasks = 8
 # Default write buffer size for a region.
 region_write_buffer_size = "32MB"
 # Interval to check whether a region needs flush.
 picker_schedule_interval = "5m"
 # Mito engine options
 [[region_engine]]
 [region_engine.mito]
 # Number of region workers
 num_workers = 8
 # Request channel size of each worker
 worker_channel_size = 128
 # Max batch size for a worker to handle requests
 worker_request_batch_size = 64
 # Number of meta action updated to trigger a new checkpoint for the manifest
 manifest_checkpoint_distance = 10
 # Whether to compress manifest and checkpoint file by gzip (default false).
 compress_manifest = false
 # Max number of running background jobs
 max_background_jobs = 4
 # Interval to auto flush a region if it has not flushed yet.
 auto_flush_interval = "1h"
 # Global write buffer size for all regions.
 global_write_buffer_size = "1GB"
 # Global write buffer size threshold to reject write requests (default 2G).
 global_write_buffer_reject_size = "2GB"
 # Cache size for SST metadata (default 128MB). Setting it to 0 to disable the cache.
 sst_meta_cache_size = "128MB"
 # Cache size for vectors and arrow arrays (default 512MB). Setting it to 0 to disable the cache.
 vector_cache_size = "512MB"
 # Cache size for pages of SST row groups (default 512MB). Setting it to 0 to disable the cache.
 page_cache_size = "512MB"
 # Buffer size for SST writing.
 sst_write_buffer_size = "8MB"
 
 # Log options
 # [logging]
@@ -158,3 +158,9 @@ global_write_buffer_size = "1GB"
 # dir = "/tmp/greptimedb/logs"
 # Specify the log level [info | debug | error | warn]
 # level = "info"
+# whether enable tracing, default is false
+# enable_otlp_tracing = false
+# tracing exporter endpoint with format `ip:port`, we use grpc oltp as exporter, default endpoint is `localhost:4317`
+# otlp_endpoint = "localhost:4317"
+# The percentage of tracing will be sampled and exported. Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1. ratio > 1 are treated as 1. Fractions < 0 are treated as 0
+# tracing_sample_ratio = 1.0
47  docker/dev-builder/ubuntu/Dockerfile-18.10  Normal file
@@ -0,0 +1,47 @@
# Use the legacy glibc 2.28.
FROM ubuntu:18.10

ENV LANG en_US.utf8
WORKDIR /greptimedb

# Use old-releases.ubuntu.com to avoid 404s: https://help.ubuntu.com/community/EOLUpgrades.
RUN echo "deb http://old-releases.ubuntu.com/ubuntu/ cosmic main restricted universe multiverse\n\
deb http://old-releases.ubuntu.com/ubuntu/ cosmic-updates main restricted universe multiverse\n\
deb http://old-releases.ubuntu.com/ubuntu/ cosmic-security main restricted universe multiverse" > /etc/apt/sources.list

# Install dependencies.
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
    libssl-dev \
    tzdata \
    curl \
    ca-certificates \
    git \
    build-essential \
    unzip \
    pkg-config

# Install protoc.
ENV PROTOC_VERSION=25.1
RUN if [ "$(uname -m)" = "x86_64" ]; then \
        PROTOC_ZIP=protoc-${PROTOC_VERSION}-linux-x86_64.zip; \
    elif [ "$(uname -m)" = "aarch64" ]; then \
        PROTOC_ZIP=protoc-${PROTOC_VERSION}-linux-aarch_64.zip; \
    else \
        echo "Unsupported architecture"; exit 1; \
    fi && \
    curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOC_VERSION}/${PROTOC_ZIP} && \
    unzip -o ${PROTOC_ZIP} -d /usr/local bin/protoc && \
    unzip -o ${PROTOC_ZIP} -d /usr/local 'include/*' && \
    rm -f ${PROTOC_ZIP}

# Install Rust.
SHELL ["/bin/bash", "-c"]
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
ENV PATH /root/.cargo/bin/:$PATH

# Install Rust toolchains.
ARG RUST_TOOLCHAIN
RUN rustup toolchain install ${RUST_TOOLCHAIN}

# Install nextest.
RUN cargo install cargo-nextest --locked
@@ -50,10 +50,10 @@ The concept "Table" in GreptimeDB is a bit "heavy" compared to other time-series
 ```
 
 The following parts will describe these implementation details:
-- How to route these metric region tables and how those table are distributed
-- How to maintain the schema and other metadata of the underlying mito engine table
-- How to maintain the schema of metric engine table
-- How the query goes
+- How to route these metric region tables and how those table are distributed
+- How to maintain the schema and other metadata of the underlying mito engine table
+- How to maintain the schema of metric engine table
+- How the query goes
 
 ## Routing
@@ -1,2 +1,2 @@
 [toolchain]
-channel = "nightly-2023-08-07"
+channel = "nightly-2023-10-21"
157  scripts/run-pyo3-greptime.sh  Executable file
@@ -0,0 +1,157 @@
#!/bin/bash

# This script configures the environment to run 'greptime' with the required Python version

# This script should be compatible both in Linux and macOS
OS_TYPE="$(uname)"
readonly OS_TYPE

check_command_existence() {
    command -v "$1" &> /dev/null
}

get_python_version() {
    case "$OS_TYPE" in
        Darwin)
            otool -L $GREPTIME_BIN_PATH | grep -o 'Python.framework/Versions/3.[0-9]\+/Python' | grep -o '3.[0-9]\+'
            ;;
        Linux)
            ldd $GREPTIME_BIN_PATH | grep -o 'libpython3\.[0-9]\+' | grep -o '3\.[0-9]\+'
            ;;
        *)
            echo "Unsupported OS type: $OS_TYPE"
            exit 1
            ;;
    esac
}

setup_virtualenv() {
    local req_py_version="$1"
    local env_name="GreptimeTmpVenv$req_py_version"
    virtualenv --python=python"$req_py_version" "$env_name"
    source "$env_name/bin/activate"
}

setup_conda_env() {
    local req_py_version="$1"
    local conda_base
    conda_base=$(conda info --base) || { echo "Error obtaining conda base directory"; exit 1; }
    . "$conda_base/etc/profile.d/conda.sh"

    if ! conda list --name "GreptimeTmpPyO3Env$req_py_version" &> /dev/null; then
        conda create --yes --name "GreptimeTmpPyO3Env$req_py_version" python="$req_py_version"
    fi

    conda activate "GreptimeTmpPyO3Env$req_py_version"
}

GREPTIME_BIN_PATH="./greptime"
YES="false"

usage() {
    echo "Usage:"
    echo "  $0 -f <greptime-bin-path> [-y] <args-pass-to-greptime>"
    echo "Set $PY_ENV_MAN to 1 to use virtualenv, 2 to use conda"
    exit 1
}

function parse_args() {
    while getopts ":f:y" opt; do
        case $opt in
            f)
                GREPTIME_BIN_PATH=$OPTARG
                ;;
            y)
                YES="yes"
                ;;
            \?)
                echo "Invalid option: -$OPTARG" >&2
                exit 1
                ;;
            :)
                echo "Option -$OPTARG requires an argument." >&2
                exit 1
                ;;
        esac
    done

    shift $((OPTIND -1))

    REST_ARGS=$*

    if [ -z "$GREPTIME_BIN_PATH" ]; then
        usage
    fi

    echo "Run greptime binary at '$GREPTIME_BIN_PATH' (yes=$YES)..."
    echo "The args pass to greptime: '$REST_ARGS'"
}

# Set library path and pass all arguments to greptime to run it
execute_greptime() {
    if [[ "$OS_TYPE" == "Darwin" ]]; then
        DYLD_LIBRARY_PATH="${CONDA_PREFIX:-$PREFIX}/lib:${LD_LIBRARY_PATH:-}" $GREPTIME_BIN_PATH $@
    elif [[ "$OS_TYPE" == "Linux" ]]; then
        LD_LIBRARY_PATH="${CONDA_PREFIX:-$PREFIX}/lib:${LD_LIBRARY_PATH:-}" $GREPTIME_BIN_PATH $@
    fi
}

main() {
    parse_args $@

    local req_py_version
    req_py_version=$(get_python_version)
    readonly req_py_version

    if [[ -z "$req_py_version" ]]; then
        if $GREPTIME_BIN_PATH --version &> /dev/null; then
            $GREPTIME_BIN_PATH $REST_ARGS
        else
            echo "The 'greptime' binary is not valid or encountered an error."
            $GREPTIME_BIN_PATH --version
            exit 1
        fi
        return
    fi

    echo "The required version of Python shared library is $req_py_version"

    # if YES exist, assign it to yn, else read from stdin
    if [[ -z "$YES" ]]; then
        echo "Now this script will try to install or find correct Python Version"
        echo "Do you want to continue? (yes/no): "
        read -r yn
    else
        yn="$YES"
    fi
    case $yn in
        [Yy]* ) ;;
        [Nn]* ) exit;;
        * ) echo "Please answer yes or no.";;
    esac

    # if USE_ENV exist, assign it to option
    # else read from stdin
    if [[ -z "$PY_ENV_MAN" ]]; then
        echo "Do you want to use virtualenv or conda? (virtualenv(1)/conda(2)): "
        read -r option
    else
        option="$PY_ENV_MAN"
    fi

    case $option in
        1)
            setup_virtualenv "$req_py_version"
            ;;
        2)
            setup_conda_env "$req_py_version"
            ;;
        *)
            echo "Please input 1 or 2"; exit 1
            ;;
    esac

    execute_greptime $REST_ARGS
}

main "$@"
@@ -6,11 +6,13 @@ license.workspace = true
 
 [dependencies]
 common-base.workspace = true
+common-decimal.workspace = true
 common-error.workspace = true
 common-macro.workspace = true
 common-time.workspace = true
 datatypes.workspace = true
 greptime-proto.workspace = true
 paste = "1.0"
 prost.workspace = true
 snafu.workspace = true
 tonic.workspace = true
@@ -15,6 +15,8 @@
 use std::sync::Arc;
 
 use common_base::BitVec;
+use common_decimal::decimal128::{DECIMAL128_DEFAULT_SCALE, DECIMAL128_MAX_PRECISION};
+use common_decimal::Decimal128;
 use common_time::interval::IntervalUnit;
 use common_time::time::Time;
 use common_time::timestamp::TimeUnit;
@@ -26,47 +28,71 @@ use datatypes::types::{
 };
 use datatypes::value::{OrderedF32, OrderedF64, Value};
 use datatypes::vectors::{
-    BinaryVector, BooleanVector, DateTimeVector, DateVector, DurationMicrosecondVector,
-    DurationMillisecondVector, DurationNanosecondVector, DurationSecondVector, Float32Vector,
-    Float64Vector, Int32Vector, Int64Vector, IntervalDayTimeVector, IntervalMonthDayNanoVector,
-    IntervalYearMonthVector, PrimitiveVector, StringVector, TimeMicrosecondVector,
-    TimeMillisecondVector, TimeNanosecondVector, TimeSecondVector, TimestampMicrosecondVector,
-    TimestampMillisecondVector, TimestampNanosecondVector, TimestampSecondVector, UInt32Vector,
-    UInt64Vector, VectorRef,
+    BinaryVector, BooleanVector, DateTimeVector, DateVector, Decimal128Vector,
+    DurationMicrosecondVector, DurationMillisecondVector, DurationNanosecondVector,
+    DurationSecondVector, Float32Vector, Float64Vector, Int32Vector, Int64Vector,
+    IntervalDayTimeVector, IntervalMonthDayNanoVector, IntervalYearMonthVector, PrimitiveVector,
+    StringVector, TimeMicrosecondVector, TimeMillisecondVector, TimeNanosecondVector,
+    TimeSecondVector, TimestampMicrosecondVector, TimestampMillisecondVector,
+    TimestampNanosecondVector, TimestampSecondVector, UInt32Vector, UInt64Vector, VectorRef,
 };
+use greptime_proto::v1;
+use greptime_proto::v1::column_data_type_extension::TypeExt;
 use greptime_proto::v1::ddl_request::Expr;
 use greptime_proto::v1::greptime_request::Request;
 use greptime_proto::v1::query_request::Query;
 use greptime_proto::v1::value::ValueData;
-use greptime_proto::v1::{self, DdlRequest, IntervalMonthDayNano, QueryRequest, Row, SemanticType};
+use greptime_proto::v1::{
+    ColumnDataTypeExtension, DdlRequest, DecimalTypeExtension, QueryRequest, Row, SemanticType,
+};
 use paste::paste;
 use snafu::prelude::*;
 
 use crate::error::{self, Result};
 use crate::v1::column::Values;
 use crate::v1::{Column, ColumnDataType, Value as GrpcValue};
 
-#[derive(Debug, PartialEq, Eq)]
-pub struct ColumnDataTypeWrapper(ColumnDataType);
+/// ColumnDataTypeWrapper is a wrapper of ColumnDataType and ColumnDataTypeExtension.
+/// It could be used to convert with ConcreteDataType.
+#[derive(Debug, PartialEq)]
+pub struct ColumnDataTypeWrapper {
+    datatype: ColumnDataType,
+    datatype_ext: Option<ColumnDataTypeExtension>,
+}
 
 impl ColumnDataTypeWrapper {
-    pub fn try_new(datatype: i32) -> Result<Self> {
+    /// Try to create a ColumnDataTypeWrapper from i32(ColumnDataType) and ColumnDataTypeExtension.
+    pub fn try_new(datatype: i32, datatype_ext: Option<ColumnDataTypeExtension>) -> Result<Self> {
         let datatype = ColumnDataType::try_from(datatype)
             .context(error::UnknownColumnDataTypeSnafu { datatype })?;
-        Ok(Self(datatype))
+        Ok(Self {
+            datatype,
+            datatype_ext,
+        })
     }
 
-    pub fn new(datatype: ColumnDataType) -> Self {
-        Self(datatype)
+    /// Create a ColumnDataTypeWrapper from ColumnDataType and ColumnDataTypeExtension.
+    pub fn new(datatype: ColumnDataType, datatype_ext: Option<ColumnDataTypeExtension>) -> Self {
+        Self {
+            datatype,
+            datatype_ext,
+        }
     }
 
+    /// Get the ColumnDataType.
     pub fn datatype(&self) -> ColumnDataType {
-        self.0
+        self.datatype
     }
+
+    /// Get a tuple of ColumnDataType and ColumnDataTypeExtension.
+    pub fn to_parts(&self) -> (ColumnDataType, Option<ColumnDataTypeExtension>) {
+        (self.datatype, self.datatype_ext.clone())
+    }
 }
 
 impl From<ColumnDataTypeWrapper> for ConcreteDataType {
-    fn from(datatype: ColumnDataTypeWrapper) -> Self {
-        match datatype.0 {
+    fn from(datatype_wrapper: ColumnDataTypeWrapper) -> Self {
+        match datatype_wrapper.datatype {
             ColumnDataType::Boolean => ConcreteDataType::boolean_datatype(),
             ColumnDataType::Int8 => ConcreteDataType::int8_datatype(),
             ColumnDataType::Int16 => ConcreteDataType::int16_datatype(),
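For readers following the decimal plumbing: the wrapper now pairs the `ColumnDataType` enum with an optional `ColumnDataTypeExtension` carrying precision and scale. A minimal sketch of constructing and unpacking one, using only names that appear in this diff (the values 10 and 2 are illustrative):

```rust
// Sketch only: builds a Decimal128 wrapper whose extension records precision/scale.
let wrapper = ColumnDataTypeWrapper::new(
    ColumnDataType::Decimal128,
    Some(ColumnDataTypeExtension {
        type_ext: Some(TypeExt::DecimalType(DecimalTypeExtension {
            precision: 10,
            scale: 2,
        })),
    }),
);
let (datatype, ext) = wrapper.to_parts();
assert_eq!(datatype, ColumnDataType::Decimal128);
assert!(ext.is_some()); // precision/scale travel with the type over the wire
```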
@@ -109,6 +135,100 @@ impl From<ColumnDataTypeWrapper> for ConcreteDataType {
                 ConcreteDataType::duration_microsecond_datatype()
             }
             ColumnDataType::DurationNanosecond => ConcreteDataType::duration_nanosecond_datatype(),
+            ColumnDataType::Decimal128 => {
+                if let Some(TypeExt::DecimalType(d)) = datatype_wrapper
+                    .datatype_ext
+                    .as_ref()
+                    .and_then(|datatype_ext| datatype_ext.type_ext.as_ref())
+                {
+                    ConcreteDataType::decimal128_datatype(d.precision as u8, d.scale as i8)
+                } else {
+                    ConcreteDataType::decimal128_default_datatype()
+                }
+            }
         }
     }
 }
 
+/// This macro is used to generate datatype functions
+/// with lower style for ColumnDataTypeWrapper.
+///
+///
+/// For example: we can use `ColumnDataTypeWrapper::int8_datatype()`,
+/// to get a ColumnDataTypeWrapper with datatype `ColumnDataType::Int8`.
+macro_rules! impl_column_type_functions {
+    ($($Type: ident), +) => {
+        paste! {
+            impl ColumnDataTypeWrapper {
+                $(
+                    pub fn [<$Type:lower _datatype>]() -> ColumnDataTypeWrapper {
+                        ColumnDataTypeWrapper {
+                            datatype: ColumnDataType::$Type,
+                            datatype_ext: None,
+                        }
+                    }
+                )+
+            }
+        }
+    }
+}
+
+/// This macro is used to generate datatype functions
+/// with snake style for ColumnDataTypeWrapper.
+///
+///
+/// For example: we can use `ColumnDataTypeWrapper::duration_second_datatype()`,
+/// to get a ColumnDataTypeWrapper with datatype `ColumnDataType::DurationSecond`.
+macro_rules! impl_column_type_functions_with_snake {
+    ($($TypeName: ident), +) => {
+        paste!{
+            impl ColumnDataTypeWrapper {
+                $(
+                    pub fn [<$TypeName:snake _datatype>]() -> ColumnDataTypeWrapper {
+                        ColumnDataTypeWrapper {
+                            datatype: ColumnDataType::$TypeName,
+                            datatype_ext: None,
+                        }
+                    }
+                )+
+            }
+        }
+    };
+}
+
+impl_column_type_functions!(
+    Boolean, Uint8, Uint16, Uint32, Uint64, Int8, Int16, Int32, Int64, Float32, Float64, Binary,
+    Date, Datetime, String
+);
+
+impl_column_type_functions_with_snake!(
+    TimestampSecond,
+    TimestampMillisecond,
+    TimestampMicrosecond,
+    TimestampNanosecond,
+    TimeSecond,
+    TimeMillisecond,
+    TimeMicrosecond,
+    TimeNanosecond,
+    IntervalYearMonth,
+    IntervalDayTime,
+    IntervalMonthDayNano,
+    DurationSecond,
+    DurationMillisecond,
+    DurationMicrosecond,
+    DurationNanosecond
+);
+
+impl ColumnDataTypeWrapper {
+    pub fn decimal128_datatype(precision: i32, scale: i32) -> Self {
+        ColumnDataTypeWrapper {
+            datatype: ColumnDataType::Decimal128,
+            datatype_ext: Some(ColumnDataTypeExtension {
+                type_ext: Some(TypeExt::DecimalType(DecimalTypeExtension {
+                    precision,
+                    scale,
+                })),
+            }),
+        }
+    }
+}
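The two `macro_rules!` blocks above lean on `paste` case conversion (`:lower`, `:snake`) to stamp out one constructor per variant. Hand-expanding a single element of `impl_column_type_functions!(Boolean, ...)` gives roughly this (a sketch of the expansion, not code from the commit):

```rust
impl ColumnDataTypeWrapper {
    // `[<$Type:lower _datatype>]` turns `Boolean` into the method name `boolean_datatype`.
    pub fn boolean_datatype() -> ColumnDataTypeWrapper {
        ColumnDataTypeWrapper {
            datatype: ColumnDataType::Boolean,
            datatype_ext: None, // plain types never need an extension
        }
    }
}
```

Decimal128 is the one variant kept out of the macros, since its constructor must populate `datatype_ext`.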
@@ -117,7 +237,7 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
     type Error = error::Error;
 
     fn try_from(datatype: ConcreteDataType) -> Result<Self> {
-        let datatype = ColumnDataTypeWrapper(match datatype {
+        let column_datatype = match datatype {
             ConcreteDataType::Boolean(_) => ColumnDataType::Boolean,
             ConcreteDataType::Int8(_) => ColumnDataType::Int8,
             ConcreteDataType::Int16(_) => ColumnDataType::Int16,
@@ -156,13 +276,30 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
                 DurationType::Microsecond(_) => ColumnDataType::DurationMicrosecond,
                 DurationType::Nanosecond(_) => ColumnDataType::DurationNanosecond,
             },
+            ConcreteDataType::Decimal128(_) => ColumnDataType::Decimal128,
             ConcreteDataType::Null(_)
             | ConcreteDataType::List(_)
             | ConcreteDataType::Dictionary(_) => {
                 return error::IntoColumnDataTypeSnafu { from: datatype }.fail()
             }
-        });
-        Ok(datatype)
+        };
+        let datatype_extension = match column_datatype {
+            ColumnDataType::Decimal128 => {
+                datatype
+                    .as_decimal128()
+                    .map(|decimal_type| ColumnDataTypeExtension {
+                        type_ext: Some(TypeExt::DecimalType(DecimalTypeExtension {
+                            precision: decimal_type.precision() as i32,
+                            scale: decimal_type.scale() as i32,
+                        })),
+                    })
+            }
+            _ => None,
+        };
+        Ok(Self {
+            datatype: column_datatype,
+            datatype_ext: datatype_extension,
+        })
     }
 }
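Taken together with the `From` impl above, the conversion is now lossless for decimals. A round-trip sketch, under the assumptions that `ConcreteDataType` is `Clone` and this runs in a function returning `Result` (the 38/10 figures are illustrative):

```rust
let concrete = ConcreteDataType::decimal128_datatype(38, 10);
// ConcreteDataType -> wrapper: the match above picks Decimal128 and
// records precision/scale in the extension.
let wrapper = ColumnDataTypeWrapper::try_from(concrete.clone())?;
// wrapper -> ConcreteDataType: the extension restores the exact type.
assert_eq!(ConcreteDataType::from(wrapper), concrete);
```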
@@ -288,6 +425,10 @@ pub fn values_with_capacity(datatype: ColumnDataType, capacity: usize) -> Values
             duration_nanosecond_values: Vec::with_capacity(capacity),
             ..Default::default()
         },
+        ColumnDataType::Decimal128 => Values {
+            decimal128_values: Vec::with_capacity(capacity),
+            ..Default::default()
+        },
     }
 }
@@ -341,6 +482,7 @@ pub fn push_vals(column: &mut Column, origin_count: usize, vector: VectorRef) {
             TimeUnit::Microsecond => values.duration_microsecond_values.push(val.value()),
             TimeUnit::Nanosecond => values.duration_nanosecond_values.push(val.value()),
         },
+        Value::Decimal128(val) => values.decimal128_values.push(convert_to_pb_decimal128(val)),
         Value::List(_) => unreachable!(),
     });
     column.null_mask = null_mask.into_vec();
@@ -381,17 +523,29 @@ fn ddl_request_type(request: &DdlRequest) -> &'static str {
 }
 
 /// Converts an i128 value to google protobuf type [IntervalMonthDayNano].
-pub fn convert_i128_to_interval(v: i128) -> IntervalMonthDayNano {
+pub fn convert_i128_to_interval(v: i128) -> v1::IntervalMonthDayNano {
     let interval = Interval::from_i128(v);
     let (months, days, nanoseconds) = interval.to_month_day_nano();
-    IntervalMonthDayNano {
+    v1::IntervalMonthDayNano {
         months,
         days,
         nanoseconds,
     }
 }
 
-pub fn pb_value_to_value_ref(value: &v1::Value) -> ValueRef {
+/// Convert common decimal128 to grpc decimal128 without precision and scale.
+pub fn convert_to_pb_decimal128(v: Decimal128) -> v1::Decimal128 {
+    let value = v.val();
+    v1::Decimal128 {
+        hi: (value >> 64) as i64,
+        lo: value as i64,
+    }
+}
+
+pub fn pb_value_to_value_ref<'a>(
+    value: &'a v1::Value,
+    datatype_ext: &'a Option<ColumnDataTypeExtension>,
+) -> ValueRef<'a> {
     let Some(value) = &value.value_data else {
         return ValueRef::Null;
     };
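The `hi`/`lo` split in `convert_to_pb_decimal128` is plain two's-complement bit slicing of the i128 backing value. A sketch of why the round trip works, and why reassembly must zero-extend `lo` (standalone arithmetic, not code from the commit):

```rust
let value: i128 = -1_234_567_890_123_456_789_012_345_678_i128;
let hi = (value >> 64) as i64; // high 64 bits; arithmetic shift keeps the sign
let lo = value as i64;         // low 64 bits, reinterpreted as signed
// `lo as u64` zero-extends; casting `lo` straight to i128 would sign-extend
// and smear its top bit across the high half.
let roundtrip = ((hi as i128) << 64) | (lo as u64 as i128);
assert_eq!(roundtrip, value);
```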
@@ -436,6 +590,28 @@
         ValueData::DurationMillisecondValue(v) => ValueRef::Duration(Duration::new_millisecond(*v)),
         ValueData::DurationMicrosecondValue(v) => ValueRef::Duration(Duration::new_microsecond(*v)),
         ValueData::DurationNanosecondValue(v) => ValueRef::Duration(Duration::new_nanosecond(*v)),
+        ValueData::Decimal128Value(v) => {
+            // get precision and scale from datatype_extension
+            if let Some(TypeExt::DecimalType(d)) = datatype_ext
+                .as_ref()
+                .and_then(|column_ext| column_ext.type_ext.as_ref())
+            {
+                ValueRef::Decimal128(Decimal128::from_value_precision_scale(
+                    v.hi,
+                    v.lo,
+                    d.precision as u8,
+                    d.scale as i8,
+                ))
+            } else {
+                // If the precision and scale are not set, use the default value.
+                ValueRef::Decimal128(Decimal128::from_value_precision_scale(
+                    v.hi,
+                    v.lo,
+                    DECIMAL128_MAX_PRECISION,
+                    DECIMAL128_DEFAULT_SCALE,
+                ))
+            }
+        }
     }
 }
@@ -522,6 +698,11 @@ pub fn pb_values_to_vector_ref(data_type: &ConcreteDataType, values: Values) ->
                 values.duration_nanosecond_values,
             )),
         },
+        ConcreteDataType::Decimal128(d) => Arc::new(Decimal128Vector::from_values(
+            values.decimal128_values.iter().map(|x| {
+                Decimal128::from_value_precision_scale(x.hi, x.lo, d.precision(), d.scale()).into()
+            }),
+        )),
         ConcreteDataType::Null(_) | ConcreteDataType::List(_) | ConcreteDataType::Dictionary(_) => {
             unreachable!()
         }
@@ -692,6 +873,18 @@ pub fn pb_values_to_values(data_type: &ConcreteDataType, values: Values) -> Vec<
             .into_iter()
             .map(|v| Value::Duration(Duration::new_nanosecond(v)))
             .collect(),
+        ConcreteDataType::Decimal128(d) => values
+            .decimal128_values
+            .into_iter()
+            .map(|v| {
+                Value::Decimal128(Decimal128::from_value_precision_scale(
+                    v.hi,
+                    v.lo,
+                    d.precision(),
+                    d.scale(),
+                ))
+            })
+            .collect(),
         ConcreteDataType::Null(_) | ConcreteDataType::List(_) | ConcreteDataType::Dictionary(_) => {
             unreachable!()
         }
@@ -704,12 +897,14 @@ pub fn is_semantic_type_eq(type_value: i32, semantic_type: SemanticType) -> bool
 }
 
 /// Returns true if the pb type value is valid.
-pub fn is_column_type_value_eq(type_value: i32, expect_type: &ConcreteDataType) -> bool {
-    let Ok(column_type) = ColumnDataType::try_from(type_value) else {
-        return false;
-    };
-
-    is_column_type_eq(column_type, expect_type)
+pub fn is_column_type_value_eq(
+    type_value: i32,
+    type_extension: Option<ColumnDataTypeExtension>,
+    expect_type: &ConcreteDataType,
+) -> bool {
+    ColumnDataTypeWrapper::try_new(type_value, type_extension)
+        .map(|wrapper| ConcreteDataType::from(wrapper) == *expect_type)
+        .unwrap_or(false)
 }
 
 /// Convert value into proto's value.
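The reason the extension joins the comparison: every decimal maps to the same `ColumnDataType::Decimal128` discriminant, so the enum alone cannot distinguish, say, decimal(10, 2) from decimal(38, 10). A sketch reusing only helpers from this diff (the concrete precision/scale values are illustrative):

```rust
let (dt, ext) = ColumnDataTypeWrapper::decimal128_datatype(10, 2).to_parts();
// Same discriminant + matching extension: equal.
assert!(is_column_type_value_eq(
    dt as i32,
    ext.clone(),
    &ConcreteDataType::decimal128_datatype(10, 2),
));
// Same discriminant, different precision/scale: no longer reported equal.
assert!(!is_column_type_value_eq(
    dt as i32,
    ext,
    &ConcreteDataType::decimal128_datatype(38, 10),
));
```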
@@ -816,13 +1011,19 @@ pub fn to_proto_value(value: Value) -> Option<v1::Value> {
                 value_data: Some(ValueData::DurationNanosecondValue(v.value())),
             },
         },
+        Value::Decimal128(v) => {
+            let (hi, lo) = v.split_value();
+            v1::Value {
+                value_data: Some(ValueData::Decimal128Value(v1::Decimal128 { hi, lo })),
+            }
+        }
         Value::List(_) => return None,
     };
 
     Some(proto_value)
 }
 
-/// Returns the [ColumnDataType] of the value.
+/// Returns the [ColumnDataTypeWrapper] of the value.
 ///
 /// If value is null, returns `None`.
 pub fn proto_value_type(value: &v1::Value) -> Option<ColumnDataType> {
@@ -857,65 +1058,11 @@
         ValueData::DurationMillisecondValue(_) => ColumnDataType::DurationMillisecond,
         ValueData::DurationMicrosecondValue(_) => ColumnDataType::DurationMicrosecond,
         ValueData::DurationNanosecondValue(_) => ColumnDataType::DurationNanosecond,
+        ValueData::Decimal128Value(_) => ColumnDataType::Decimal128,
     };
     Some(value_type)
 }
 
-/// Convert [ConcreteDataType] to [ColumnDataType].
-pub fn to_column_data_type(data_type: &ConcreteDataType) -> Option<ColumnDataType> {
-    let column_data_type = match data_type {
-        ConcreteDataType::Boolean(_) => ColumnDataType::Boolean,
-        ConcreteDataType::Int8(_) => ColumnDataType::Int8,
-        ConcreteDataType::Int16(_) => ColumnDataType::Int16,
-        ConcreteDataType::Int32(_) => ColumnDataType::Int32,
-        ConcreteDataType::Int64(_) => ColumnDataType::Int64,
-        ConcreteDataType::UInt8(_) => ColumnDataType::Uint8,
-        ConcreteDataType::UInt16(_) => ColumnDataType::Uint16,
-        ConcreteDataType::UInt32(_) => ColumnDataType::Uint32,
-        ConcreteDataType::UInt64(_) => ColumnDataType::Uint64,
-        ConcreteDataType::Float32(_) => ColumnDataType::Float32,
-        ConcreteDataType::Float64(_) => ColumnDataType::Float64,
-        ConcreteDataType::Binary(_) => ColumnDataType::Binary,
-        ConcreteDataType::String(_) => ColumnDataType::String,
-        ConcreteDataType::Date(_) => ColumnDataType::Date,
-        ConcreteDataType::DateTime(_) => ColumnDataType::Datetime,
-        ConcreteDataType::Timestamp(TimestampType::Second(_)) => ColumnDataType::TimestampSecond,
-        ConcreteDataType::Timestamp(TimestampType::Millisecond(_)) => {
-            ColumnDataType::TimestampMillisecond
-        }
-        ConcreteDataType::Timestamp(TimestampType::Microsecond(_)) => {
-            ColumnDataType::TimestampMicrosecond
-        }
-        ConcreteDataType::Timestamp(TimestampType::Nanosecond(_)) => {
-            ColumnDataType::TimestampNanosecond
-        }
-        ConcreteDataType::Time(TimeType::Second(_)) => ColumnDataType::TimeSecond,
-        ConcreteDataType::Time(TimeType::Millisecond(_)) => ColumnDataType::TimeMillisecond,
-        ConcreteDataType::Time(TimeType::Microsecond(_)) => ColumnDataType::TimeMicrosecond,
-        ConcreteDataType::Time(TimeType::Nanosecond(_)) => ColumnDataType::TimeNanosecond,
-        ConcreteDataType::Duration(DurationType::Second(_)) => ColumnDataType::DurationSecond,
-        ConcreteDataType::Duration(DurationType::Millisecond(_)) => {
-            ColumnDataType::DurationMillisecond
-        }
-        ConcreteDataType::Duration(DurationType::Microsecond(_)) => {
-            ColumnDataType::DurationMicrosecond
-        }
-        ConcreteDataType::Duration(DurationType::Nanosecond(_)) => {
-            ColumnDataType::DurationNanosecond
-        }
-        ConcreteDataType::Interval(IntervalType::YearMonth(_)) => ColumnDataType::IntervalYearMonth,
-        ConcreteDataType::Interval(IntervalType::MonthDayNano(_)) => {
-            ColumnDataType::IntervalMonthDayNano
-        }
-        ConcreteDataType::Interval(IntervalType::DayTime(_)) => ColumnDataType::IntervalDayTime,
-        ConcreteDataType::Null(_) | ConcreteDataType::List(_) | ConcreteDataType::Dictionary(_) => {
-            return None
-        }
-    };
-
-    Some(column_data_type)
-}
-
 pub fn vectors_to_rows<'a>(
     columns: impl Iterator<Item = &'a VectorRef>,
     row_count: usize,
@@ -974,20 +1121,15 @@
                 TimeUnit::Microsecond => ValueData::DurationMicrosecondValue(v.value()),
                 TimeUnit::Nanosecond => ValueData::DurationNanosecondValue(v.value()),
             }),
+            Value::Decimal128(v) => {
+                let (hi, lo) = v.split_value();
+                Some(ValueData::Decimal128Value(v1::Decimal128 { hi, lo }))
+            }
             Value::List(_) => unreachable!(),
         },
     }
 }
 
-/// Returns true if the column type is equal to expected type.
-fn is_column_type_eq(column_type: ColumnDataType, expect_type: &ConcreteDataType) -> bool {
-    if let Some(expect) = to_column_data_type(expect_type) {
-        column_type == expect
-    } else {
-        false
-    }
-}
-
 #[cfg(test)]
 mod tests {
     use std::sync::Arc;
@@ -1081,189 +1223,204 @@
         let values = values_with_capacity(ColumnDataType::DurationMillisecond, 2);
         let values = values.duration_millisecond_values;
         assert_eq!(2, values.capacity());
+
+        let values = values_with_capacity(ColumnDataType::Decimal128, 2);
+        let values = values.decimal128_values;
+        assert_eq!(2, values.capacity());
     }
 
     #[test]
     fn test_concrete_datatype_from_column_datatype() {
         assert_eq!(
             ConcreteDataType::boolean_datatype(),
-            ColumnDataTypeWrapper(ColumnDataType::Boolean).into()
+            ColumnDataTypeWrapper::boolean_datatype().into()
         );
         assert_eq!(
             ConcreteDataType::int8_datatype(),
-            ColumnDataTypeWrapper(ColumnDataType::Int8).into()
+            ColumnDataTypeWrapper::int8_datatype().into()
         );
         assert_eq!(
             ConcreteDataType::int16_datatype(),
-            ColumnDataTypeWrapper(ColumnDataType::Int16).into()
+            ColumnDataTypeWrapper::int16_datatype().into()
         );
         assert_eq!(
             ConcreteDataType::int32_datatype(),
-            ColumnDataTypeWrapper(ColumnDataType::Int32).into()
+            ColumnDataTypeWrapper::int32_datatype().into()
         );
         assert_eq!(
             ConcreteDataType::int64_datatype(),
-            ColumnDataTypeWrapper(ColumnDataType::Int64).into()
+            ColumnDataTypeWrapper::int64_datatype().into()
        );
         assert_eq!(
             ConcreteDataType::uint8_datatype(),
-            ColumnDataTypeWrapper(ColumnDataType::Uint8).into()
+            ColumnDataTypeWrapper::uint8_datatype().into()
         );
         assert_eq!(
             ConcreteDataType::uint16_datatype(),
-            ColumnDataTypeWrapper(ColumnDataType::Uint16).into()
+            ColumnDataTypeWrapper::uint16_datatype().into()
         );
         assert_eq!(
             ConcreteDataType::uint32_datatype(),
-            ColumnDataTypeWrapper(ColumnDataType::Uint32).into()
+            ColumnDataTypeWrapper::uint32_datatype().into()
         );
         assert_eq!(
             ConcreteDataType::uint64_datatype(),
-            ColumnDataTypeWrapper(ColumnDataType::Uint64).into()
+            ColumnDataTypeWrapper::uint64_datatype().into()
         );
         assert_eq!(
             ConcreteDataType::float32_datatype(),
-            ColumnDataTypeWrapper(ColumnDataType::Float32).into()
+            ColumnDataTypeWrapper::float32_datatype().into()
         );
         assert_eq!(
             ConcreteDataType::float64_datatype(),
-            ColumnDataTypeWrapper(ColumnDataType::Float64).into()
+            ColumnDataTypeWrapper::float64_datatype().into()
         );
         assert_eq!(
             ConcreteDataType::binary_datatype(),
-            ColumnDataTypeWrapper(ColumnDataType::Binary).into()
+            ColumnDataTypeWrapper::binary_datatype().into()
         );
         assert_eq!(
             ConcreteDataType::string_datatype(),
-            ColumnDataTypeWrapper(ColumnDataType::String).into()
+            ColumnDataTypeWrapper::string_datatype().into()
         );
         assert_eq!(
             ConcreteDataType::date_datatype(),
-            ColumnDataTypeWrapper(ColumnDataType::Date).into()
+            ColumnDataTypeWrapper::date_datatype().into()
         );
         assert_eq!(
             ConcreteDataType::datetime_datatype(),
-            ColumnDataTypeWrapper(ColumnDataType::Datetime).into()
+            ColumnDataTypeWrapper::datetime_datatype().into()
         );
         assert_eq!(
             ConcreteDataType::timestamp_millisecond_datatype(),
-            ColumnDataTypeWrapper(ColumnDataType::TimestampMillisecond).into()
+            ColumnDataTypeWrapper::timestamp_millisecond_datatype().into()
         );
         assert_eq!(
             ConcreteDataType::time_datatype(TimeUnit::Millisecond),
-            ColumnDataTypeWrapper(ColumnDataType::TimeMillisecond).into()
+            ColumnDataTypeWrapper::time_millisecond_datatype().into()
         );
         assert_eq!(
             ConcreteDataType::interval_datatype(IntervalUnit::DayTime),
-            ColumnDataTypeWrapper(ColumnDataType::IntervalDayTime).into()
+            ColumnDataTypeWrapper::interval_day_time_datatype().into()
         );
         assert_eq!(
             ConcreteDataType::interval_datatype(IntervalUnit::YearMonth),
-            ColumnDataTypeWrapper(ColumnDataType::IntervalYearMonth).into()
+            ColumnDataTypeWrapper::interval_year_month_datatype().into()
         );
         assert_eq!(
             ConcreteDataType::interval_datatype(IntervalUnit::MonthDayNano),
-            ColumnDataTypeWrapper(ColumnDataType::IntervalMonthDayNano).into()
+            ColumnDataTypeWrapper::interval_month_day_nano_datatype().into()
         );
         assert_eq!(
             ConcreteDataType::duration_millisecond_datatype(),
-            ColumnDataTypeWrapper(ColumnDataType::DurationMillisecond).into()
+            ColumnDataTypeWrapper::duration_millisecond_datatype().into()
         );
+        assert_eq!(
+            ConcreteDataType::decimal128_datatype(10, 2),
+            ColumnDataTypeWrapper::decimal128_datatype(10, 2).into()
+        )
     }
 
     #[test]
     fn test_column_datatype_from_concrete_datatype() {
         assert_eq!(
-            ColumnDataTypeWrapper(ColumnDataType::Boolean),
+            ColumnDataTypeWrapper::boolean_datatype(),
             ConcreteDataType::boolean_datatype().try_into().unwrap()
         );
         assert_eq!(
-            ColumnDataTypeWrapper(ColumnDataType::Int8),
+            ColumnDataTypeWrapper::int8_datatype(),
             ConcreteDataType::int8_datatype().try_into().unwrap()
|
||||
);
|
||||
assert_eq!(
|
||||
ColumnDataTypeWrapper(ColumnDataType::Int16),
|
||||
ColumnDataTypeWrapper::int16_datatype(),
|
||||
ConcreteDataType::int16_datatype().try_into().unwrap()
|
||||
);
|
||||
assert_eq!(
|
||||
ColumnDataTypeWrapper(ColumnDataType::Int32),
|
||||
ColumnDataTypeWrapper::int32_datatype(),
|
||||
ConcreteDataType::int32_datatype().try_into().unwrap()
|
||||
);
|
||||
assert_eq!(
|
||||
ColumnDataTypeWrapper(ColumnDataType::Int64),
|
||||
ColumnDataTypeWrapper::int64_datatype(),
|
||||
ConcreteDataType::int64_datatype().try_into().unwrap()
|
||||
);
|
||||
assert_eq!(
|
||||
ColumnDataTypeWrapper(ColumnDataType::Uint8),
|
||||
ColumnDataTypeWrapper::uint8_datatype(),
|
||||
ConcreteDataType::uint8_datatype().try_into().unwrap()
|
||||
);
|
||||
assert_eq!(
|
||||
ColumnDataTypeWrapper(ColumnDataType::Uint16),
|
||||
ColumnDataTypeWrapper::uint16_datatype(),
|
||||
ConcreteDataType::uint16_datatype().try_into().unwrap()
|
||||
);
|
||||
assert_eq!(
|
||||
ColumnDataTypeWrapper(ColumnDataType::Uint32),
|
||||
ColumnDataTypeWrapper::uint32_datatype(),
|
||||
ConcreteDataType::uint32_datatype().try_into().unwrap()
|
||||
);
|
||||
assert_eq!(
|
||||
ColumnDataTypeWrapper(ColumnDataType::Uint64),
|
||||
ColumnDataTypeWrapper::uint64_datatype(),
|
||||
ConcreteDataType::uint64_datatype().try_into().unwrap()
|
||||
);
|
||||
assert_eq!(
|
||||
ColumnDataTypeWrapper(ColumnDataType::Float32),
|
||||
ColumnDataTypeWrapper::float32_datatype(),
|
||||
ConcreteDataType::float32_datatype().try_into().unwrap()
|
||||
);
|
||||
assert_eq!(
|
||||
ColumnDataTypeWrapper(ColumnDataType::Float64),
|
||||
ColumnDataTypeWrapper::float64_datatype(),
|
||||
ConcreteDataType::float64_datatype().try_into().unwrap()
|
||||
);
|
||||
assert_eq!(
|
||||
ColumnDataTypeWrapper(ColumnDataType::Binary),
|
||||
ColumnDataTypeWrapper::binary_datatype(),
|
||||
ConcreteDataType::binary_datatype().try_into().unwrap()
|
||||
);
|
||||
assert_eq!(
|
||||
ColumnDataTypeWrapper(ColumnDataType::String),
|
||||
ColumnDataTypeWrapper::string_datatype(),
|
||||
ConcreteDataType::string_datatype().try_into().unwrap()
|
||||
);
|
||||
assert_eq!(
|
||||
ColumnDataTypeWrapper(ColumnDataType::Date),
|
||||
ColumnDataTypeWrapper::date_datatype(),
|
||||
ConcreteDataType::date_datatype().try_into().unwrap()
|
||||
);
|
||||
assert_eq!(
|
||||
ColumnDataTypeWrapper(ColumnDataType::Datetime),
|
||||
ColumnDataTypeWrapper::datetime_datatype(),
|
||||
ConcreteDataType::datetime_datatype().try_into().unwrap()
|
||||
);
|
||||
assert_eq!(
|
||||
ColumnDataTypeWrapper(ColumnDataType::TimestampMillisecond),
|
||||
ColumnDataTypeWrapper::timestamp_millisecond_datatype(),
|
||||
ConcreteDataType::timestamp_millisecond_datatype()
|
||||
.try_into()
|
||||
.unwrap()
|
||||
);
|
||||
assert_eq!(
|
||||
ColumnDataTypeWrapper(ColumnDataType::IntervalYearMonth),
|
||||
ColumnDataTypeWrapper::interval_year_month_datatype(),
|
||||
ConcreteDataType::interval_datatype(IntervalUnit::YearMonth)
|
||||
.try_into()
|
||||
.unwrap()
|
||||
);
|
||||
assert_eq!(
|
||||
ColumnDataTypeWrapper(ColumnDataType::IntervalDayTime),
|
||||
ColumnDataTypeWrapper::interval_day_time_datatype(),
|
||||
ConcreteDataType::interval_datatype(IntervalUnit::DayTime)
|
||||
.try_into()
|
||||
.unwrap()
|
||||
);
|
||||
assert_eq!(
|
||||
ColumnDataTypeWrapper(ColumnDataType::IntervalMonthDayNano),
|
||||
ColumnDataTypeWrapper::interval_month_day_nano_datatype(),
|
||||
ConcreteDataType::interval_datatype(IntervalUnit::MonthDayNano)
|
||||
.try_into()
|
||||
.unwrap()
|
||||
);
|
||||
assert_eq!(
|
||||
ColumnDataTypeWrapper(ColumnDataType::DurationMillisecond),
|
||||
ColumnDataTypeWrapper::duration_millisecond_datatype(),
|
||||
ConcreteDataType::duration_millisecond_datatype()
|
||||
.try_into()
|
||||
.unwrap()
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
ColumnDataTypeWrapper::decimal128_datatype(10, 2),
|
||||
ConcreteDataType::decimal128_datatype(10, 2)
|
||||
.try_into()
|
||||
.unwrap()
|
||||
);
|
||||
|
||||
let result: Result<ColumnDataTypeWrapper> = ConcreteDataType::null_datatype().try_into();
|
||||
assert!(result.is_err());
|
||||
assert_eq!(
|
||||
@@ -1290,6 +1447,7 @@ mod tests {
|
||||
}),
|
||||
null_mask: vec![],
|
||||
datatype: 0,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let vector = Arc::new(TimestampNanosecondVector::from_vec(vec![1, 2, 3]));
|
||||
@@ -1331,6 +1489,7 @@ mod tests {
|
||||
}),
|
||||
null_mask: vec![],
|
||||
datatype: 0,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let vector = Arc::new(TimeNanosecondVector::from_vec(vec![1, 2, 3]));
|
||||
@@ -1372,6 +1531,7 @@ mod tests {
|
||||
}),
|
||||
null_mask: vec![],
|
||||
datatype: 0,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let vector = Arc::new(IntervalYearMonthVector::from_vec(vec![1, 2, 3]));
|
||||
@@ -1416,6 +1576,7 @@ mod tests {
|
||||
}),
|
||||
null_mask: vec![],
|
||||
datatype: 0,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let vector = Arc::new(DurationNanosecondVector::from_vec(vec![1, 2, 3]));
|
||||
@@ -1460,6 +1621,7 @@ mod tests {
|
||||
}),
|
||||
null_mask: vec![2],
|
||||
datatype: ColumnDataType::Boolean as i32,
|
||||
..Default::default()
|
||||
};
|
||||
let row_count = 4;
|
||||
|
||||
@@ -1617,17 +1779,17 @@ mod tests {
|
||||
&ConcreteDataType::Interval(IntervalType::MonthDayNano(IntervalMonthDayNanoType)),
|
||||
Values {
|
||||
interval_month_day_nano_values: vec![
|
||||
IntervalMonthDayNano {
|
||||
v1::IntervalMonthDayNano {
|
||||
months: 1,
|
||||
days: 2,
|
||||
nanoseconds: 3,
|
||||
},
|
||||
IntervalMonthDayNano {
|
||||
v1::IntervalMonthDayNano {
|
||||
months: 5,
|
||||
days: 6,
|
||||
nanoseconds: 7,
|
||||
},
|
||||
IntervalMonthDayNano {
|
||||
v1::IntervalMonthDayNano {
|
||||
months: 9,
|
||||
days: 10,
|
||||
nanoseconds: 11,
|
||||
@@ -1859,4 +2021,33 @@ mod tests {
|
||||
assert_eq!(values[6], ValueData::DateValue(30));
|
||||
assert_eq!(values[7], ValueData::StringValue("c".to_string()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_is_column_type_value_eq() {
|
||||
// test column type eq
|
||||
let column1 = Column {
|
||||
column_name: "test".to_string(),
|
||||
semantic_type: 0,
|
||||
values: Some(Values {
|
||||
bool_values: vec![false, true, true],
|
||||
..Default::default()
|
||||
}),
|
||||
null_mask: vec![2],
|
||||
datatype: ColumnDataType::Boolean as i32,
|
||||
datatype_extension: None,
|
||||
};
|
||||
assert!(is_column_type_value_eq(
|
||||
column1.datatype,
|
||||
column1.datatype_extension,
|
||||
&ConcreteDataType::boolean_datatype(),
|
||||
));
|
||||
}
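
The `null_mask: vec![2]` in this test, and the earlier `row_count = 4` column with three `bool_values`, suggest an LSB-first bitmap: bit i set marks row i as null, so null rows carry no concrete value. A self-contained sketch of that decoding (editorial note; the bit order is inferred from these tests, not stated by the diff):

// Decode a null bitmap, assuming bit i (LSB first) marks row i as null.
fn null_positions(null_mask: &[u8], row_count: usize) -> Vec<usize> {
    (0..row_count)
        .filter(|&i| {
            let byte = null_mask.get(i / 8).copied().unwrap_or(0);
            (byte >> (i % 8)) & 1 == 1
        })
        .collect()
}

fn main() {
    // vec![2] == 0b0000_0010: only row 1 is null, which is why a column
    // with `row_count = 4` needs just 3 concrete values.
    assert_eq!(null_positions(&[2], 4), vec![1]);
}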

    #[test]
    fn test_convert_to_pb_decimal128() {
        let decimal = Decimal128::new(123, 3, 1);
        let pb_decimal = convert_to_pb_decimal128(decimal);
        assert_eq!(pb_decimal.lo, 123);
        assert_eq!(pb_decimal.hi, 0);
    }
}

@@ -22,7 +22,10 @@ use crate::helper::ColumnDataTypeWrapper;
use crate::v1::ColumnDef;

pub fn try_as_column_schema(column_def: &ColumnDef) -> Result<ColumnSchema> {
    let data_type = ColumnDataTypeWrapper::try_new(column_def.data_type)?;
    let data_type = ColumnDataTypeWrapper::try_new(
        column_def.data_type,
        column_def.datatype_extension.clone(),
    )?;

    let constraint = if column_def.default_constraint.is_empty() {
        None

@@ -49,5 +49,4 @@ chrono.workspace = true
common-test-util.workspace = true
log-store.workspace = true
object-store.workspace = true
storage.workspace = true
tokio.workspace = true

@@ -180,7 +180,7 @@ pub enum Error {
        source: table::error::Error,
    },

    #[snafu(display(""))]
    #[snafu(display("Internal error"))]
    Internal {
        location: Location,
        source: BoxedError,

@@ -216,7 +216,7 @@ pub enum Error {
    #[snafu(display("Illegal access to catalog: {} and schema: {}", catalog, schema))]
    QueryAccessDenied { catalog: String, schema: String },

    #[snafu(display(""))]
    #[snafu(display("DataFusion error"))]
    Datafusion {
        #[snafu(source)]
        error: DataFusionError,

@@ -202,7 +202,7 @@ impl InformationSchemaColumnsBuilder {
            &schema_name,
            &table_name,
            &column.name,
            column.data_type.name(),
            &column.data_type.name(),
            semantic_type,
        );
    }

@@ -19,7 +19,6 @@ use std::sync::{Arc, Weak};
use common_catalog::consts::{DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, NUMBERS_TABLE_ID};
use common_error::ext::BoxedError;
use common_meta::cache_invalidator::{CacheInvalidator, CacheInvalidatorRef, Context};
use common_meta::datanode_manager::DatanodeManagerRef;
use common_meta::error::Result as MetaResult;
use common_meta::key::catalog_name::CatalogNameKey;
use common_meta::key::schema_name::SchemaNameKey;

@@ -55,7 +54,6 @@ pub struct KvBackendCatalogManager {
    cache_invalidator: CacheInvalidatorRef,
    partition_manager: PartitionRuleManagerRef,
    table_metadata_manager: TableMetadataManagerRef,
    datanode_manager: DatanodeManagerRef,
    /// A sub-CatalogManager that handles system tables
    system_catalog: SystemCatalog,
}

@@ -76,16 +74,11 @@ impl CacheInvalidator for KvBackendCatalogManager {
}

impl KvBackendCatalogManager {
    pub fn new(
        backend: KvBackendRef,
        cache_invalidator: CacheInvalidatorRef,
        datanode_manager: DatanodeManagerRef,
    ) -> Arc<Self> {
    pub fn new(backend: KvBackendRef, cache_invalidator: CacheInvalidatorRef) -> Arc<Self> {
        Arc::new_cyclic(|me| Self {
            partition_manager: Arc::new(PartitionRuleManager::new(backend.clone())),
            table_metadata_manager: Arc::new(TableMetadataManager::new(backend)),
            cache_invalidator,
            datanode_manager,
            system_catalog: SystemCatalog {
                catalog_manager: me.clone(),
            },

@@ -99,10 +92,6 @@ impl KvBackendCatalogManager {
    pub fn table_metadata_manager_ref(&self) -> &TableMetadataManagerRef {
        &self.table_metadata_manager
    }

    pub fn datanode_manager(&self) -> DatanodeManagerRef {
        self.datanode_manager.clone()
    }
}
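
`KvBackendCatalogManager::new` above uses `Arc::new_cyclic` so the embedded `SystemCatalog` can hold a weak pointer back to its owner. A minimal standalone sketch of that pattern (illustrative types, not the real ones):

use std::sync::{Arc, Weak};

struct Owner {
    child: Child,
}

struct Child {
    // Weak back-reference: upgrading yields the owning Arc without
    // creating a strong reference cycle that would leak both values.
    owner: Weak<Owner>,
}

fn new_owner() -> Arc<Owner> {
    // The closure receives a Weak to the Arc being constructed.
    Arc::new_cyclic(|me| Owner {
        child: Child { owner: me.clone() },
    })
}

fn main() {
    let owner = new_owner();
    assert!(owner.child.owner.upgrade().is_some());
}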

#[async_trait::async_trait]

@@ -46,6 +46,7 @@ async fn run() {
                default_constraint: vec![],
                semantic_type: SemanticType::Timestamp as i32,
                comment: String::new(),
                ..Default::default()
            },
            ColumnDef {
                name: "key".to_string(),

@@ -54,6 +55,7 @@ async fn run() {
                default_constraint: vec![],
                semantic_type: SemanticType::Tag as i32,
                comment: String::new(),
                ..Default::default()
            },
            ColumnDef {
                name: "value".to_string(),

@@ -62,6 +64,7 @@ async fn run() {
                default_constraint: vec![],
                semantic_type: SemanticType::Field as i32,
                comment: String::new(),
                ..Default::default()
            },
        ],
        time_index: "timestamp".to_string(),

@@ -78,7 +81,7 @@ async fn run() {

    let logical = mock_logical_plan();
    event!(Level::INFO, "plan size: {:#?}", logical.len());
    let result = db.logical_plan(logical, 0).await.unwrap();
    let result = db.logical_plan(logical).await.unwrap();

    event!(Level::INFO, "result: {:#?}", result);
}

@@ -29,6 +29,7 @@ use common_query::Output;
use common_recordbatch::error::ExternalSnafu;
use common_recordbatch::RecordBatchStreamAdaptor;
use common_telemetry::logging;
use common_telemetry::tracing_context::W3cTrace;
use futures_util::StreamExt;
use prost::Message;
use snafu::{ensure, ResultExt};

@@ -147,21 +148,21 @@ impl Database {

    async fn handle(&self, request: Request) -> Result<u32> {
        let mut client = self.client.make_database_client()?.inner;
        let request = self.to_rpc_request(request, 0);
        let request = self.to_rpc_request(request);
        let response = client.handle(request).await?.into_inner();
        from_grpc_response(response)
    }

    #[inline]
    fn to_rpc_request(&self, request: Request, trace_id: u64) -> GreptimeRequest {
    fn to_rpc_request(&self, request: Request) -> GreptimeRequest {
        GreptimeRequest {
            header: Some(RequestHeader {
                catalog: self.catalog.clone(),
                schema: self.schema.clone(),
                authorization: self.ctx.auth_header.clone(),
                dbname: self.dbname.clone(),
                trace_id,
                span_id: 0,
                // TODO(Taylor-lagrange): add client grpc tracing
                tracing_context: W3cTrace::new(),
            }),
            request: Some(request),
        }

@@ -172,23 +173,17 @@ impl Database {
        S: AsRef<str>,
    {
        let _timer = metrics::METRIC_GRPC_SQL.start_timer();
        self.do_get(
            Request::Query(QueryRequest {
                query: Some(Query::Sql(sql.as_ref().to_string())),
            }),
            0,
        )
        self.do_get(Request::Query(QueryRequest {
            query: Some(Query::Sql(sql.as_ref().to_string())),
        }))
        .await
    }

    pub async fn logical_plan(&self, logical_plan: Vec<u8>, trace_id: u64) -> Result<Output> {
    pub async fn logical_plan(&self, logical_plan: Vec<u8>) -> Result<Output> {
        let _timer = metrics::METRIC_GRPC_LOGICAL_PLAN.start_timer();
        self.do_get(
            Request::Query(QueryRequest {
                query: Some(Query::LogicalPlan(logical_plan)),
            }),
            trace_id,
        )
        self.do_get(Request::Query(QueryRequest {
            query: Some(Query::LogicalPlan(logical_plan)),
        }))
        .await
    }

@@ -200,68 +195,53 @@ impl Database {
        step: &str,
    ) -> Result<Output> {
        let _timer = metrics::METRIC_GRPC_PROMQL_RANGE_QUERY.start_timer();
        self.do_get(
            Request::Query(QueryRequest {
                query: Some(Query::PromRangeQuery(PromRangeQuery {
                    query: promql.to_string(),
                    start: start.to_string(),
                    end: end.to_string(),
                    step: step.to_string(),
                })),
            }),
            0,
        )
        self.do_get(Request::Query(QueryRequest {
            query: Some(Query::PromRangeQuery(PromRangeQuery {
                query: promql.to_string(),
                start: start.to_string(),
                end: end.to_string(),
                step: step.to_string(),
            })),
        }))
        .await
    }

    pub async fn create(&self, expr: CreateTableExpr) -> Result<Output> {
        let _timer = metrics::METRIC_GRPC_CREATE_TABLE.start_timer();
        self.do_get(
            Request::Ddl(DdlRequest {
                expr: Some(DdlExpr::CreateTable(expr)),
            }),
            0,
        )
        self.do_get(Request::Ddl(DdlRequest {
            expr: Some(DdlExpr::CreateTable(expr)),
        }))
        .await
    }

    pub async fn alter(&self, expr: AlterExpr) -> Result<Output> {
        let _timer = metrics::METRIC_GRPC_ALTER.start_timer();
        self.do_get(
            Request::Ddl(DdlRequest {
                expr: Some(DdlExpr::Alter(expr)),
            }),
            0,
        )
        self.do_get(Request::Ddl(DdlRequest {
            expr: Some(DdlExpr::Alter(expr)),
        }))
        .await
    }

    pub async fn drop_table(&self, expr: DropTableExpr) -> Result<Output> {
        let _timer = metrics::METRIC_GRPC_DROP_TABLE.start_timer();
        self.do_get(
            Request::Ddl(DdlRequest {
                expr: Some(DdlExpr::DropTable(expr)),
            }),
            0,
        )
        self.do_get(Request::Ddl(DdlRequest {
            expr: Some(DdlExpr::DropTable(expr)),
        }))
        .await
    }

    pub async fn truncate_table(&self, expr: TruncateTableExpr) -> Result<Output> {
        let _timer = metrics::METRIC_GRPC_TRUNCATE_TABLE.start_timer();
        self.do_get(
            Request::Ddl(DdlRequest {
                expr: Some(DdlExpr::TruncateTable(expr)),
            }),
            0,
        )
        self.do_get(Request::Ddl(DdlRequest {
            expr: Some(DdlExpr::TruncateTable(expr)),
        }))
        .await
    }

    async fn do_get(&self, request: Request, trace_id: u64) -> Result<Output> {
    async fn do_get(&self, request: Request) -> Result<Output> {
        // FIXME(paomian): should be added some labels for metrics
        let _timer = metrics::METRIC_GRPC_DO_GET.start_timer();
        let request = self.to_rpc_request(request, trace_id);
        let request = self.to_rpc_request(request);
        let request = Ticket {
            ticket: request.encode_to_vec().into(),
        };
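
Taken together, these hunks shrink every call site: callers no longer thread a `trace_id` through `sql`, `logical_plan`, or the DDL helpers, since tracing now rides in the W3C `tracing_context` header that `to_rpc_request` fills in. A hedged caller-side sketch, assuming an already-connected `Database` named `db` (signatures are the post-change ones above):

// Sketch only: the trailing `0` trace-id arguments are gone.
async fn demo(db: &Database, plan: Vec<u8>) -> Result<()> {
    let _output = db.sql("SELECT 1").await?;
    let _output = db.logical_plan(plan).await?;
    Ok(())
}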

@@ -18,7 +18,7 @@ async-trait.workspace = true
auth.workspace = true
catalog.workspace = true
chrono.workspace = true
clap = { version = "3.1", features = ["derive"] }
clap = { version = "4.4", features = ["derive"] }
client.workspace = true
common-base.workspace = true
common-catalog.workspace = true

@@ -208,7 +208,8 @@ async fn main() -> Result<()> {
    };

    common_telemetry::set_panic_hook();
    let _guard = common_telemetry::init_global_logging(app_name, logging_opts, tracing_opts);
    let _guard =
        common_telemetry::init_global_logging(app_name, logging_opts, tracing_opts, opts.node_id());

    // Report app version as gauge.
    APP_VERSION

@@ -17,7 +17,6 @@ use std::sync::Arc;
use std::time::Instant;

use catalog::kvbackend::{CachedMetaKvBackend, KvBackendCatalogManager};
use client::client_manager::DatanodeClients;
use client::{Client, Database, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_base::Plugins;
use common_error::ext::ErrorExt;

@@ -176,7 +175,7 @@ impl Repl {
                .encode(&plan)
                .context(SubstraitEncodeLogicalPlanSnafu)?;

            self.database.logical_plan(plan.to_vec(), 0).await
            self.database.logical_plan(plan.to_vec()).await
        } else {
            self.database.sql(&sql).await
        }

@@ -250,13 +249,8 @@ async fn create_query_engine(meta_addr: &str) -> Result<DatafusionQueryEngine> {

    let cached_meta_backend = Arc::new(CachedMetaKvBackend::new(meta_client.clone()));

    let datanode_clients = Arc::new(DatanodeClients::default());

    let catalog_list = KvBackendCatalogManager::new(
        cached_meta_backend.clone(),
        cached_meta_backend.clone(),
        datanode_clients,
    );
    let catalog_list =
        KvBackendCatalogManager::new(cached_meta_backend.clone(), cached_meta_backend);
    let plugins: Plugins = Default::default();
    let state = Arc::new(QueryEngineState::new(
        catalog_list,

@@ -12,15 +12,17 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Arc;
use std::time::Duration;

use catalog::kvbackend::MetaKvBackend;
use clap::Parser;
use common_telemetry::logging;
use datanode::config::DatanodeOptions;
use datanode::datanode::{Datanode, DatanodeBuilder};
use meta_client::MetaClientOptions;
use servers::Mode;
use snafu::ResultExt;
use snafu::{OptionExt, ResultExt};

use crate::error::{MissingConfigSnafu, Result, ShutdownDatanodeSnafu, StartDatanodeSnafu};
use crate::options::{Options, TopLevelOptions};

@@ -89,7 +91,7 @@ struct StartCommand {
    rpc_addr: Option<String>,
    #[clap(long)]
    rpc_hostname: Option<String>,
    #[clap(long, multiple = true, value_delimiter = ',')]
    #[clap(long, value_delimiter = ',', num_args = 1..)]
    metasrv_addr: Option<Vec<String>>,
    #[clap(short, long)]
    config_file: Option<String>,
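
The attribute change above is the clap 3 to 4 migration that goes with the `clap = "4.4"` bump earlier in this changeset: `multiple = true` was removed in clap 4, and a variable-arity option is now expressed with `num_args`. The same swap recurs in the frontend command below. A minimal standalone sketch against clap 4:

use clap::Parser;

#[derive(Parser, Debug)]
struct Args {
    /// Accepts comma-separated lists, e.g.
    /// `--metasrv-addr 127.0.0.1:3001,127.0.0.1:3002`.
    #[clap(long, value_delimiter = ',', num_args = 1..)]
    metasrv_addr: Option<Vec<String>>,
}

fn main() {
    let args = Args::parse_from(["app", "--metasrv-addr", "127.0.0.1:3001,127.0.0.1:3002"]);
    assert_eq!(args.metasrv_addr.unwrap().len(), 2);
}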

@@ -177,7 +179,27 @@ impl StartCommand {
        logging::info!("Datanode start command: {:#?}", self);
        logging::info!("Datanode options: {:#?}", opts);

        let datanode = DatanodeBuilder::new(opts, None, plugins)
        let node_id = opts
            .node_id
            .context(MissingConfigSnafu { msg: "'node_id'" })?;

        let meta_config = opts.meta_client.as_ref().context(MissingConfigSnafu {
            msg: "'meta_client_options'",
        })?;

        let meta_client = datanode::heartbeat::new_metasrv_client(node_id, meta_config)
            .await
            .context(StartDatanodeSnafu)?;

        let meta_backend = Arc::new(MetaKvBackend {
            client: Arc::new(meta_client.clone()),
        });

        let datanode = DatanodeBuilder::new(opts, plugins)
            .with_meta_client(meta_client)
            .with_kv_backend(meta_backend)
            .enable_region_server_service()
            .enable_http_service()
            .build()
            .await
            .context(StartDatanodeSnafu)?;

@@ -192,7 +214,7 @@ mod tests {
    use std::time::Duration;

    use common_test_util::temp_dir::create_named_temp_file;
    use datanode::config::{CompactionConfig, FileConfig, ObjectStoreConfig, RegionManifestConfig};
    use datanode::config::{FileConfig, ObjectStoreConfig};
    use servers::heartbeat_options::HeartbeatOptions;
    use servers::Mode;

@@ -232,16 +254,6 @@ mod tests {
        type = "File"
        data_home = "/tmp/greptimedb/"

        [storage.compaction]
        max_inflight_tasks = 3
        max_files_in_level0 = 7
        max_purge_tasks = 32

        [storage.manifest]
        checkpoint_margin = 9
        gc_duration = '7s'
        compress = true

        [logging]
        level = "debug"
        dir = "/tmp/greptimedb/test/logs"

@@ -294,23 +306,6 @@ mod tests {
            ObjectStoreConfig::File(FileConfig { .. })
        ));

        assert_eq!(
            CompactionConfig {
                max_inflight_tasks: 3,
                max_files_in_level0: 7,
                max_purge_tasks: 32,
            },
            options.storage.compaction,
        );
        assert_eq!(
            RegionManifestConfig {
                checkpoint_margin: Some(9),
                gc_duration: Some(Duration::from_secs(7)),
                compress: true
            },
            options.storage.manifest,
        );

        assert_eq!("debug", options.logging.level.unwrap());
        assert_eq!("/tmp/greptimedb/test/logs".to_string(), options.logging.dir);
    }

@@ -387,18 +382,12 @@ mod tests {
        file_size = "1GB"
        purge_threshold = "50GB"
        purge_interval = "10m"
        read_batch_size = 128
        sync_write = false

        [storage]
        type = "File"
        data_home = "/tmp/greptimedb/"

        [storage.compaction]
        max_inflight_tasks = 3
        max_files_in_level0 = 7
        max_purge_tasks = 32

        [logging]
        level = "debug"
        dir = "/tmp/greptimedb/test/logs"

@@ -409,26 +398,24 @@ mod tests {
        temp_env::with_vars(
            [
                (
                    // storage.manifest.gc_duration = 9s
                    // wal.purge_interval = 1m
                    [
                        env_prefix.to_string(),
                        "storage".to_uppercase(),
                        "manifest".to_uppercase(),
                        "gc_duration".to_uppercase(),
                        "wal".to_uppercase(),
                        "purge_interval".to_uppercase(),
                    ]
                    .join(ENV_VAR_SEP),
                    Some("9s"),
                    Some("1m"),
                ),
                (
                    // storage.compaction.max_purge_tasks = 99
                    // wal.read_batch_size = 100
                    [
                        env_prefix.to_string(),
                        "storage".to_uppercase(),
                        "compaction".to_uppercase(),
                        "max_purge_tasks".to_uppercase(),
                        "wal".to_uppercase(),
                        "read_batch_size".to_uppercase(),
                    ]
                    .join(ENV_VAR_SEP),
                    Some("99"),
                    Some("100"),
                ),
                (
                    // meta_client.metasrv_addrs = 127.0.0.1:3001,127.0.0.1:3002,127.0.0.1:3003

@@ -456,10 +443,7 @@ mod tests {
        };

        // Should be read from env, env > default values.
        assert_eq!(
            opts.storage.manifest.gc_duration,
            Some(Duration::from_secs(9))
        );
        assert_eq!(opts.wal.read_batch_size, 100,);
        assert_eq!(
            opts.meta_client.unwrap().metasrv_addrs,
            vec![

@@ -470,19 +454,13 @@ mod tests {
        );

        // Should be read from config file, config file > env > default values.
        assert_eq!(opts.storage.compaction.max_purge_tasks, 32);
        assert_eq!(opts.wal.purge_interval, Duration::from_secs(60 * 10));

        // Should be read from cli, cli > config file > env > default values.
        assert_eq!(opts.wal.dir.unwrap(), "/other/wal/dir");

        // Should be default value.
        assert_eq!(
            opts.storage.manifest.checkpoint_margin,
            DatanodeOptions::default()
                .storage
                .manifest
                .checkpoint_margin
        );
        assert_eq!(opts.http.addr, DatanodeOptions::default().http.addr);
        },
    );
}
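
These tests build override keys by upper-casing each config path segment and joining with `ENV_VAR_SEP`, which is defined as `"__"` later in this changeset. A small sketch of the resulting key shape (the `GREPTIMEDB_DATANODE` prefix is an assumption, extrapolated from the `GREPTIMEDB_METASRV` and `GREPTIMEDB_STANDALONE` defaults shown below):

const ENV_VAR_SEP: &str = "__";

// Compose an override key the same way the test's array-join does.
fn env_key(prefix: &str, path: &[&str]) -> String {
    std::iter::once(prefix.to_string())
        .chain(path.iter().map(|s| s.to_uppercase()))
        .collect::<Vec<_>>()
        .join(ENV_VAR_SEP)
}

fn main() {
    // Mirrors the `wal.purge_interval = 1m` override above.
    assert_eq!(
        env_key("GREPTIMEDB_DATANODE", &["wal", "purge_interval"]),
        "GREPTIMEDB_DATANODE__WAL__PURGE_INTERVAL"
    );
}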

@@ -37,6 +37,12 @@ pub enum Error {
        source: common_meta::error::Error,
    },

    #[snafu(display("Failed to init DDL manager"))]
    InitDdlManager {
        location: Location,
        source: common_meta::error::Error,
    },

    #[snafu(display("Failed to start procedure manager"))]
    StartProcedureManager {
        location: Location,

@@ -240,9 +246,11 @@ impl ErrorExt for Error {
            Error::ShutdownMetaServer { source, .. } => source.status_code(),
            Error::BuildMetaServer { source, .. } => source.status_code(),
            Error::UnsupportedSelectorType { source, .. } => source.status_code(),
            Error::IterStream { source, .. } | Error::InitMetadata { source, .. } => {
                source.status_code()
            }

            Error::IterStream { source, .. }
            | Error::InitMetadata { source, .. }
            | Error::InitDdlManager { source, .. } => source.status_code(),

            Error::ConnectServer { source, .. } => source.status_code(),
            Error::MissingConfig { .. }
            | Error::LoadLayeredConfig { .. }

@@ -253,6 +261,7 @@ impl ErrorExt for Error {
            | Error::CreateDir { .. }
            | Error::EmptyResult { .. }
            | Error::InvalidDatabaseName { .. } => StatusCode::InvalidArguments,

            Error::StartProcedureManager { source, .. }
            | Error::StopProcedureManager { source, .. } => source.status_code(),
            Error::ReplCreation { .. } | Error::Readline { .. } => StatusCode::Internal,

@@ -12,18 +12,26 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Arc;
use std::time::Duration;

use catalog::kvbackend::CachedMetaKvBackend;
use clap::Parser;
use client::client_manager::DatanodeClients;
use common_meta::heartbeat::handler::parse_mailbox_message::ParseMailboxMessageHandler;
use common_meta::heartbeat::handler::HandlerGroupExecutor;
use common_telemetry::logging;
use frontend::frontend::FrontendOptions;
use frontend::heartbeat::handler::invalidate_table_cache::InvalidateTableCacheHandler;
use frontend::heartbeat::HeartbeatTask;
use frontend::instance::builder::FrontendBuilder;
use frontend::instance::{FrontendInstance, Instance as FeInstance};
use meta_client::MetaClientOptions;
use servers::tls::{TlsMode, TlsOption};
use servers::Mode;
use snafu::ResultExt;
use snafu::{OptionExt, ResultExt};

use crate::error::{self, Result, StartFrontendSnafu};
use crate::error::{self, MissingConfigSnafu, Result, StartFrontendSnafu};
use crate::options::{Options, TopLevelOptions};

pub struct Instance {

@@ -100,7 +108,7 @@ pub struct StartCommand {
    config_file: Option<String>,
    #[clap(short, long)]
    influxdb_enable: Option<bool>,
    #[clap(long, multiple = true, value_delimiter = ',')]
    #[clap(long, value_delimiter = ',', num_args = 1..)]
    metasrv_addr: Option<Vec<String>>,
    #[clap(long)]
    tls_mode: Option<TlsMode>,

@@ -196,10 +204,38 @@ impl StartCommand {
        logging::info!("Frontend start command: {:#?}", self);
        logging::info!("Frontend options: {:#?}", opts);

        let mut instance = FeInstance::try_new_distributed(&opts, plugins.clone())
        let meta_client_options = opts.meta_client.as_ref().context(MissingConfigSnafu {
            msg: "'meta_client'",
        })?;
        let meta_client = FeInstance::create_meta_client(meta_client_options)
            .await
            .context(StartFrontendSnafu)?;

        let meta_backend = Arc::new(CachedMetaKvBackend::new(meta_client.clone()));

        let executor = HandlerGroupExecutor::new(vec![
            Arc::new(ParseMailboxMessageHandler),
            Arc::new(InvalidateTableCacheHandler::new(meta_backend.clone())),
        ]);

        let heartbeat_task = HeartbeatTask::new(
            meta_client.clone(),
            opts.heartbeat.clone(),
            Arc::new(executor),
        );

        let mut instance = FrontendBuilder::new(
            meta_backend.clone(),
            Arc::new(DatanodeClients::default()),
            meta_client,
        )
        .with_cache_invalidator(meta_backend)
        .with_plugin(plugins)
        .with_heartbeat_task(heartbeat_task)
        .try_build()
        .await
        .context(StartFrontendSnafu)?;

        instance
            .build_servers(opts)
            .await

@@ -100,6 +100,9 @@ struct StartCommand {
    http_timeout: Option<u64>,
    #[clap(long, default_value = "GREPTIMEDB_METASRV")]
    env_prefix: String,
    /// The working home directory of this metasrv instance.
    #[clap(long)]
    data_home: Option<String>,
}

impl StartCommand {

@@ -152,6 +155,10 @@ impl StartCommand {
            opts.http.timeout = Duration::from_secs(http_timeout);
        }

        if let Some(data_home) = &self.data_home {
            opts.data_home = data_home.clone();
        }

        // Disable dashboard in metasrv.
        opts.http.disable_dashboard = true;

@@ -166,7 +173,12 @@ impl StartCommand {
        logging::info!("MetaSrv start command: {:#?}", self);
        logging::info!("MetaSrv options: {:#?}", opts);

        let instance = MetaSrvInstance::new(opts, plugins)
        let builder = meta_srv::bootstrap::metasrv_builder(&opts, plugins.clone(), None)
            .await
            .context(error::BuildMetaServerSnafu)?;
        let metasrv = builder.build().await.context(error::BuildMetaServerSnafu)?;

        let instance = MetaSrvInstance::new(opts, plugins, metasrv)
            .await
            .context(error::BuildMetaServerSnafu)?;

@@ -28,7 +28,7 @@ pub const ENV_VAR_SEP: &str = "__";
pub const ENV_LIST_SEP: &str = ",";

/// Options mixed up from datanode, frontend and metasrv.
#[derive(Serialize)]
#[derive(Serialize, Debug)]
pub struct MixOptions {
    pub data_home: String,
    pub procedure: ProcedureConfig,

@@ -133,12 +133,20 @@ impl Options {

        Ok(opts)
    }

    pub fn node_id(&self) -> Option<String> {
        match self {
            Options::Metasrv(_) | Options::Cli(_) => None,
            Options::Datanode(opt) => opt.node_id.map(|x| x.to_string()),
            Options::Frontend(opt) => opt.node_id.clone(),
            Options::Standalone(opt) => opt.frontend.node_id.clone(),
        }
    }
}

#[cfg(test)]
mod tests {
    use std::io::Write;
    use std::time::Duration;

    use common_test_util::temp_dir::create_named_temp_file;
    use datanode::config::{DatanodeOptions, ObjectStoreConfig};

@@ -170,11 +178,6 @@ mod tests {
        read_batch_size = 128
        sync_write = false

        [storage.compaction]
        max_inflight_tasks = 3
        max_files_in_level0 = 7
        max_purge_tasks = 32

        [logging]
        level = "debug"
        dir = "/tmp/greptimedb/test/logs"

@@ -185,17 +188,6 @@ mod tests {
        temp_env::with_vars(
            // The following environment variables will be used to override the values in the config file.
            [
                (
                    // storage.manifest.checkpoint_margin = 99
                    [
                        env_prefix.to_string(),
                        "storage".to_uppercase(),
                        "manifest".to_uppercase(),
                        "checkpoint_margin".to_uppercase(),
                    ]
                    .join(ENV_VAR_SEP),
                    Some("99"),
                ),
                (
                    // storage.type = S3
                    [

@@ -216,17 +208,6 @@ mod tests {
                    .join(ENV_VAR_SEP),
                    Some("mybucket"),
                ),
                (
                    // storage.manifest.gc_duration = 42s
                    [
                        env_prefix.to_string(),
                        "storage".to_uppercase(),
                        "manifest".to_uppercase(),
                        "gc_duration".to_uppercase(),
                    ]
                    .join(ENV_VAR_SEP),
                    Some("42s"),
                ),
                (
                    // wal.dir = /other/wal/dir
                    [

@@ -257,17 +238,12 @@ mod tests {
        .unwrap();

        // Check the configs from environment variables.
        assert_eq!(opts.storage.manifest.checkpoint_margin, Some(99));
        match opts.storage.store {
            ObjectStoreConfig::S3(s3_config) => {
                assert_eq!(s3_config.bucket, "mybucket".to_string());
            }
            _ => panic!("unexpected store type"),
        }
        assert_eq!(
            opts.storage.manifest.gc_duration,
            Some(Duration::from_secs(42))
        );
        assert_eq!(
            opts.meta_client.unwrap().metasrv_addrs,
            vec![

@@ -15,21 +15,23 @@
use std::sync::Arc;
use std::{fs, path};

use catalog::kvbackend::KvBackendCatalogManager;
use catalog::CatalogManagerRef;
use clap::Parser;
use common_base::Plugins;
use common_config::{metadata_store_dir, KvBackendConfig, WalConfig};
use common_meta::cache_invalidator::DummyKvCacheInvalidator;
use common_meta::cache_invalidator::DummyCacheInvalidator;
use common_meta::datanode_manager::DatanodeManagerRef;
use common_meta::ddl::DdlTaskExecutorRef;
use common_meta::ddl_manager::DdlManager;
use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
use common_meta::kv_backend::KvBackendRef;
use common_procedure::ProcedureManagerRef;
use common_telemetry::info;
use common_telemetry::logging::LoggingOptions;
use datanode::config::{DatanodeOptions, ProcedureConfig, RegionEngineConfig, StorageConfig};
use datanode::datanode::{Datanode, DatanodeBuilder};
use datanode::region_server::RegionServer;
use file_engine::config::EngineConfig as FileEngineConfig;
use frontend::frontend::FrontendOptions;
use frontend::instance::builder::FrontendBuilder;
use frontend::instance::standalone::StandaloneTableMetadataCreator;
use frontend::instance::{FrontendInstance, Instance as FeInstance, StandaloneDatanodeManager};
use frontend::service_config::{
    GrpcOptions, InfluxdbOptions, MysqlOptions, OpentsdbOptions, PostgresOptions, PromStoreOptions,

@@ -42,9 +44,9 @@ use servers::Mode;
use snafu::ResultExt;

use crate::error::{
    CreateDirSnafu, IllegalConfigSnafu, InitMetadataSnafu, Result, ShutdownDatanodeSnafu,
    ShutdownFrontendSnafu, StartDatanodeSnafu, StartFrontendSnafu, StartProcedureManagerSnafu,
    StopProcedureManagerSnafu,
    CreateDirSnafu, IllegalConfigSnafu, InitDdlManagerSnafu, InitMetadataSnafu, Result,
    ShutdownDatanodeSnafu, ShutdownFrontendSnafu, StartDatanodeSnafu, StartFrontendSnafu,
    StartProcedureManagerSnafu, StopProcedureManagerSnafu,
};
use crate::options::{MixOptions, Options, TopLevelOptions};

@@ -156,6 +158,7 @@ impl StandaloneOptions {
            wal: self.wal,
            storage: self.storage,
            region_engine: self.region_engine,
            rpc_addr: self.grpc.addr,
            ..Default::default()
        }
    }

@@ -169,9 +172,7 @@ pub struct Instance {

impl Instance {
    pub async fn start(&mut self) -> Result<()> {
        // Start datanode instance before starting services, to avoid requests come in before internal components are started.
        self.datanode.start().await.context(StartDatanodeSnafu)?;
        info!("Datanode instance started");
        self.datanode.start_telemetry();

        self.procedure_manager
            .start()

@@ -229,6 +230,9 @@ struct StartCommand {
    user_provider: Option<String>,
    #[clap(long, default_value = "GREPTIMEDB_STANDALONE")]
    env_prefix: String,
    /// The working home directory of this standalone instance.
    #[clap(long)]
    data_home: Option<String>,
}

impl StartCommand {

@@ -259,6 +263,10 @@ impl StartCommand {
            opts.http.addr = addr.clone()
        }

        if let Some(data_home) = &self.data_home {
            opts.storage.data_home = data_home.clone();
        }

        if let Some(addr) = &self.rpc_addr {
            // frontend grpc addr conflict with datanode default grpc addr
            let datanode_grpc_addr = DatanodeOptions::default().rpc_addr;

@@ -325,10 +333,8 @@ impl StartCommand {
        let dn_opts = opts.datanode.clone();

        info!("Standalone start command: {:#?}", self);
        info!(
            "Standalone frontend options: {:#?}, datanode options: {:#?}",
            fe_opts, dn_opts
        );

        info!("Building standalone instance with {opts:#?}");

        // Ensure the data_home directory exists.
        fs::create_dir_all(path::Path::new(&opts.data_home)).context(CreateDirSnafu {

@@ -344,38 +350,25 @@ impl StartCommand {
            .await
            .context(StartFrontendSnafu)?;

        let datanode = DatanodeBuilder::new(
            dn_opts.clone(),
            Some(kv_backend.clone()),
            Default::default(),
        )
        .build()
        .await
        .context(StartDatanodeSnafu)?;
        let region_server = datanode.region_server();
        let builder =
            DatanodeBuilder::new(dn_opts, fe_plugins.clone()).with_kv_backend(kv_backend.clone());
        let datanode = builder.build().await.context(StartDatanodeSnafu)?;

        let catalog_manager = KvBackendCatalogManager::new(
        let datanode_manager = Arc::new(StandaloneDatanodeManager(datanode.region_server()));

        let ddl_task_executor = Self::create_ddl_task_executor(
            kv_backend.clone(),
            Arc::new(DummyKvCacheInvalidator),
            Arc::new(StandaloneDatanodeManager(region_server.clone())),
        );

        catalog_manager
            .table_metadata_manager_ref()
            .init()
            .await
            .context(InitMetadataSnafu)?;

        // TODO: build frontend instance like in distributed mode
        let mut frontend = build_frontend(
            fe_plugins,
            kv_backend,
            procedure_manager.clone(),
            catalog_manager,
            region_server,
            datanode_manager.clone(),
        )
        .await?;

        let mut frontend = FrontendBuilder::new(kv_backend, datanode_manager, ddl_task_executor)
            .with_plugin(fe_plugins)
            .try_build()
            .await
            .context(StartFrontendSnafu)?;

        frontend
            .build_servers(opts)
            .await

@@ -387,26 +380,41 @@ impl StartCommand {
            procedure_manager,
        })
    }
}

/// Build frontend instance in standalone mode
async fn build_frontend(
    plugins: Plugins,
    kv_backend: KvBackendRef,
    procedure_manager: ProcedureManagerRef,
    catalog_manager: CatalogManagerRef,
    region_server: RegionServer,
) -> Result<FeInstance> {
    let frontend_instance = FeInstance::try_new_standalone(
        kv_backend,
        procedure_manager,
        catalog_manager,
        plugins,
        region_server,
    )
    .await
    .context(StartFrontendSnafu)?;
    Ok(frontend_instance)
    async fn create_ddl_task_executor(
        kv_backend: KvBackendRef,
        procedure_manager: ProcedureManagerRef,
        datanode_manager: DatanodeManagerRef,
    ) -> Result<DdlTaskExecutorRef> {
        let table_metadata_manager =
            Self::create_table_metadata_manager(kv_backend.clone()).await?;

        let ddl_task_executor: DdlTaskExecutorRef = Arc::new(
            DdlManager::try_new(
                procedure_manager,
                datanode_manager,
                Arc::new(DummyCacheInvalidator),
                table_metadata_manager,
                Arc::new(StandaloneTableMetadataCreator::new(kv_backend)),
            )
            .context(InitDdlManagerSnafu)?,
        );

        Ok(ddl_task_executor)
    }

    async fn create_table_metadata_manager(
        kv_backend: KvBackendRef,
    ) -> Result<TableMetadataManagerRef> {
        let table_metadata_manager = Arc::new(TableMetadataManager::new(kv_backend));

        table_metadata_manager
            .init()
            .await
            .context(InitMetadataSnafu)?;

        Ok(table_metadata_manager)
    }
}

#[cfg(test)]

@@ -21,11 +21,13 @@ common-error.workspace = true
common-macro.workspace = true
common-runtime.workspace = true
datafusion.workspace = true
datatypes.workspace = true
derive_builder.workspace = true
futures.workspace = true
lazy_static.workspace = true
object-store.workspace = true
orc-rust = "0.2"
parquet.workspace = true
paste = "1.0"
regex = "1.7"
serde.workspace = true

@@ -26,7 +26,9 @@ use tokio::io::{AsyncRead, AsyncWriteExt, BufReader};
use tokio_util::io::{ReaderStream, StreamReader};

use crate::error::{self, Error, Result};

#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EnumIter, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum CompressionType {
    /// Gzip-ed file
    Gzip,

@@ -166,6 +166,14 @@ pub enum Error {

    #[snafu(display("Buffered writer closed"))]
    BufferedWriterClosed { location: Location },

    #[snafu(display("Failed to write parquet file, path: {}", path))]
    WriteParquet {
        path: String,
        location: Location,
        #[snafu(source)]
        error: parquet::errors::ParquetError,
    },
}

pub type Result<T> = std::result::Result<T, Error>;

@@ -178,7 +186,8 @@ impl ErrorExt for Error {
            | ListObjects { .. }
            | ReadObject { .. }
            | WriteObject { .. }
            | AsyncWrite { .. } => StatusCode::StorageUnavailable,
            | AsyncWrite { .. }
            | WriteParquet { .. } => StatusCode::StorageUnavailable,

            UnsupportedBackendProtocol { .. }
            | UnsupportedCompressionType { .. }

@@ -231,6 +240,7 @@ impl ErrorExt for Error {
            InvalidConnection { location, .. } => Some(*location),
            UnsupportedCompressionType { location, .. } => Some(*location),
            UnsupportedFormat { location, .. } => Some(*location),
            WriteParquet { location, .. } => Some(*location),
        }
    }
}

@@ -12,11 +12,13 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::future::Future;
use std::pin::Pin;
use std::result;
use std::sync::Arc;

use arrow::record_batch::RecordBatch;
use arrow_schema::Schema;
use arrow_schema::{Schema, SchemaRef};
use async_trait::async_trait;
use datafusion::datasource::physical_plan::{FileMeta, ParquetFileReaderFactory};
use datafusion::error::Result as DatafusionResult;

@@ -26,11 +28,15 @@ use datafusion::parquet::errors::{ParquetError, Result as ParquetResult};
use datafusion::parquet::file::metadata::ParquetMetaData;
use datafusion::parquet::format::FileMetaData;
use datafusion::physical_plan::metrics::ExecutionPlanMetricsSet;
use datafusion::physical_plan::SendableRecordBatchStream;
use futures::future::BoxFuture;
use futures::StreamExt;
use object_store::{ObjectStore, Reader};
use parquet::basic::{Compression, ZstdLevel};
use parquet::file::properties::WriterProperties;
use snafu::ResultExt;

use crate::buffered_writer::{ArrowWriterCloser, DfRecordBatchEncoder};
use crate::buffered_writer::{ArrowWriterCloser, DfRecordBatchEncoder, LazyBufferedWriter};
use crate::error::{self, Result};
use crate::file_format::FileFormat;
use crate::share_buffer::SharedBuffer;

@@ -156,6 +162,103 @@ impl ArrowWriterCloser for ArrowWriter<SharedBuffer> {
    }
}

/// Parquet writer that buffers row groups in memory and writes buffered data to an underlying
/// storage by chunks to reduce memory consumption.
pub struct BufferedWriter {
    inner: InnerBufferedWriter,
}

type InnerBufferedWriter = LazyBufferedWriter<
    object_store::Writer,
    ArrowWriter<SharedBuffer>,
    Box<
        dyn FnMut(
                String,
            )
                -> Pin<Box<dyn Future<Output = error::Result<object_store::Writer>> + Send>>
            + Send,
    >,
>;

impl BufferedWriter {
    pub async fn try_new(
        path: String,
        store: ObjectStore,
        arrow_schema: SchemaRef,
        props: Option<WriterProperties>,
        buffer_threshold: usize,
    ) -> error::Result<Self> {
        let buffer = SharedBuffer::with_capacity(buffer_threshold);

        let arrow_writer = ArrowWriter::try_new(buffer.clone(), arrow_schema.clone(), props)
            .context(error::WriteParquetSnafu { path: &path })?;

        Ok(Self {
            inner: LazyBufferedWriter::new(
                buffer_threshold,
                buffer,
                arrow_writer,
                &path,
                Box::new(move |path| {
                    let store = store.clone();
                    Box::pin(async move {
                        store
                            .writer(&path)
                            .await
                            .context(error::WriteObjectSnafu { path })
                    })
                }),
            ),
        })
    }

    /// Write a record batch to stream writer.
    pub async fn write(&mut self, arrow_batch: &RecordBatch) -> error::Result<()> {
        self.inner.write(arrow_batch).await?;
        self.inner.try_flush(false).await?;

        Ok(())
    }

    /// Close parquet writer.
    ///
    /// Return file metadata and bytes written.
    pub async fn close(self) -> error::Result<(FileMetaData, u64)> {
        self.inner.close_with_arrow_writer().await
    }
}

/// Output the stream to a parquet file.
///
/// Returns number of rows written.
pub async fn stream_to_parquet(
    mut stream: SendableRecordBatchStream,
    store: ObjectStore,
    path: &str,
    threshold: usize,
) -> Result<usize> {
    let write_props = WriterProperties::builder()
        .set_compression(Compression::ZSTD(ZstdLevel::default()))
        .build();
    let schema = stream.schema();
    let mut buffered_writer = BufferedWriter::try_new(
        path.to_string(),
        store,
        schema,
        Some(write_props),
        threshold,
    )
    .await?;
    let mut rows_written = 0;
    while let Some(batch) = stream.next().await {
        let batch = batch.context(error::ReadRecordBatchSnafu)?;
        buffered_writer.write(&batch).await?;
        rows_written += batch.num_rows();
    }
    buffered_writer.close().await?;
    Ok(rows_written)
}
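
A hedged usage sketch of the new writer: `stream_to_parquet` above is the canonical caller, but the same flow works for any batch source. The `store` and the input batches are assumed to come from elsewhere; only `try_new`, `write`, and `close` with the signatures shown above are taken from the code:

// Sketch only: drive `BufferedWriter` by hand instead of through
// `stream_to_parquet`, flushing to object storage roughly every 8 MiB.
async fn write_all(store: ObjectStore, batches: &[RecordBatch]) -> error::Result<u64> {
    let schema = batches[0].schema();
    let mut writer = BufferedWriter::try_new(
        "data/out.parquet".to_string(), // hypothetical destination path
        store,
        schema,
        None, // default WriterProperties
        8 * 1024 * 1024,
    )
    .await?;
    for batch in batches {
        writer.write(batch).await?;
    }
    let (_file_meta, bytes_written) = writer.close().await?;
    Ok(bytes_written)
}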

#[cfg(test)]
mod tests {
    use common_test_util::find_workspace_path;

@@ -1,5 +1,5 @@
[package]
name = "decimal"
name = "common-decimal"
version.workspace = true
edition.workspace = true
license.workspace = true

@@ -43,7 +43,7 @@ const BYTES_TO_OVERFLOW_RUST_DECIMAL: usize = 28;
/// **precision**: the total number of digits in the number, it's range is \[1, 38\].
///
/// **scale**: the number of digits to the right of the decimal point, it's range is \[0, precision\].
#[derive(Debug, Default, Eq, Copy, Clone, Serialize, Deserialize)]
#[derive(Debug, Eq, Copy, Clone, Serialize, Deserialize)]
pub struct Decimal128 {
    value: i128,
    precision: u8,

@@ -51,8 +51,18 @@ pub struct Decimal128 {
}

impl Decimal128 {
    /// Create a new Decimal128 from i128, precision and scale.
    pub fn new_unchecked(value: i128, precision: u8, scale: i8) -> Self {
    /// Create a new Decimal128 from i128, precision and scale without any validation.
    pub fn new(value: i128, precision: u8, scale: i8) -> Self {
        // debug assert precision and scale is valid
        debug_assert!(
            precision > 0 && precision <= DECIMAL128_MAX_PRECISION,
            "precision should be in [1, {}]",
            DECIMAL128_MAX_PRECISION
        );
        debug_assert!(
            scale >= 0 && scale <= precision as i8,
            "scale should be in [0, precision]"
        );
        Self {
            value,
            precision,

@@ -60,6 +70,7 @@ impl Decimal128 {
        }
    }

    /// Try new Decimal128 from i128, precision and scale with validation.
    pub fn try_new(value: i128, precision: u8, scale: i8) -> error::Result<Self> {
        // make sure the precision and scale is valid.
        valid_precision_and_scale(precision, scale)?;

@@ -70,6 +81,7 @@ impl Decimal128 {
        })
    }

    /// Return underlying value without precision and scale
    pub fn val(&self) -> i128 {
        self.value
    }

@@ -84,10 +96,36 @@ impl Decimal128 {
        self.scale
    }

    /// Convert to ScalarValue
    /// Convert to ScalarValue(value,precision,scale)
    pub fn to_scalar_value(&self) -> (Option<i128>, u8, i8) {
        (Some(self.value), self.precision, self.scale)
    }

    /// split the self.value(i128) to (high-64 bit, low-64 bit), and
    /// the precision, scale information is discarded.
    ///
    /// Return: (high-64 bit, low-64 bit)
    pub fn split_value(&self) -> (i64, i64) {
        ((self.value >> 64) as i64, self.value as i64)
    }

    /// Convert from precision, scale, a i128 value which
    /// represents by two i64 value(high-64 bit, low-64 bit).
    pub fn from_value_precision_scale(hi: i64, lo: i64, precision: u8, scale: i8) -> Self {
        let value = (hi as i128) << 64 | lo as i128;
        Self::new(value, precision, scale)
    }
}
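
A quick check of the hi/lo arithmetic defined above (editorial sketch). One caveat worth noting: `lo as i128` in `from_value_precision_scale` sign-extends, so reassembly is only exact when the high bits are consistent with that extension; the sketch sticks to values that round-trip:

fn main() {
    // Small positive value: the high half is zero, matching the
    // `test_convert_to_pb_decimal128` expectations earlier in this diff.
    let d = Decimal128::new(123456789, 9, 3);
    let (hi, lo) = d.split_value();
    assert_eq!((hi, lo), (0, 123456789));
    let back = Decimal128::from_value_precision_scale(hi, lo, 9, 3);
    assert_eq!(back.val(), 123456789);

    // Negative values carry their sign in the high half and round-trip too.
    let (hi, lo) = Decimal128::new(-42, 2, 0).split_value();
    assert_eq!(Decimal128::from_value_precision_scale(hi, lo, 2, 0).val(), -42);
}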
|
||||
|
||||
/// The default value of Decimal128 is 0, and its precision is 1 and scale is 0.
|
||||
impl Default for Decimal128 {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
value: 0,
|
||||
precision: 1,
|
||||
scale: 0,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl PartialEq for Decimal128 {
|
||||
@@ -270,7 +308,7 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_common_decimal128() {
|
||||
let decimal = Decimal128::new_unchecked(123456789, 9, 3);
|
||||
let decimal = Decimal128::new(123456789, 9, 3);
|
||||
assert_eq!(decimal.to_string(), "123456.789");
|
||||
|
||||
let decimal = Decimal128::try_new(123456789, 9, 0);
|
||||
|
||||
@@ -14,3 +14,5 @@
|
||||
|
||||
pub mod decimal128;
|
||||
pub mod error;
|
||||
|
||||
pub use decimal128::Decimal128;
|
||||
|
||||
@@ -158,6 +158,7 @@ mod tests {
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
}),
|
||||
location: None,
|
||||
}],
|
||||
@@ -199,6 +200,7 @@ mod tests {
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
}),
|
||||
location: Some(Location {
|
||||
location_type: LocationType::First.into(),
|
||||
@@ -213,6 +215,7 @@ mod tests {
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
}),
|
||||
location: Some(Location {
|
||||
location_type: LocationType::After.into(),
|
||||
|
||||
@@ -36,14 +36,16 @@ pub fn to_table_delete_request(
        values,
        null_mask,
        datatype,
        datatype_extension,
        ..
    } in request.key_columns
    {
        let Some(values) = values else { continue };

        let datatype: ConcreteDataType = ColumnDataTypeWrapper::try_new(datatype)
            .context(ColumnDataTypeSnafu)?
            .into();
        let datatype: ConcreteDataType =
            ColumnDataTypeWrapper::try_new(datatype, datatype_extension)
                .context(ColumnDataTypeSnafu)?
                .into();
        let vector = add_values_to_builder(datatype, values, row_count, null_mask)?;

        ensure!(
@@ -119,7 +119,7 @@ mod tests {
        nullable: bool,
    ) -> error::Result<ColumnSchema> {
        let datatype_wrapper =
            ColumnDataTypeWrapper::try_new(datatype).context(ColumnDataTypeSnafu)?;
            ColumnDataTypeWrapper::try_new(datatype, None).context(ColumnDataTypeSnafu)?;

        Ok(ColumnSchema::new(
            column_name,
@@ -170,7 +170,8 @@ mod tests {
                    .iter()
                    .find(|c| c.name == "host")
                    .unwrap()
                    .data_type
                    .data_type,
                None
            )
            .unwrap()
        )
@@ -184,7 +185,8 @@ mod tests {
                    .iter()
                    .find(|c| c.name == "cpu")
                    .unwrap()
                    .data_type
                    .data_type,
                None
            )
            .unwrap()
        )
@@ -198,7 +200,8 @@ mod tests {
                    .iter()
                    .find(|c| c.name == "memory")
                    .unwrap()
                    .data_type
                    .data_type,
                None
            )
            .unwrap()
        )
@@ -212,7 +215,8 @@ mod tests {
                    .iter()
                    .find(|c| c.name == "time")
                    .unwrap()
                    .data_type
                    .data_type,
                None
            )
            .unwrap()
        )
@@ -226,7 +230,8 @@ mod tests {
                    .iter()
                    .find(|c| c.name == "interval")
                    .unwrap()
                    .data_type
                    .data_type,
                None
            )
            .unwrap()
        )
@@ -240,7 +245,8 @@ mod tests {
                    .iter()
                    .find(|c| c.name == "duration")
                    .unwrap()
                    .data_type
                    .data_type,
                None
            )
            .unwrap()
        )
@@ -254,7 +260,8 @@ mod tests {
                    .iter()
                    .find(|c| c.name == "ts")
                    .unwrap()
                    .data_type
                    .data_type,
                None
            )
            .unwrap()
        )
@@ -284,8 +291,11 @@ mod tests {
        assert_eq!(
            ConcreteDataType::string_datatype(),
            ConcreteDataType::from(
                ColumnDataTypeWrapper::try_new(host_column.column_def.as_ref().unwrap().data_type)
                    .unwrap()
                ColumnDataTypeWrapper::try_new(
                    host_column.column_def.as_ref().unwrap().data_type,
                    None
                )
                .unwrap()
            )
        );

@@ -294,7 +304,8 @@ mod tests {
            ConcreteDataType::float64_datatype(),
            ConcreteDataType::from(
                ColumnDataTypeWrapper::try_new(
                    memory_column.column_def.as_ref().unwrap().data_type
                    memory_column.column_def.as_ref().unwrap().data_type,
                    None
                )
                .unwrap()
            )
@@ -304,8 +315,11 @@ mod tests {
        assert_eq!(
            ConcreteDataType::time_datatype(TimeUnit::Millisecond),
            ConcreteDataType::from(
                ColumnDataTypeWrapper::try_new(time_column.column_def.as_ref().unwrap().data_type)
                    .unwrap()
                ColumnDataTypeWrapper::try_new(
                    time_column.column_def.as_ref().unwrap().data_type,
                    None
                )
                .unwrap()
            )
        );

@@ -314,7 +328,8 @@ mod tests {
            ConcreteDataType::interval_datatype(IntervalUnit::MonthDayNano),
            ConcreteDataType::from(
                ColumnDataTypeWrapper::try_new(
                    interval_column.column_def.as_ref().unwrap().data_type
                    interval_column.column_def.as_ref().unwrap().data_type,
                    None
                )
                .unwrap()
            )
@@ -326,7 +341,8 @@ mod tests {
            ConcreteDataType::duration_millisecond_datatype(),
            ConcreteDataType::from(
                ColumnDataTypeWrapper::try_new(
                    duration_column.column_def.as_ref().unwrap().data_type
                    duration_column.column_def.as_ref().unwrap().data_type,
                    None
                )
                .unwrap()
            )
@@ -360,6 +376,7 @@ mod tests {
            values: Some(host_vals),
            null_mask: vec![0],
            datatype: ColumnDataType::String as i32,
            ..Default::default()
        };

        let cpu_vals = Values {
@@ -372,6 +389,7 @@ mod tests {
            values: Some(cpu_vals),
            null_mask: vec![2],
            datatype: ColumnDataType::Float64 as i32,
            ..Default::default()
        };

        let mem_vals = Values {
@@ -384,6 +402,7 @@ mod tests {
            values: Some(mem_vals),
            null_mask: vec![1],
            datatype: ColumnDataType::Float64 as i32,
            ..Default::default()
        };

        let time_vals = Values {
@@ -396,6 +415,7 @@ mod tests {
            values: Some(time_vals),
            null_mask: vec![0],
            datatype: ColumnDataType::TimeMillisecond as i32,
            ..Default::default()
        };

        let interval1 = IntervalMonthDayNano {
@@ -418,6 +438,7 @@ mod tests {
            values: Some(interval_vals),
            null_mask: vec![0],
            datatype: ColumnDataType::IntervalMonthDayNano as i32,
            ..Default::default()
        };

        let duration_vals = Values {
@@ -430,6 +451,7 @@ mod tests {
            values: Some(duration_vals),
            null_mask: vec![0],
            datatype: ColumnDataType::DurationMillisecond as i32,
            ..Default::default()
        };

        let ts_vals = Values {
@@ -442,6 +464,7 @@ mod tests {
            values: Some(ts_vals),
            null_mask: vec![0],
            datatype: ColumnDataType::TimestampMillisecond as i32,
            ..Default::default()
        };

        (
@@ -121,6 +121,7 @@ pub fn build_create_table_expr(
            default_constraint: vec![],
            semantic_type,
            comment: String::new(),
            ..Default::default()
        };
        column_defs.push(column_def);
    }
@@ -161,6 +162,7 @@ pub fn extract_new_columns(
            default_constraint: vec![],
            semantic_type: expr.semantic_type,
            comment: String::new(),
            ..Default::default()
        });
        AddColumn {
            column_def,
@@ -12,18 +12,19 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use api::helper::convert_i128_to_interval;
use api::helper::{convert_i128_to_interval, convert_to_pb_decimal128};
use api::v1::column::Values;
use common_base::BitVec;
use datatypes::types::{DurationType, IntervalType, TimeType, TimestampType, WrapperType};
use datatypes::vectors::{
    BinaryVector, BooleanVector, DateTimeVector, DateVector, DurationMicrosecondVector,
    DurationMillisecondVector, DurationNanosecondVector, DurationSecondVector, Float32Vector,
    Float64Vector, Int16Vector, Int32Vector, Int64Vector, Int8Vector, IntervalDayTimeVector,
    IntervalMonthDayNanoVector, IntervalYearMonthVector, StringVector, TimeMicrosecondVector,
    TimeMillisecondVector, TimeNanosecondVector, TimeSecondVector, TimestampMicrosecondVector,
    TimestampMillisecondVector, TimestampNanosecondVector, TimestampSecondVector, UInt16Vector,
    UInt32Vector, UInt64Vector, UInt8Vector, VectorRef,
    BinaryVector, BooleanVector, DateTimeVector, DateVector, Decimal128Vector,
    DurationMicrosecondVector, DurationMillisecondVector, DurationNanosecondVector,
    DurationSecondVector, Float32Vector, Float64Vector, Int16Vector, Int32Vector, Int64Vector,
    Int8Vector, IntervalDayTimeVector, IntervalMonthDayNanoVector, IntervalYearMonthVector,
    StringVector, TimeMicrosecondVector, TimeMillisecondVector, TimeNanosecondVector,
    TimeSecondVector, TimestampMicrosecondVector, TimestampMillisecondVector,
    TimestampNanosecondVector, TimestampSecondVector, UInt16Vector, UInt32Vector, UInt64Vector,
    UInt8Vector, VectorRef,
};
use snafu::OptionExt;

@@ -237,6 +238,12 @@ pub fn values(arrays: &[VectorRef]) -> Result<Values> {
            DurationNanosecondVector,
            duration_nanosecond_values,
            |x| { x.into_native() }
        ),
        (
            ConcreteDataType::Decimal128(_),
            Decimal128Vector,
            decimal128_values,
            |x| { convert_to_pb_decimal128(x) }
        )
    )
}
@@ -314,6 +321,17 @@ mod tests {
        assert_eq!(vec![1, 2, 3], values.duration_second_values);
    }

    #[test]
    fn test_convert_arrow_array_decimal128() {
        let array = Decimal128Vector::from(vec![Some(1), Some(2), None, Some(3)]);

        let vals = values(&[Arc::new(array)]).unwrap();
        (0..3).for_each(|i| {
            assert_eq!(vals.decimal128_values[i].hi, 0);
            assert_eq!(vals.decimal128_values[i].lo, i as i64 + 1);
        });
    }

    #[test]
    fn test_convert_arrow_arrays_string() {
        let array = StringVector::from(vec![
@@ -16,7 +16,7 @@ use std::collections::HashMap;
use std::fmt::Display;

use api::helper::values_with_capacity;
use api::v1::{Column, ColumnDataType, SemanticType};
use api::v1::{Column, ColumnDataType, ColumnDataTypeExtension, SemanticType};
use common_base::BitVec;
use common_time::timestamp::TimeUnit;
use snafu::ensure;
@@ -50,6 +50,7 @@ impl LinesWriter {
            column_name,
            ColumnDataType::TimestampMillisecond,
            SemanticType::Timestamp,
            None,
        );
        ensure!(
            column.datatype == ColumnDataType::TimestampMillisecond as i32,
@@ -69,7 +70,8 @@ impl LinesWriter {
    }

    pub fn write_tag(&mut self, column_name: &str, value: &str) -> Result<()> {
        let (idx, column) = self.mut_column(column_name, ColumnDataType::String, SemanticType::Tag);
        let (idx, column) =
            self.mut_column(column_name, ColumnDataType::String, SemanticType::Tag, None);
        ensure!(
            column.datatype == ColumnDataType::String as i32,
            TypeMismatchSnafu {
@@ -86,8 +88,12 @@ impl LinesWriter {
    }

    pub fn write_u64(&mut self, column_name: &str, value: u64) -> Result<()> {
        let (idx, column) =
            self.mut_column(column_name, ColumnDataType::Uint64, SemanticType::Field);
        let (idx, column) = self.mut_column(
            column_name,
            ColumnDataType::Uint64,
            SemanticType::Field,
            None,
        );
        ensure!(
            column.datatype == ColumnDataType::Uint64 as i32,
            TypeMismatchSnafu {
@@ -104,8 +110,12 @@ impl LinesWriter {
    }

    pub fn write_i64(&mut self, column_name: &str, value: i64) -> Result<()> {
        let (idx, column) =
            self.mut_column(column_name, ColumnDataType::Int64, SemanticType::Field);
        let (idx, column) = self.mut_column(
            column_name,
            ColumnDataType::Int64,
            SemanticType::Field,
            None,
        );
        ensure!(
            column.datatype == ColumnDataType::Int64 as i32,
            TypeMismatchSnafu {
@@ -122,8 +132,12 @@ impl LinesWriter {
    }

    pub fn write_f64(&mut self, column_name: &str, value: f64) -> Result<()> {
        let (idx, column) =
            self.mut_column(column_name, ColumnDataType::Float64, SemanticType::Field);
        let (idx, column) = self.mut_column(
            column_name,
            ColumnDataType::Float64,
            SemanticType::Field,
            None,
        );
        ensure!(
            column.datatype == ColumnDataType::Float64 as i32,
            TypeMismatchSnafu {
@@ -140,8 +154,12 @@ impl LinesWriter {
    }

    pub fn write_string(&mut self, column_name: &str, value: &str) -> Result<()> {
        let (idx, column) =
            self.mut_column(column_name, ColumnDataType::String, SemanticType::Field);
        let (idx, column) = self.mut_column(
            column_name,
            ColumnDataType::String,
            SemanticType::Field,
            None,
        );
        ensure!(
            column.datatype == ColumnDataType::String as i32,
            TypeMismatchSnafu {
@@ -158,8 +176,12 @@ impl LinesWriter {
    }

    pub fn write_bool(&mut self, column_name: &str, value: bool) -> Result<()> {
        let (idx, column) =
            self.mut_column(column_name, ColumnDataType::Boolean, SemanticType::Field);
        let (idx, column) = self.mut_column(
            column_name,
            ColumnDataType::Boolean,
            SemanticType::Field,
            None,
        );
        ensure!(
            column.datatype == ColumnDataType::Boolean as i32,
            TypeMismatchSnafu {
@@ -201,6 +223,7 @@ impl LinesWriter {
        column_name: &str,
        datatype: ColumnDataType,
        semantic_type: SemanticType,
        datatype_extension: Option<ColumnDataTypeExtension>,
    ) -> (usize, &mut Column) {
        let column_names = &mut self.column_name_index;
        let column_idx = match column_names.get(column_name) {
@@ -218,6 +241,7 @@ impl LinesWriter {
                values: Some(values_with_capacity(datatype, to_insert)),
                datatype: datatype as i32,
                null_mask: Vec::default(),
                datatype_extension,
            });
            let _ = column_names.insert(column_name.to_string(), new_idx);
            new_idx
@@ -42,5 +42,6 @@ tonic.workspace = true

[dev-dependencies]
chrono.workspace = true
common-procedure = { workspace = true, features = ["testing"] }
datatypes.workspace = true
hyper = { version = "0.14", features = ["full"] }

@@ -15,6 +15,7 @@
use std::sync::Arc;

use api::v1::meta::Partition;
use common_telemetry::tracing_context::W3cTrace;
use store_api::storage::TableId;
use table::metadata::RawTableInfo;

@@ -34,6 +35,7 @@ pub mod utils;
#[derive(Debug, Default)]
pub struct ExecutorContext {
    pub cluster_id: Option<u64>,
    pub tracing_context: Option<W3cTrace>,
}

#[async_trait::async_trait]
@@ -26,6 +26,7 @@ use common_procedure::error::{FromJsonSnafu, Result as ProcedureResult, ToJsonSnafu};
use common_procedure::{
    Context as ProcedureContext, Error as ProcedureError, LockKey, Procedure, Status,
};
use common_telemetry::tracing_context::TracingContext;
use common_telemetry::{debug, info};
use futures::future;
use serde::{Deserialize, Serialize};
@@ -207,7 +208,7 @@ impl AlterTableProcedure {
        let request = self.create_alter_region_request(region_id)?;
        let request = RegionRequest {
            header: Some(RegionRequestHeader {
                trace_id: common_telemetry::trace_id().unwrap_or_default(),
                tracing_context: TracingContext::from_current_span().to_w3c(),
                ..Default::default()
            }),
            body: Some(region_request::Body::Alter(request)),
@@ -21,6 +21,7 @@ use async_trait::async_trait;
use common_procedure::error::{FromJsonSnafu, Result as ProcedureResult, ToJsonSnafu};
use common_procedure::{Context as ProcedureContext, LockKey, Procedure, Status};
use common_telemetry::info;
use common_telemetry::tracing_context::TracingContext;
use futures::future::join_all;
use serde::{Deserialize, Serialize};
use snafu::{ensure, OptionExt, ResultExt};
@@ -131,6 +132,7 @@ impl CreateTableProcedure {
                default_constraint: c.default_constraint.clone(),
                semantic_type: semantic_type as i32,
                comment: String::new(),
                datatype_extension: c.datatype_extension.clone(),
            }),
            column_id: i as u32,
        }
@@ -199,7 +201,7 @@ impl CreateTableProcedure {
        for request in requests {
            let request = RegionRequest {
                header: Some(RegionRequestHeader {
                    trace_id: common_telemetry::trace_id().unwrap_or_default(),
                    tracing_context: TracingContext::from_current_span().to_w3c(),
                    ..Default::default()
                }),
                body: Some(request),
@@ -22,6 +22,7 @@ use common_procedure::error::{FromJsonSnafu, ToJsonSnafu};
use common_procedure::{
    Context as ProcedureContext, LockKey, Procedure, Result as ProcedureResult, Status,
};
use common_telemetry::tracing_context::TracingContext;
use common_telemetry::{debug, info};
use futures::future::join_all;
use serde::{Deserialize, Serialize};
@@ -85,6 +86,10 @@ impl DropTableProcedure {
        ))
        .await?;

        if !exist && self.data.task.drop_if_exists {
            return Ok(Status::Done);
        }

        ensure!(
            exist,
            error::TableNotFoundSnafu {
@@ -157,7 +162,7 @@ impl DropTableProcedure {

        let request = RegionRequest {
            header: Some(RegionRequestHeader {
                trace_id: common_telemetry::trace_id().unwrap_or_default(),
                tracing_context: TracingContext::from_current_span().to_w3c(),
                ..Default::default()
            }),
            body: Some(region_request::Body::Drop(PbDropRegionRequest {
@@ -21,6 +21,7 @@ use common_procedure::{
    Context as ProcedureContext, LockKey, Procedure, Result as ProcedureResult, Status,
};
use common_telemetry::debug;
use common_telemetry::tracing_context::TracingContext;
use futures::future::join_all;
use serde::{Deserialize, Serialize};
use snafu::{ensure, ResultExt};
@@ -154,7 +155,7 @@ impl TruncateTableProcedure {

        let request = RegionRequest {
            header: Some(RegionRequestHeader {
                trace_id: common_telemetry::trace_id().unwrap_or_default(),
                tracing_context: TracingContext::from_current_span().to_w3c(),
                ..Default::default()
            }),
            body: Some(region_request::Body::Truncate(PbTruncateRegionRequest {
@@ -15,7 +15,8 @@
use std::sync::Arc;

use common_procedure::{watcher, ProcedureId, ProcedureManagerRef, ProcedureWithId};
use common_telemetry::info;
use common_telemetry::tracing_context::{FutureExt, TracingContext};
use common_telemetry::{info, tracing};
use snafu::{OptionExt, ResultExt};

use crate::cache_invalidator::CacheInvalidatorRef;
@@ -42,9 +43,9 @@ use crate::rpc::ddl::{
    TruncateTableTask,
};
use crate::rpc::router::RegionRoute;

pub type DdlManagerRef = Arc<DdlManager>;

/// The [DdlManager] provides the ability to execute Ddl.
pub struct DdlManager {
    procedure_manager: ProcedureManagerRef,
    datanode_manager: DatanodeManagerRef,
@@ -54,26 +55,31 @@ pub struct DdlManager {
}

impl DdlManager {
    pub fn new(
    /// Returns a new [DdlManager] with all Ddl [BoxedProcedureLoader](common_procedure::procedure::BoxedProcedureLoader)s registered.
    pub fn try_new(
        procedure_manager: ProcedureManagerRef,
        datanode_clients: DatanodeManagerRef,
        cache_invalidator: CacheInvalidatorRef,
        table_metadata_manager: TableMetadataManagerRef,
        table_meta_allocator: TableMetadataAllocatorRef,
    ) -> Self {
        Self {
    ) -> Result<Self> {
        let manager = Self {
            procedure_manager,
            datanode_manager: datanode_clients,
            cache_invalidator,
            table_metadata_manager,
            table_meta_allocator,
        }
        };
        manager.register_loaders()?;
        Ok(manager)
    }

    /// Returns the [TableMetadataManagerRef].
    pub fn table_metadata_manager(&self) -> &TableMetadataManagerRef {
        &self.table_metadata_manager
    }

    /// Returns the [DdlContext]
    pub fn create_context(&self) -> DdlContext {
        DdlContext {
            datanode_manager: self.datanode_manager.clone(),
@@ -82,7 +88,7 @@ impl DdlManager {
        }
    }

    pub fn try_start(&self) -> Result<()> {
    fn register_loaders(&self) -> Result<()> {
        let context = self.create_context();

        self.procedure_manager
@@ -140,6 +146,8 @@ impl DdlManager {
        })
    }

    #[tracing::instrument(skip_all)]
    /// Submits and executes an alter table task.
    pub async fn submit_alter_table_task(
        &self,
        cluster_id: u64,
@@ -156,6 +164,8 @@ impl DdlManager {
        self.submit_procedure(procedure_with_id).await
    }

    #[tracing::instrument(skip_all)]
    /// Submits and executes a create table task.
    pub async fn submit_create_table_task(
        &self,
        cluster_id: u64,
@@ -172,6 +182,8 @@ impl DdlManager {
        self.submit_procedure(procedure_with_id).await
    }

    #[tracing::instrument(skip_all)]
    /// Submits and executes a drop table task.
    pub async fn submit_drop_table_task(
        &self,
        cluster_id: u64,
@@ -194,6 +206,8 @@ impl DdlManager {
        self.submit_procedure(procedure_with_id).await
    }

    #[tracing::instrument(skip_all)]
    /// Submits and executes a truncate table task.
    pub async fn submit_truncate_table_task(
        &self,
        cluster_id: u64,
@@ -383,21 +397,108 @@ impl DdlTaskExecutor for DdlManager {
        ctx: &ExecutorContext,
        request: SubmitDdlTaskRequest,
    ) -> Result<SubmitDdlTaskResponse> {
        let cluster_id = ctx.cluster_id.unwrap_or_default();
        info!("Submitting Ddl task: {:?}", request.task);
        match request.task {
            CreateTable(create_table_task) => {
                handle_create_table_task(self, cluster_id, create_table_task).await
            }
            DropTable(drop_table_task) => {
                handle_drop_table_task(self, cluster_id, drop_table_task).await
            }
            AlterTable(alter_table_task) => {
                handle_alter_table_task(self, cluster_id, alter_table_task).await
            }
            TruncateTable(truncate_table_task) => {
                handle_truncate_table_task(self, cluster_id, truncate_table_task).await
        let span = ctx
            .tracing_context
            .as_ref()
            .map(TracingContext::from_w3c)
            .unwrap_or(TracingContext::from_current_span())
            .attach(tracing::info_span!("DdlManager::submit_ddl_task"));
        async move {
            let cluster_id = ctx.cluster_id.unwrap_or_default();
            info!("Submitting Ddl task: {:?}", request.task);
            match request.task {
                CreateTable(create_table_task) => {
                    handle_create_table_task(self, cluster_id, create_table_task).await
                }
                DropTable(drop_table_task) => {
                    handle_drop_table_task(self, cluster_id, drop_table_task).await
                }
                AlterTable(alter_table_task) => {
                    handle_alter_table_task(self, cluster_id, alter_table_task).await
                }
                TruncateTable(truncate_table_task) => {
                    handle_truncate_table_task(self, cluster_id, truncate_table_task).await
                }
            }
        }
        .trace(span)
        .await
    }
}
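The same pattern appears wherever a future crosses an execution boundary: capture the caller's context, then re-attach it as a span around the moved future. Below is a minimal sketch of the idea using only the plain tracing crate's Instrument trait (the FutureExt::trace call above plays the analogous role for TracingContext); the function names are illustrative, not part of the codebase.

use tracing::{info_span, Instrument};

async fn handle_task() {
    tracing::info!("runs inside the submit span");
}

async fn submit() {
    // Create the span where the context is known, then attach it to the
    // future so the task stays in the same trace after tokio::spawn moves it.
    let span = info_span!("DdlManager::submit_ddl_task");
    tokio::spawn(handle_task().instrument(span));
}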
#[cfg(test)]
mod tests {
    use std::sync::Arc;

    use api::v1::meta::Partition;
    use common_procedure::local::LocalManager;
    use table::metadata::{RawTableInfo, TableId};

    use super::DdlManager;
    use crate::cache_invalidator::DummyCacheInvalidator;
    use crate::datanode_manager::{DatanodeManager, DatanodeRef};
    use crate::ddl::alter_table::AlterTableProcedure;
    use crate::ddl::create_table::CreateTableProcedure;
    use crate::ddl::drop_table::DropTableProcedure;
    use crate::ddl::truncate_table::TruncateTableProcedure;
    use crate::ddl::{TableMetadataAllocator, TableMetadataAllocatorContext};
    use crate::error::Result;
    use crate::key::TableMetadataManager;
    use crate::kv_backend::memory::MemoryKvBackend;
    use crate::peer::Peer;
    use crate::rpc::router::RegionRoute;
    use crate::state_store::KvStateStore;

    /// A dummy implementation of [DatanodeManager].
    pub struct DummyDatanodeManager;

    #[async_trait::async_trait]
    impl DatanodeManager for DummyDatanodeManager {
        async fn datanode(&self, _datanode: &Peer) -> DatanodeRef {
            unimplemented!()
        }
    }

    /// A dummy implementation of [TableMetadataAllocator].
    pub struct DummyTableMetadataAllocator;

    #[async_trait::async_trait]
    impl TableMetadataAllocator for DummyTableMetadataAllocator {
        async fn create(
            &self,
            _ctx: &TableMetadataAllocatorContext,
            _table_info: &mut RawTableInfo,
            _partitions: &[Partition],
        ) -> Result<(TableId, Vec<RegionRoute>)> {
            unimplemented!()
        }
    }

    #[test]
    fn test_try_new() {
        let kv_backend = Arc::new(MemoryKvBackend::new());
        let table_metadata_manager = Arc::new(TableMetadataManager::new(kv_backend.clone()));

        let state_store = Arc::new(KvStateStore::new(kv_backend));
        let procedure_manager = Arc::new(LocalManager::new(Default::default(), state_store));

        let _ = DdlManager::try_new(
            procedure_manager.clone(),
            Arc::new(DummyDatanodeManager),
            Arc::new(DummyCacheInvalidator),
            table_metadata_manager,
            Arc::new(DummyTableMetadataAllocator),
        );

        let expected_loaders = vec![
            CreateTableProcedure::TYPE_NAME,
            AlterTableProcedure::TYPE_NAME,
            DropTableProcedure::TYPE_NAME,
            TruncateTableProcedure::TYPE_NAME,
        ];

        for loader in expected_loaders {
            assert!(procedure_manager.contains_loader(loader));
        }
    }
}
@@ -33,5 +33,8 @@ pub const DATANODE_LEASE_SECS: u64 = REGION_LEASE_SECS;
/// The lease seconds of the metasrv leader.
pub const META_LEASE_SECS: u64 = 3;

// In a lease, there are two opportunities for renewal.
/// In a lease, there are two opportunities for renewal.
pub const META_KEEP_ALIVE_INTERVAL_SECS: u64 = META_LEASE_SECS / 2;

/// The default mailbox round-trip timeout.
pub const MAILBOX_RTT_SECS: u64 = 1;

@@ -37,7 +37,7 @@ pub struct HeartbeatResponseHandlerContext {
/// HandleControl
///
/// Controls the process of handling a heartbeat response.
#[derive(PartialEq)]
#[derive(Debug, PartialEq)]
pub enum HandleControl {
    Continue,
    Done,
@@ -30,8 +30,8 @@ pub struct MessageMeta {
    pub from: String,
}

#[cfg(test)]
impl MessageMeta {
    #[cfg(any(test, feature = "testing"))]
    pub fn new_test(id: u64, subject: &str, to: &str, from: &str) -> Self {
        MessageMeta {
            id,
@@ -48,6 +48,27 @@ impl Display for RegionIdent {
    }
}

/// The result of downgrading a leader region.
#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)]
pub struct DowngradeRegionReply {
    /// Returns the `last_entry_id` if available.
    pub last_entry_id: Option<u64>,
    /// Indicates whether the region exists.
    pub exists: bool,
    /// Returns an error if any occurred during the operation.
    pub error: Option<String>,
}

impl Display for DowngradeRegionReply {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        write!(
            f,
            "(last_entry_id={:?}, exists={}, error={:?})",
            self.last_entry_id, self.exists, self.error
        )
    }
}

#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)]
pub struct SimpleReply {
    pub result: bool,
@@ -87,20 +108,82 @@ impl OpenRegion {
    }
}

/// The instruction for downgrading a leader region.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DowngradeRegion {
    /// The [RegionId].
    pub region_id: RegionId,
}

impl Display for DowngradeRegion {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        write!(f, "DowngradeRegion(region_id={})", self.region_id)
    }
}

/// Upgrades a follower region to a leader region.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct UpgradeRegion {
    /// The [RegionId].
    pub region_id: RegionId,
    /// The `last_entry_id` of the old leader region.
    pub last_entry_id: Option<u64>,
    /// The number of seconds to wait for the WAL replay.
    ///
    /// `None` stands for no wait;
    /// it's helpful to verify whether the leader region is ready.
    pub wait_for_replay_secs: Option<u64>,
}

#[derive(Debug, Clone, Serialize, Deserialize, Display)]
pub enum Instruction {
    /// Opens a region.
    ///
    /// - Returns true if the specified region exists.
    OpenRegion(OpenRegion),
    /// Closes a region.
    ///
    /// - Returns true if the specified region does not exist.
    CloseRegion(RegionIdent),
    /// Upgrades a region.
    UpgradeRegion(UpgradeRegion),
    /// Downgrades a region.
    DowngradeRegion(DowngradeRegion),
    /// Invalidates a specified table cache.
    InvalidateTableIdCache(TableId),
    /// Invalidates a specified table name index cache.
    InvalidateTableNameCache(TableName),
}

/// The reply to [UpgradeRegion].
#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)]
pub struct UpgradeRegionReply {
    /// Returns true if the `last_entry_id` has been replayed to the latest.
    pub ready: bool,
    /// Indicates whether the region exists.
    pub exists: bool,
    /// Returns an error if any.
    pub error: Option<String>,
}

impl Display for UpgradeRegionReply {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        write!(
            f,
            "(ready={}, exists={}, error={:?})",
            self.ready, self.exists, self.error
        )
    }
}

#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum InstructionReply {
    OpenRegion(SimpleReply),
    CloseRegion(SimpleReply),
    UpgradeRegion(UpgradeRegionReply),
    InvalidateTableCache(SimpleReply),
    DowngradeRegion(DowngradeRegionReply),
}

impl Display for InstructionReply {
@@ -108,9 +191,13 @@ impl Display for InstructionReply {
        match self {
            Self::OpenRegion(reply) => write!(f, "InstructionReply::OpenRegion({})", reply),
            Self::CloseRegion(reply) => write!(f, "InstructionReply::CloseRegion({})", reply),
            Self::UpgradeRegion(reply) => write!(f, "InstructionReply::UpgradeRegion({})", reply),
            Self::InvalidateTableCache(reply) => {
                write!(f, "InstructionReply::Invalidate({})", reply)
            }
            Self::DowngradeRegion(reply) => {
                write!(f, "InstructionReply::DowngradeRegion({})", reply)
            }
        }
    }
}
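Given the serde attributes on InstructionReply (internally tagged by "type", snake_case variant names), a reply serializes with its fields inlined next to the tag. A self-contained sketch with stand-in copies of the types above, assuming serde_json is available:

use serde::Serialize;

#[derive(Serialize)]
struct DowngradeRegionReply {
    last_entry_id: Option<u64>,
    exists: bool,
    error: Option<String>,
}

#[derive(Serialize)]
#[serde(tag = "type", rename_all = "snake_case")]
enum InstructionReply {
    DowngradeRegion(DowngradeRegionReply),
}

fn main() {
    let reply = InstructionReply::DowngradeRegion(DowngradeRegionReply {
        last_entry_id: Some(42),
        exists: true,
        error: None,
    });
    // The tag is inlined next to the struct's own fields.
    assert_eq!(
        serde_json::to_string(&reply).unwrap(),
        r#"{"type":"downgrade_region","last_entry_id":42,"exists":true,"error":null}"#
    );
}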
@@ -89,7 +89,7 @@ use crate::DatanodeId;

pub const REMOVED_PREFIX: &str = "__removed";

const NAME_PATTERN: &str = r"[a-zA-Z_:-][a-zA-Z0-9_:\-\.]*";
pub const NAME_PATTERN: &str = r"[a-zA-Z_:-][a-zA-Z0-9_:\-\.]*";

const DATANODE_TABLE_KEY_PREFIX: &str = "__dn_table";
const TABLE_REGION_KEY_PREFIX: &str = "__table_region";
@@ -584,7 +584,7 @@ impl TableMetadataManager {
        &self,
        table_id: TableId,
        region_info: RegionInfo,
        current_table_route_value: DeserializedValueWithBytes<TableRouteValue>,
        current_table_route_value: &DeserializedValueWithBytes<TableRouteValue>,
        new_region_routes: Vec<RegionRoute>,
        new_region_options: &HashMap<String, String>,
    ) -> Result<()> {
@@ -606,7 +606,7 @@ impl TableMetadataManager {

        let (update_table_route_txn, on_update_table_route_failure) = self
            .table_route_manager()
            .build_update_txn(table_id, &current_table_route_value, &new_table_route_value)?;
            .build_update_txn(table_id, current_table_route_value, &new_table_route_value)?;

        let txn = Txn::merge_all(vec![update_datanode_table_txn, update_table_route_txn]);

@@ -631,7 +631,7 @@ impl TableMetadataManager {
    pub async fn update_leader_region_status<F>(
        &self,
        table_id: TableId,
        current_table_route_value: DeserializedValueWithBytes<TableRouteValue>,
        current_table_route_value: &DeserializedValueWithBytes<TableRouteValue>,
        next_region_route_status: F,
    ) -> Result<()>
    where
@@ -658,11 +658,9 @@ impl TableMetadataManager {

        let (update_table_route_txn, on_update_table_route_failure) = self
            .table_route_manager()
            .build_update_txn(table_id, &current_table_route_value, &new_table_route_value)?;
            .build_update_txn(table_id, current_table_route_value, &new_table_route_value)?;

        let txn = Txn::merge_all(vec![update_table_route_txn]);

        let r = self.kv_backend.txn(txn).await?;
        let r = self.kv_backend.txn(update_table_route_txn).await?;

        // Checks whether metadata was already updated.
        if !r.succeeded {
@@ -1096,7 +1094,7 @@ mod tests {
            .unwrap();

        table_metadata_manager
            .update_leader_region_status(table_id, current_table_route_value, |region_route| {
            .update_leader_region_status(table_id, &current_table_route_value, |region_route| {
                if region_route.leader_status.is_some() {
                    None
                } else {
@@ -1175,7 +1173,7 @@ mod tests {
                region_storage_path: region_storage_path.to_string(),
                region_options: HashMap::new(),
            },
            current_table_route_value.clone(),
            &current_table_route_value,
            new_region_routes.clone(),
            &HashMap::new(),
        )
@@ -1192,7 +1190,7 @@ mod tests {
                region_storage_path: region_storage_path.to_string(),
                region_options: HashMap::new(),
            },
            current_table_route_value.clone(),
            &current_table_route_value,
            new_region_routes.clone(),
            &HashMap::new(),
        )
@@ -1214,7 +1212,7 @@ mod tests {
                region_storage_path: region_storage_path.to_string(),
                region_options: HashMap::new(),
            },
            current_table_route_value.clone(),
            &current_table_route_value,
            new_region_routes.clone(),
            &HashMap::new(),
        )
@@ -1239,7 +1237,7 @@ mod tests {
                region_storage_path: region_storage_path.to_string(),
                region_options: HashMap::new(),
            },
            wrong_table_route_value,
            &wrong_table_route_value,
            new_region_routes,
            &HashMap::new(),
        )
@@ -12,11 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.

pub mod etcd;
pub mod memory;
pub mod test;
pub mod txn;

use std::any::Any;
use std::sync::Arc;

@@ -32,6 +27,12 @@ use crate::rpc::store::{
};
use crate::rpc::KeyValue;

pub mod chroot;
pub mod etcd;
pub mod memory;
pub mod test;
pub mod txn;

pub type KvBackendRef = Arc<dyn KvBackend<Error = Error> + Send + Sync>;

#[async_trait]
src/common/meta/src/kv_backend/chroot.rs (new file, 273 lines)
@@ -0,0 +1,273 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::any::Any;

use crate::error::Error;
use crate::kv_backend::txn::{Txn, TxnOp, TxnOpResponse, TxnResponse};
use crate::kv_backend::{KvBackend, KvBackendRef, TxnService};
use crate::rpc::store::{
    BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse, BatchPutRequest,
    BatchPutResponse, CompareAndPutRequest, CompareAndPutResponse, DeleteRangeRequest,
    DeleteRangeResponse, PutRequest, PutResponse, RangeRequest, RangeResponse,
};
use crate::rpc::KeyValue;

pub struct ChrootKvBackend {
    root: Vec<u8>,
    inner: KvBackendRef,
}

impl ChrootKvBackend {
    pub fn new(root: Vec<u8>, inner: KvBackendRef) -> ChrootKvBackend {
        debug_assert!(!root.is_empty());
        ChrootKvBackend { root, inner }
    }
}

#[async_trait::async_trait]
impl TxnService for ChrootKvBackend {
    type Error = Error;

    async fn txn(&self, txn: Txn) -> Result<TxnResponse, Self::Error> {
        let txn = self.txn_prepend_root(txn);
        let txn_res = self.inner.txn(txn).await?;
        Ok(self.chroot_txn_response(txn_res))
    }
}

#[async_trait::async_trait]
impl KvBackend for ChrootKvBackend {
    fn name(&self) -> &str {
        self.inner.name()
    }

    fn as_any(&self) -> &dyn Any {
        self
    }

    async fn range(&self, mut req: RangeRequest) -> Result<RangeResponse, Self::Error> {
        req.key = self.key_prepend_root(req.key);
        req.range_end = self.range_end_prepend_root(req.range_end);
        let mut res = self.inner.range(req).await?;
        res.kvs = res
            .kvs
            .drain(..)
            .map(self.chroot_key_value_with())
            .collect();
        Ok(res)
    }

    async fn put(&self, mut req: PutRequest) -> Result<PutResponse, Self::Error> {
        req.key = self.key_prepend_root(req.key);
        let mut res = self.inner.put(req).await?;
        res.prev_kv = res.prev_kv.take().map(self.chroot_key_value_with());
        Ok(res)
    }

    async fn batch_put(&self, mut req: BatchPutRequest) -> Result<BatchPutResponse, Self::Error> {
        for kv in req.kvs.iter_mut() {
            kv.key = self.key_prepend_root(kv.key.drain(..).collect());
        }
        let mut res = self.inner.batch_put(req).await?;
        res.prev_kvs = res
            .prev_kvs
            .drain(..)
            .map(self.chroot_key_value_with())
            .collect();
        Ok(res)
    }

    async fn batch_get(&self, mut req: BatchGetRequest) -> Result<BatchGetResponse, Self::Error> {
        req.keys = req
            .keys
            .drain(..)
            .map(|key| self.key_prepend_root(key))
            .collect();
        let mut res = self.inner.batch_get(req).await?;
        res.kvs = res
            .kvs
            .drain(..)
            .map(self.chroot_key_value_with())
            .collect();
        Ok(res)
    }

    async fn compare_and_put(
        &self,
        mut req: CompareAndPutRequest,
    ) -> Result<CompareAndPutResponse, Self::Error> {
        req.key = self.key_prepend_root(req.key);
        let mut res = self.inner.compare_and_put(req).await?;
        res.prev_kv = res.prev_kv.take().map(self.chroot_key_value_with());
        Ok(res)
    }

    async fn delete_range(
        &self,
        mut req: DeleteRangeRequest,
    ) -> Result<DeleteRangeResponse, Self::Error> {
        req.key = self.key_prepend_root(req.key);
        req.range_end = self.range_end_prepend_root(req.range_end);
        let mut res = self.inner.delete_range(req).await?;
        res.prev_kvs = res
            .prev_kvs
            .drain(..)
            .map(self.chroot_key_value_with())
            .collect();
        Ok(res)
    }

    async fn batch_delete(
        &self,
        mut req: BatchDeleteRequest,
    ) -> Result<BatchDeleteResponse, Self::Error> {
        req.keys = req
            .keys
            .drain(..)
            .map(|key| self.key_prepend_root(key))
            .collect();
        let mut res = self.inner.batch_delete(req).await?;
        res.prev_kvs = res
            .prev_kvs
            .drain(..)
            .map(self.chroot_key_value_with())
            .collect();
        Ok(res)
    }
}

impl ChrootKvBackend {
    fn key_strip_root(&self, mut key: Vec<u8>) -> Vec<u8> {
        let root = &self.root;
        debug_assert!(
            key.starts_with(root),
            "key={}, root={}",
            String::from_utf8_lossy(&key),
            String::from_utf8_lossy(root),
        );
        key.split_off(root.len())
    }

    fn chroot_key_value_with(&self) -> impl FnMut(KeyValue) -> KeyValue + '_ {
        |kv| KeyValue {
            key: self.key_strip_root(kv.key),
            value: kv.value,
        }
    }

    fn chroot_txn_response(&self, mut txn_res: TxnResponse) -> TxnResponse {
        for resp in txn_res.responses.iter_mut() {
            match resp {
                TxnOpResponse::ResponsePut(r) => {
                    r.prev_kv = r.prev_kv.take().map(self.chroot_key_value_with());
                }
                TxnOpResponse::ResponseGet(r) => {
                    r.kvs = r.kvs.drain(..).map(self.chroot_key_value_with()).collect();
                }
                TxnOpResponse::ResponseDelete(r) => {
                    r.prev_kvs = r
                        .prev_kvs
                        .drain(..)
                        .map(self.chroot_key_value_with())
                        .collect();
                }
            }
        }
        txn_res
    }

    fn key_prepend_root(&self, mut key: Vec<u8>) -> Vec<u8> {
        let mut new_key = self.root.clone();
        new_key.append(&mut key);
        new_key
    }

    // see namespace.prefixInterval - https://github.com/etcd-io/etcd/blob/v3.5.10/client/v3/namespace/util.go
    fn range_end_prepend_root(&self, mut range_end: Vec<u8>) -> Vec<u8> {
        let root = &self.root;
        if range_end == [0] {
            // the edge of the keyspace
            let mut new_end = root.clone();
            let mut ok = false;
            for i in (0..new_end.len()).rev() {
                new_end[i] = new_end[i].wrapping_add(1);
                if new_end[i] != 0 {
                    ok = true;
                    break;
                }
            }
            if !ok {
                // 0xff..ff => 0x00
                new_end = vec![0];
            }
            new_end
        } else if !range_end.is_empty() {
            let mut new_end = root.clone();
            new_end.append(&mut range_end);
            new_end
        } else {
            vec![]
        }
    }

    fn txn_prepend_root(&self, mut txn: Txn) -> Txn {
        let op_prepend_root = |op: TxnOp| match op {
            TxnOp::Put(k, v) => TxnOp::Put(self.key_prepend_root(k), v),
            TxnOp::Get(k) => TxnOp::Get(self.key_prepend_root(k)),
            TxnOp::Delete(k) => TxnOp::Delete(self.key_prepend_root(k)),
        };
        txn.req.success = txn.req.success.drain(..).map(op_prepend_root).collect();
        txn.req.failure = txn.req.failure.drain(..).map(op_prepend_root).collect();
        txn.req.compare = txn
            .req
            .compare
            .drain(..)
            .map(|cmp| super::txn::Compare {
                key: self.key_prepend_root(cmp.key),
                cmp: cmp.cmp,
                target: cmp.target,
            })
            .collect();
        txn
    }
}

#[cfg(test)]
mod tests {
    use std::sync::Arc;

    use crate::kv_backend::chroot::ChrootKvBackend;
    use crate::kv_backend::memory::MemoryKvBackend;

    #[test]
    fn test_prefix_key_and_range_end() {
        fn run_test_case(pfx: &[u8], key: &[u8], end: &[u8], w_key: &[u8], w_end: &[u8]) {
            let chroot = ChrootKvBackend::new(pfx.into(), Arc::new(MemoryKvBackend::new()));
            assert_eq!(chroot.key_prepend_root(key.into()), w_key);
            assert_eq!(chroot.range_end_prepend_root(end.into()), w_end);
        }

        // single key
        run_test_case(b"pfx/", b"a", b"", b"pfx/a", b"");

        // range
        run_test_case(b"pfx/", b"abc", b"def", b"pfx/abc", b"pfx/def");

        // one-sided range (HACK - b'/' + 1 = b'0')
        run_test_case(b"pfx/", b"abc", b"\0", b"pfx/abc", b"pfx0");

        // one-sided range, end of keyspace
        run_test_case(b"\xFF\xFF", b"abc", b"\0", b"\xff\xffabc", b"\0");
    }
}
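Usage is transparent to callers. A hypothetical sketch, assuming (as the code above suggests) that PutRequest and RangeRequest expose key/value fields and implement Default:

use std::sync::Arc;

async fn demo() -> Result<(), Error> {
    let inner = Arc::new(MemoryKvBackend::new());
    let chroot = ChrootKvBackend::new(b"tenant-a/".to_vec(), inner);

    // Writes land under "tenant-a/" in the inner store...
    chroot
        .put(PutRequest {
            key: b"greeting".to_vec(),
            value: b"hello".to_vec(),
            ..Default::default()
        })
        .await?;

    // ...but reads strip the root again, so callers never see the prefix.
    let res = chroot
        .range(RangeRequest {
            key: b"greeting".to_vec(),
            ..Default::default()
        })
        .await?;
    assert_eq!(res.kvs[0].key, b"greeting".to_vec());
    Ok(())
}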
@@ -33,36 +33,17 @@ use crate::rpc::store::{
};
use crate::rpc::KeyValue;

pub struct KvPair<'a>(&'a etcd_client::KeyValue);

impl<'a> KvPair<'a> {
    /// Creates a `KvPair` from etcd KeyValue
    #[inline]
    pub fn new(kv: &'a etcd_client::KeyValue) -> Self {
        Self(kv)
    }

    #[inline]
    pub fn from_etcd_kv(kv: &etcd_client::KeyValue) -> KeyValue {
        KeyValue::from(KvPair::new(kv))
    }
}

impl<'a> From<KvPair<'a>> for KeyValue {
    fn from(kv: KvPair<'a>) -> Self {
        Self {
            key: kv.0.key().to_vec(),
            value: kv.0.value().to_vec(),
        }
    }
}

// Maximum number of operations permitted in a transaction.
// The etcd default configuration's `--max-txn-ops` is 128.
//
// For more detail, see: https://etcd.io/docs/v3.5/op-guide/configuration/
const MAX_TXN_SIZE: usize = 128;

fn convert_key_value(kv: etcd_client::KeyValue) -> KeyValue {
    let (key, value) = kv.into_key_value();
    KeyValue { key, value }
}

pub struct EtcdStore {
    client: Client,
}
@@ -87,6 +68,9 @@ impl EtcdStore {
    async fn do_multi_txn(&self, txn_ops: Vec<TxnOp>) -> Result<Vec<TxnResponse>> {
        if txn_ops.len() < MAX_TXN_SIZE {
            // fast path
            let _timer = METRIC_META_TXN_REQUEST
                .with_label_values(&["etcd", "txn"])
                .start_timer();
            let txn = Txn::new().and_then(txn_ops);
            let txn_res = self
                .client
@@ -100,6 +84,9 @@ impl EtcdStore {
        let txns = txn_ops
            .chunks(MAX_TXN_SIZE)
            .map(|part| async move {
                let _timer = METRIC_META_TXN_REQUEST
                    .with_label_values(&["etcd", "txn"])
                    .start_timer();
                let txn = Txn::new().and_then(part);
                self.client.kv_client().txn(txn).await
            })
@@ -124,7 +111,7 @@ impl KvBackend for EtcdStore {
    async fn range(&self, req: RangeRequest) -> Result<RangeResponse> {
        let Get { key, options } = req.try_into()?;

        let res = self
        let mut res = self
            .client
            .kv_client()
            .get(key, options)
@@ -132,9 +119,9 @@ impl KvBackend for EtcdStore {
            .context(error::EtcdFailedSnafu)?;

        let kvs = res
            .kvs()
            .iter()
            .map(KvPair::from_etcd_kv)
            .take_kvs()
            .into_iter()
            .map(convert_key_value)
            .collect::<Vec<_>>();

        Ok(RangeResponse {
@@ -150,14 +137,14 @@ impl KvBackend for EtcdStore {
            options,
        } = req.try_into()?;

        let res = self
        let mut res = self
            .client
            .kv_client()
            .put(key, value, options)
            .await
            .context(error::EtcdFailedSnafu)?;

        let prev_kv = res.prev_key().map(KvPair::from_etcd_kv);
        let prev_kv = res.take_prev_key().map(convert_key_value);
        Ok(PutResponse { prev_kv })
    }

@@ -166,7 +153,7 @@ impl KvBackend for EtcdStore {

        let put_ops = kvs
            .into_iter()
            .map(|kv| (TxnOp::put(kv.key, kv.value, options.clone())))
            .map(|kv| TxnOp::put(kv.key, kv.value, options.clone()))
            .collect::<Vec<_>>();

        let txn_responses = self.do_multi_txn(put_ops).await?;
@@ -175,9 +162,9 @@ impl KvBackend for EtcdStore {
        for txn_res in txn_responses {
            for op_res in txn_res.op_responses() {
                match op_res {
                    TxnOpResponse::Put(put_res) => {
                        if let Some(prev_kv) = put_res.prev_key() {
                            prev_kvs.push(KvPair::from_etcd_kv(prev_kv));
                    TxnOpResponse::Put(mut put_res) => {
                        if let Some(prev_kv) = put_res.take_prev_key().map(convert_key_value) {
                            prev_kvs.push(prev_kv);
                        }
                    }
                    _ => unreachable!(),
@@ -193,7 +180,7 @@ impl KvBackend for EtcdStore {

        let get_ops: Vec<_> = keys
            .into_iter()
            .map(|k| TxnOp::get(k, options.clone()))
            .map(|key| TxnOp::get(key, options.clone()))
            .collect();

        let txn_responses = self.do_multi_txn(get_ops).await?;
@@ -201,12 +188,11 @@ impl KvBackend for EtcdStore {
        let mut kvs = vec![];
        for txn_res in txn_responses {
            for op_res in txn_res.op_responses() {
                let get_res = match op_res {
                let mut get_res = match op_res {
                    TxnOpResponse::Get(get_res) => get_res,
                    _ => unreachable!(),
                };

                kvs.extend(get_res.kvs().iter().map(KvPair::from_etcd_kv));
                kvs.extend(get_res.take_kvs().into_iter().map(convert_key_value));
            }
        }

@@ -252,8 +238,8 @@ impl KvBackend for EtcdStore {
        })?;

        let prev_kv = match op_res {
            TxnOpResponse::Put(res) => res.prev_key().map(KvPair::from_etcd_kv),
            TxnOpResponse::Get(res) => res.kvs().first().map(KvPair::from_etcd_kv),
            TxnOpResponse::Put(mut res) => res.take_prev_key().map(convert_key_value),
            TxnOpResponse::Get(mut res) => res.take_kvs().into_iter().next().map(convert_key_value),
            _ => unreachable!(),
        };

@@ -263,7 +249,7 @@ impl KvBackend for EtcdStore {
    async fn delete_range(&self, req: DeleteRangeRequest) -> Result<DeleteRangeResponse> {
        let Delete { key, options } = req.try_into()?;

        let res = self
        let mut res = self
            .client
            .kv_client()
            .delete(key, options)
@@ -271,9 +257,9 @@ impl KvBackend for EtcdStore {
            .context(error::EtcdFailedSnafu)?;

        let prev_kvs = res
            .prev_kvs()
            .iter()
            .map(KvPair::from_etcd_kv)
            .take_prev_kvs()
            .into_iter()
            .map(convert_key_value)
            .collect::<Vec<_>>();

        Ok(DeleteRangeResponse {
@@ -289,7 +275,7 @@ impl KvBackend for EtcdStore {

        let delete_ops = keys
            .into_iter()
            .map(|k| TxnOp::delete(k, options.clone()))
            .map(|key| TxnOp::delete(key, options.clone()))
            .collect::<Vec<_>>();

        let txn_responses = self.do_multi_txn(delete_ops).await?;
@@ -297,10 +283,14 @@ impl KvBackend for EtcdStore {
        for txn_res in txn_responses {
            for op_res in txn_res.op_responses() {
                match op_res {
                    TxnOpResponse::Delete(delete_res) => {
                        delete_res.prev_kvs().iter().for_each(|kv| {
                            prev_kvs.push(KvPair::from_etcd_kv(kv));
                        });
                    TxnOpResponse::Delete(mut delete_res) => {
                        delete_res
                            .take_prev_kvs()
                            .into_iter()
                            .map(convert_key_value)
                            .for_each(|kv| {
                                prev_kvs.push(kv);
                            });
                    }
                    _ => unreachable!(),
                }
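The do_multi_txn slow path above is the standard remedy for etcd's --max-txn-ops limit: split the operation list into chunks of at most MAX_TXN_SIZE and run the resulting transactions concurrently. A reduced, self-contained sketch of the shape, with a stand-in for the etcd round trip:

use futures::future::join_all;

const MAX_TXN_SIZE: usize = 128;

// Stand-in for one etcd transaction round trip.
async fn exec_txn(ops: &[u64]) -> usize {
    ops.len()
}

async fn do_multi_txn(ops: Vec<u64>) -> usize {
    // Each chunk stays under the server-side limit; join_all preserves order.
    let futures = ops.chunks(MAX_TXN_SIZE).map(exec_txn);
    join_all(futures).await.into_iter().sum()
}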
@@ -12,12 +12,12 @@
// See the License for the specific language governing permissions and
// limitations under the License.

mod etcd;

use common_error::ext::ErrorExt;

use crate::rpc::store::{DeleteRangeResponse, PutResponse, RangeResponse};

mod etcd;

#[async_trait::async_trait]
pub trait TxnService: Sync + Send {
    type Error: ErrorExt;
@@ -122,7 +122,8 @@ pub struct TxnResponse {

#[derive(Debug, Clone, Default, PartialEq)]
pub struct Txn {
    req: TxnRequest,
    // HACK - chroot would modify this field
    pub(super) req: TxnRequest,
    c_when: bool,
    c_then: bool,
    c_else: bool,
@@ -54,12 +54,14 @@ impl DdlTask {
        schema: String,
        table: String,
        table_id: TableId,
        drop_if_exists: bool,
    ) -> Self {
        DdlTask::DropTable(DropTableTask {
            catalog,
            schema,
            table,
            table_id,
            drop_if_exists,
        })
    }

@@ -118,6 +120,7 @@ impl TryFrom<SubmitDdlTaskRequest> for PbSubmitDdlTaskRequest {
                schema_name: task.schema,
                table_name: task.table,
                table_id: Some(api::v1::TableId { id: task.table_id }),
                drop_if_exists: task.drop_if_exists,
            }),
        }),
        DdlTask::AlterTable(task) => Task::AlterTableTask(PbAlterTableTask {
@@ -176,6 +179,8 @@ pub struct DropTableTask {
    pub schema: String,
    pub table: String,
    pub table_id: TableId,
    #[serde(default)]
    pub drop_if_exists: bool,
}

impl DropTableTask {
@@ -214,6 +219,7 @@ impl TryFrom<PbDropTableTask> for DropTableTask {
                    err_msg: "expected table_id",
                })?
                .id,
            drop_if_exists: drop_table.drop_if_exists,
        })
    }
}
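The #[serde(default)] on drop_if_exists is what keeps previously serialized tasks, which predate the field, deserializable. A small sketch of the behavior with stand-in types, assuming serde_json is available:

use serde::Deserialize;

#[derive(Deserialize)]
struct DropTableTask {
    table: String,
    #[serde(default)]
    drop_if_exists: bool,
}

fn main() {
    // JSON persisted by an older version has no `drop_if_exists` key.
    let task: DropTableTask = serde_json::from_str(r#"{"table":"t"}"#).unwrap();
    assert_eq!(task.table, "t");
    assert!(!task.drop_if_exists); // the missing field falls back to bool::default()
}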
@@ -4,6 +4,9 @@ version.workspace = true
edition.workspace = true
license.workspace = true

[features]
testing = []

[dependencies]
async-stream.workspace = true
async-trait.workspace = true
@@ -23,7 +23,8 @@ use std::time::{Duration, Instant};
use async_trait::async_trait;
use backon::ExponentialBuilder;
use common_runtime::{RepeatedTask, TaskFunction};
use common_telemetry::{info, logging};
use common_telemetry::tracing_context::{FutureExt, TracingContext};
use common_telemetry::{info, logging, tracing};
use snafu::{ensure, ResultExt};
use tokio::sync::watch::{self, Receiver, Sender};
use tokio::sync::{Mutex as TokioMutex, Notify};
@@ -452,9 +453,19 @@ impl LocalManager {
            DuplicateProcedureSnafu { procedure_id },
        );

        let tracing_context = TracingContext::from_current_span();

        let _handle = common_runtime::spawn_bg(async move {
            // Run the root procedure.
            runner.run().await;
            // The task is moved to another runtime for execution. To avoid
            // breaking the trace, create a span that continues tracing the current task.
            runner
                .run()
                .trace(
                    tracing_context
                        .attach(tracing::info_span!("LocalManager::submit_root_procedure")),
                )
                .await;
        });

        Ok(watcher)
@@ -516,6 +527,13 @@ impl LocalManager {

        Ok(())
    }

    #[cfg(any(test, feature = "testing"))]
    /// Returns true if the manager contains the specified loader.
    pub fn contains_loader(&self, name: &str) -> bool {
        let loaders = self.manager_ctx.loaders.lock().unwrap();
        loaders.contains_key(name)
    }
}

#[async_trait]
@@ -14,21 +14,19 @@ common-error.workspace = true
console-subscriber = { version = "0.1", optional = true }
lazy_static.workspace = true
once_cell.workspace = true
opentelemetry = { version = "0.17", default-features = false, features = [
opentelemetry = { version = "0.21.0", default-features = false, features = [
    "trace",
    "rt-tokio",
] }
opentelemetry-jaeger = { version = "0.16", features = ["rt-tokio"] }
opentelemetry-otlp = { version = "0.14.0", features = ["tokio"] }
opentelemetry-semantic-conventions = "0.13.0"
opentelemetry_sdk = { version = "0.21.0", features = ["rt-tokio"] }
parking_lot = { version = "0.12" }
prometheus.workspace = true
rand.workspace = true
rs-snowflake = "0.6"
serde.workspace = true
tokio.workspace = true
tracing = "0.1"
tracing-appender = "0.2"
tracing-bunyan-formatter = "0.3"
tracing-futures = { version = "0.2", features = ["futures-03"] }
tracing-log = "0.1"
tracing-opentelemetry = "0.17"
tracing-opentelemetry = "0.22.0"
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
@@ -16,45 +16,9 @@ pub mod logging;
mod macros;
pub mod metric;
mod panic_hook;
pub mod tracing_context;

use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

pub use logging::{init_default_ut_logging, init_global_logging, trace_id, TRACE_ID};
pub use logging::{init_default_ut_logging, init_global_logging};
pub use metric::dump_metrics;
use once_cell::sync::OnceCell;
pub use panic_hook::set_panic_hook;
use parking_lot::Mutex;
use rand::random;
use snowflake::SnowflakeIdBucket;
pub use {common_error, tracing, tracing_appender, tracing_futures, tracing_subscriber};

static NODE_ID: OnceCell<u64> = OnceCell::new();
static TRACE_BUCKET: OnceCell<Mutex<SnowflakeIdBucket>> = OnceCell::new();

pub fn gen_trace_id() -> u64 {
    let mut bucket = TRACE_BUCKET
        .get_or_init(|| {
            // if node_id is not initialized, how about random one?
            let node_id = NODE_ID.get_or_init(|| 0);
            info!("initializing bucket with node_id: {}", node_id);
            let bucket = SnowflakeIdBucket::new(1, (*node_id) as i32);
            Mutex::new(bucket)
        })
        .lock();
    (*bucket).get_id() as u64
}

pub fn init_node_id(node_id: Option<String>) {
    let node_id = node_id.map(|id| calculate_hash(&id)).unwrap_or(random());
    match NODE_ID.set(node_id) {
        Ok(_) => {}
        Err(_) => warn!("node_id is already initialized"),
    }
}

fn calculate_hash<T: Hash>(t: &T) -> u64 {
    let mut s = DefaultHasher::new();
    t.hash(&mut s);
    s.finish()
}
pub use {common_error, tracing};
@@ -17,59 +17,54 @@ use std::env;
use std::sync::{Arc, Mutex, Once};

use once_cell::sync::Lazy;
use opentelemetry::global;
use opentelemetry::sdk::propagation::TraceContextPropagator;
use opentelemetry::{global, KeyValue};
use opentelemetry_otlp::WithExportConfig;
use opentelemetry_sdk::propagation::TraceContextPropagator;
use opentelemetry_sdk::trace::Sampler;
use opentelemetry_semantic_conventions::resource;
use serde::{Deserialize, Serialize};
pub use tracing::{event, span, Level};
use tracing_appender::non_blocking::WorkerGuard;
use tracing_appender::rolling::{RollingFileAppender, Rotation};
use tracing_bunyan_formatter::{BunyanFormattingLayer, JsonStorageLayer};
use tracing_log::LogTracer;
use tracing_subscriber::fmt::Layer;
use tracing_subscriber::layer::SubscriberExt;
use tracing_subscriber::prelude::*;
use tracing_subscriber::{filter, EnvFilter, Registry};

pub use crate::{debug, error, info, log, trace, warn};
pub use crate::{debug, error, info, trace, warn};

tokio::task_local! {
    /// Task local trace id. See [trace_id](crate::trace_id) for more details.
    pub static TRACE_ID: u64;
}
const DEFAULT_OTLP_ENDPOINT: &str = "http://localhost:4317";

/// Get current [TRACE_ID] from tokio [task_local](tokio::task_local) storage.
///
/// # Usage
/// To set current trace id, wrap your async code like this:
/// ```rust, no_run
/// common_telemetry::TRACE_ID
///     .scope(id, async move {
///         query_handler
///             .do_query(query, self.session.context())
///             .await
///     })
///     .await
/// ```
/// Then all functions called from this stack will be able to retrieve the trace id
/// via this method.
pub fn trace_id() -> Option<u64> {
    TRACE_ID.try_with(|id| Some(*id)).unwrap_or(None)
}

#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(default)]
pub struct LoggingOptions {
    pub dir: String,
    pub level: Option<String>,
    pub enable_jaeger_tracing: bool,
    pub enable_otlp_tracing: bool,
    pub otlp_endpoint: Option<String>,
    pub tracing_sample_ratio: Option<f64>,
}

impl PartialEq for LoggingOptions {
    fn eq(&self, other: &Self) -> bool {
        self.dir == other.dir
            && self.level == other.level
            && self.enable_otlp_tracing == other.enable_otlp_tracing
            && self.otlp_endpoint == other.otlp_endpoint
            && self.tracing_sample_ratio == other.tracing_sample_ratio
    }
}

impl Eq for LoggingOptions {}

impl Default for LoggingOptions {
    fn default() -> Self {
        Self {
            dir: "/tmp/greptimedb/logs".to_string(),
            level: None,
            enable_jaeger_tracing: false,
            enable_otlp_tracing: false,
            otlp_endpoint: None,
            tracing_sample_ratio: None,
        }
    }
}
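
A minimal sketch (not part of this diff) of how the new fields can be populated to turn on OTLP tracing; the endpoint is given without a scheme because `init_global_logging` below prefixes it with `http://`:

let opts = LoggingOptions {
    dir: "/tmp/greptimedb/logs".to_string(),
    level: Some("info".to_string()),
    enable_otlp_tracing: true,
    // Spans are exported to this OTLP gRPC endpoint; `None` falls back to
    // DEFAULT_OTLP_ENDPOINT (http://localhost:4317).
    otlp_endpoint: Some("localhost:4317".to_string()),
    // Sample 10% of traces; `None` falls back to Sampler::AlwaysOn.
    tracing_sample_ratio: Some(0.1),
};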
@@ -107,9 +102,10 @@ pub fn init_default_ut_logging() {
            "unittest",
            &opts,
            TracingOptions::default(),
            None
        ));

        info!("logs dir = {}", dir);
        crate::info!("logs dir = {}", dir);
    });
}

@@ -123,11 +119,12 @@ pub fn init_global_logging(
    app_name: &str,
    opts: &LoggingOptions,
    tracing_opts: TracingOptions,
    node_id: Option<String>,
) -> Vec<WorkerGuard> {
    let mut guards = vec![];
    let dir = &opts.dir;
    let level = &opts.level;
    let enable_jaeger_tracing = opts.enable_jaeger_tracing;
    let enable_otlp_tracing = opts.enable_otlp_tracing;

    // Enable log compatible layer to convert log record to tracing span.
    LogTracer::init().expect("log tracer must be valid");
@@ -140,7 +137,7 @@ pub fn init_global_logging(
    // JSON log layer.
    let rolling_appender = RollingFileAppender::new(Rotation::HOURLY, dir, app_name);
    let (rolling_writer, rolling_writer_guard) = tracing_appender::non_blocking(rolling_appender);
    let file_logging_layer = BunyanFormattingLayer::new(app_name.to_string(), rolling_writer);
    let file_logging_layer = Layer::new().with_writer(rolling_writer);
    guards.push(rolling_writer_guard);

    // error JSON log layer.
@@ -148,8 +145,7 @@ pub fn init_global_logging(
        RollingFileAppender::new(Rotation::HOURLY, dir, format!("{}-{}", app_name, "err"));
    let (err_rolling_writer, err_rolling_writer_guard) =
        tracing_appender::non_blocking(err_rolling_appender);
    let err_file_logging_layer =
        BunyanFormattingLayer::new(app_name.to_string(), err_rolling_writer);
    let err_file_logging_layer = Layer::new().with_writer(err_rolling_writer);
    guards.push(err_rolling_writer_guard);

    // resolve log level settings from:
@@ -164,7 +160,10 @@ pub fn init_global_logging(
    let filter = targets_string
        .parse::<filter::Targets>()
        .expect("error parsing log level string");

    let sampler = opts
        .tracing_sample_ratio
        .map(Sampler::TraceIdRatioBased)
        .unwrap_or(Sampler::AlwaysOn);
    // Must enable 'tokio_unstable' cfg to use this feature.
    // For example: `RUSTFLAGS="--cfg tokio_unstable" cargo run -F common-telemetry/console -- standalone start`
    #[cfg(feature = "tokio-console")]
@@ -191,7 +190,6 @@ pub fn init_global_logging(

        Registry::default()
            .with(tokio_console_layer)
            .with(JsonStorageLayer)
            .with(stdout_logging_layer)
            .with(file_logging_layer)
            .with(err_file_logging_layer.with_filter(filter::LevelFilter::ERROR))
@@ -203,20 +201,40 @@ pub fn init_global_logging(
    #[cfg(not(feature = "tokio-console"))]
    let subscriber = Registry::default()
        .with(filter)
        .with(JsonStorageLayer)
        .with(stdout_logging_layer)
        .with(file_logging_layer)
        .with(err_file_logging_layer.with_filter(filter::LevelFilter::ERROR));

    if enable_jaeger_tracing {
        // Jaeger layer.
    if enable_otlp_tracing {
        global::set_text_map_propagator(TraceContextPropagator::new());
        let tracer = opentelemetry_jaeger::new_pipeline()
            .with_service_name(app_name)
            .install_batch(opentelemetry::runtime::Tokio)
            .expect("install");
        let jaeger_layer = Some(tracing_opentelemetry::layer().with_tracer(tracer));
        let subscriber = subscriber.with(jaeger_layer);
        // otlp exporter
        let tracer = opentelemetry_otlp::new_pipeline()
            .tracing()
            .with_exporter(
                opentelemetry_otlp::new_exporter().tonic().with_endpoint(
                    opts.otlp_endpoint
                        .as_ref()
                        .map(|e| format!("http://{}", e))
                        .unwrap_or(DEFAULT_OTLP_ENDPOINT.to_string()),
                ),
            )
            .with_trace_config(
                opentelemetry_sdk::trace::config()
                    .with_sampler(sampler)
                    .with_resource(opentelemetry_sdk::Resource::new(vec![
                        KeyValue::new(resource::SERVICE_NAME, app_name.to_string()),
                        KeyValue::new(
                            resource::SERVICE_INSTANCE_ID,
                            node_id.unwrap_or("none".to_string()),
                        ),
                        KeyValue::new(resource::SERVICE_VERSION, env!("CARGO_PKG_VERSION")),
                        KeyValue::new(resource::PROCESS_PID, std::process::id().to_string()),
                    ])),
            )
            .install_batch(opentelemetry_sdk::runtime::Tokio)
            .expect("otlp tracer install failed");
        let tracing_layer = Some(tracing_opentelemetry::layer().with_tracer(tracer));
        let subscriber = subscriber.with(tracing_layer);
        tracing::subscriber::set_global_default(subscriber)
            .expect("error setting global tracing subscriber");
    } else {
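
A sketch of the corresponding call shape, reusing the `opts` value from the earlier example; `TracingOptions` is the same type passed by `init_default_ut_logging` above, and the returned guards must stay alive so the non-blocking file appenders keep flushing:

let _guards = common_telemetry::logging::init_global_logging(
    "datanode",
    &opts,
    TracingOptions::default(),
    // Becomes the service.instance.id resource attribute on exported spans.
    Some("node-0".to_string()),
);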
@@ -17,14 +17,12 @@
macro_rules! log {
    // log!(target: "my_target", Level::INFO, "a {} event", "log");
    (target: $target:expr, $lvl:expr, $($arg:tt)+) => {{
        let _trace_id = $crate::trace_id();
        $crate::logging::event!(target: $target, $lvl, trace_id = _trace_id, $($arg)+)
        $crate::tracing::event!(target: $target, $lvl, $($arg)+)
    }};

    // log!(Level::INFO, "a log event")
    ($lvl:expr, $($arg:tt)+) => {{
        let _trace_id = $crate::trace_id();
        $crate::logging::event!($lvl, trace_id = _trace_id, $($arg)+)
        $crate::tracing::event!($lvl, $($arg)+)
    }};
}

@@ -33,14 +31,14 @@ macro_rules! log {
macro_rules! error {
    // error!(target: "my_target", "a {} event", "log")
    (target: $target:expr, $($arg:tt)+) => ({
        $crate::log!(target: $target, $crate::logging::Level::ERROR, $($arg)+)
        $crate::log!(target: $target, $crate::tracing::Level::ERROR, $($arg)+)
    });

    // error!(e; target: "my_target", "a {} event", "log")
    ($e:expr; target: $target:expr, $($arg:tt)+) => ({
        $crate::log!(
            target: $target,
            $crate::logging::Level::ERROR,
            $crate::tracing::Level::ERROR,
            err = ?$e,
            $($arg)+
        )
@@ -50,7 +48,7 @@ macro_rules! error {
    (%$e:expr; target: $target:expr, $($arg:tt)+) => ({
        $crate::log!(
            target: $target,
            $crate::logging::Level::ERROR,
            $crate::tracing::Level::ERROR,
            err = %$e,
            $($arg)+
        )
@@ -59,7 +57,7 @@ macro_rules! error {
    // error!(e; "a {} event", "log")
    ($e:expr; $($arg:tt)+) => ({
        $crate::log!(
            $crate::logging::Level::ERROR,
            $crate::tracing::Level::ERROR,
            err = ?$e,
            $($arg)+
        )
@@ -68,7 +66,7 @@ macro_rules! error {
    // error!(%e; "a {} event", "log")
    (%$e:expr; $($arg:tt)+) => ({
        $crate::log!(
            $crate::logging::Level::ERROR,
            $crate::tracing::Level::ERROR,
            err = %$e,
            $($arg)+
        )
@@ -76,7 +74,7 @@ macro_rules! error {

    // error!("a {} event", "log")
    ($($arg:tt)+) => ({
        $crate::log!($crate::logging::Level::ERROR, $($arg)+)
        $crate::log!($crate::tracing::Level::ERROR, $($arg)+)
    });
}

@@ -85,13 +83,13 @@ macro_rules! error {
macro_rules! warn {
    // warn!(target: "my_target", "a {} event", "log")
    (target: $target:expr, $($arg:tt)+) => {
        $crate::log!(target: $target, $crate::logging::Level::WARN, $($arg)+)
        $crate::log!(target: $target, $crate::tracing::Level::WARN, $($arg)+)
    };

    // warn!(e; "a {} event", "log")
    ($e:expr; $($arg:tt)+) => ({
        $crate::log!(
            $crate::logging::Level::WARN,
            $crate::tracing::Level::WARN,
            err = ?$e,
            $($arg)+
        )
@@ -100,7 +98,7 @@ macro_rules! warn {
    // warn!(%e; "a {} event", "log")
    (%$e:expr; $($arg:tt)+) => ({
        $crate::log!(
            $crate::logging::Level::WARN,
            $crate::tracing::Level::WARN,
            err = %$e,
            $($arg)+
        )
@@ -108,7 +106,7 @@ macro_rules! warn {

    // warn!("a {} event", "log")
    ($($arg:tt)+) => {
        $crate::log!($crate::logging::Level::WARN, $($arg)+)
        $crate::log!($crate::tracing::Level::WARN, $($arg)+)
    };
}

@@ -117,12 +115,12 @@ macro_rules! warn {
macro_rules! info {
    // info!(target: "my_target", "a {} event", "log")
    (target: $target:expr, $($arg:tt)+) => {
        $crate::log!(target: $target, $crate::logging::Level::INFO, $($arg)+)
        $crate::log!(target: $target, $crate::tracing::Level::INFO, $($arg)+)
    };

    // info!("a {} event", "log")
    ($($arg:tt)+) => {
        $crate::log!($crate::logging::Level::INFO, $($arg)+)
        $crate::log!($crate::tracing::Level::INFO, $($arg)+)
    };
}

@@ -131,12 +129,12 @@ macro_rules! info {
macro_rules! debug {
    // debug!(target: "my_target", "a {} event", "log")
    (target: $target:expr, $($arg:tt)+) => {
        $crate::log!(target: $target, $crate::logging::Level::DEBUG, $($arg)+)
        $crate::log!(target: $target, $crate::tracing::Level::DEBUG, $($arg)+)
    };

    // debug!("a {} event", "log")
    ($($arg:tt)+) => {
        $crate::log!($crate::logging::Level::DEBUG, $($arg)+)
        $crate::log!($crate::tracing::Level::DEBUG, $($arg)+)
    };
}

@@ -145,12 +143,12 @@ macro_rules! debug {
macro_rules! trace {
    // trace!(target: "my_target", "a {} event", "log")
    (target: $target:expr, $($arg:tt)+) => {
        $crate::log!(target: $target, $crate::logging::Level::TRACE, $($arg)+)
        $crate::log!(target: $target, $crate::tracing::Level::TRACE, $($arg)+)
    };

    // trace!("a {} event", "log")
    ($($arg:tt)+) => {
        $crate::log!($crate::logging::Level::TRACE, $($arg)+)
        $crate::log!($crate::tracing::Level::TRACE, $($arg)+)
    };
}

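Call sites are unchanged by this rework; a brief usage sketch, with levels now resolving through `$crate::tracing::Level` instead of carrying a task-local trace id:

error!(target: "my_target", "a {} event", "log");
warn!(e; "a {} event", "log"); // records `err = ?e` as a structured field
info!("a {} event", "log");
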
@@ -158,8 +156,7 @@ macro_rules! trace {
mod tests {
    use common_error::mock::MockError;
    use common_error::status_code::StatusCode;

    use crate::logging::Level;
    use tracing::Level;

    macro_rules! all_log_macros {
        ($($arg:tt)*) => {

92
src/common/telemetry/src/tracing_context.rs
Normal file
@@ -0,0 +1,92 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Tracing utilities, inspired by RisingWave.

use std::collections::HashMap;

use opentelemetry::propagation::TextMapPropagator;
use opentelemetry_sdk::propagation::TraceContextPropagator;
use tracing_opentelemetry::OpenTelemetrySpanExt;

// A wrapper for `Future`s that provides tracing instrument adapters.
pub trait FutureExt: std::future::Future + Sized {
    fn trace(self, span: tracing::span::Span) -> tracing::instrument::Instrumented<Self>;
}

impl<T: std::future::Future> FutureExt for T {
    #[inline]
    fn trace(self, span: tracing::span::Span) -> tracing::instrument::Instrumented<Self> {
        tracing::instrument::Instrument::instrument(self, span)
    }
}

/// Context for tracing used for propagating tracing information in a distributed system.
///
/// Generally, the caller of a service should create a tracing context from the current tracing span
/// and pass it to the callee through the network. The callee will then attach its local tracing
/// span as a child of the tracing context, so that the external tracing service can associate them
/// in a single trace.
///
/// The tracing context must be serialized into the W3C trace context format and passed in rpc
/// message headers when the frontend, datanode, and meta services communicate with each other.
///
/// See [Trace Context](https://www.w3.org/TR/trace-context/) for more information.
#[derive(Debug, Clone)]
pub struct TracingContext(opentelemetry::Context);

pub type W3cTrace = HashMap<String, String>;

impl Default for TracingContext {
    fn default() -> Self {
        Self::new()
    }
}

type Propagator = TraceContextPropagator;

impl TracingContext {
    /// Create a new tracing context from a tracing span.
    pub fn from_span(span: &tracing::Span) -> Self {
        Self(span.context())
    }

    /// Create a new tracing context from the current tracing span considered by the subscriber.
    pub fn from_current_span() -> Self {
        Self::from_span(&tracing::Span::current())
    }

    /// Create a no-op tracing context.
    pub fn new() -> Self {
        Self(opentelemetry::Context::new())
    }

    /// Attach the given span as a child of the context. Returns the attached span.
    pub fn attach(&self, span: tracing::Span) -> tracing::Span {
        span.set_parent(self.0.clone());
        span
    }

    /// Convert the tracing context to the W3C trace context format.
    pub fn to_w3c(&self) -> W3cTrace {
        let mut fields = HashMap::new();
        Propagator::new().inject_context(&self.0, &mut fields);
        fields
    }

    /// Create a new tracing context from the W3C trace context format.
    pub fn from_w3c(fields: &W3cTrace) -> Self {
        let context = Propagator::new().extract(fields);
        Self(context)
    }
}

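An end-to-end usage sketch for the new file; `rpc_send` and `do_work` are hypothetical placeholders for the real transport and handler. The caller serializes its span context into W3C headers, and the callee re-attaches a child span, the same pattern as the `LocalManager::submit_root_procedure` span earlier in this change:

use common_telemetry::tracing_context::{FutureExt, TracingContext, W3cTrace};

async fn rpc_send(_headers: W3cTrace) { /* placeholder for the real RPC client */ }
async fn do_work() { /* placeholder for the actual handler body */ }

// Caller side: capture the current span and ship it in message headers.
async fn call_remote_service() {
    let headers: W3cTrace = TracingContext::from_current_span().to_w3c();
    rpc_send(headers).await;
}

// Callee side: rebuild the context and run the handler under a child span.
async fn handle_request(headers: W3cTrace) {
    let tracing_context = TracingContext::from_w3c(&headers);
    do_work()
        .trace(tracing_context.attach(tracing::info_span!("handle_request")))
        .await;
}
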
@@ -15,10 +15,11 @@
use std::fmt::{Display, Formatter};
use std::str::FromStr;

use chrono::{LocalResult, NaiveDateTime};
use chrono::{LocalResult, NaiveDateTime, TimeZone as ChronoTimeZone, Utc};
use serde::{Deserialize, Serialize};

use crate::error::{Error, InvalidDateStrSnafu, Result};
use crate::timezone::TimeZone;
use crate::util::{format_utc_datetime, local_datetime_to_utc};
use crate::Date;

@@ -108,6 +109,15 @@ impl DateTime {
        NaiveDateTime::from_timestamp_millis(self.0)
    }

    pub fn to_chrono_datetime_with_timezone(&self, tz: Option<TimeZone>) -> Option<NaiveDateTime> {
        let datetime = self.to_chrono_datetime();
        datetime.map(|v| match tz {
            Some(TimeZone::Offset(offset)) => offset.from_utc_datetime(&v).naive_local(),
            Some(TimeZone::Named(tz)) => tz.from_utc_datetime(&v).naive_local(),
            None => Utc.from_utc_datetime(&v).naive_local(),
        })
    }

    /// Convert to [common_time::date].
    pub fn to_date(&self) -> Option<Date> {
        self.to_chrono_datetime().map(|d| Date::from(d.date()))

@@ -21,7 +21,7 @@ use std::time::Duration;

use arrow::datatypes::TimeUnit as ArrowTimeUnit;
use chrono::{
    DateTime, LocalResult, NaiveDate, NaiveDateTime, NaiveTime, TimeZone as ChronoTimeZone,
    DateTime, LocalResult, NaiveDate, NaiveDateTime, NaiveTime, TimeZone as ChronoTimeZone, Utc,
};
use serde::{Deserialize, Serialize};
use snafu::{OptionExt, ResultExt};
@@ -252,6 +252,15 @@ impl Timestamp {
        NaiveDateTime::from_timestamp_opt(sec, nsec)
    }

    pub fn to_chrono_datetime_with_timezone(&self, tz: Option<TimeZone>) -> Option<NaiveDateTime> {
        let datetime = self.to_chrono_datetime();
        datetime.map(|v| match tz {
            Some(TimeZone::Offset(offset)) => offset.from_utc_datetime(&v).naive_local(),
            Some(TimeZone::Named(tz)) => tz.from_utc_datetime(&v).naive_local(),
            None => Utc.from_utc_datetime(&v).naive_local(),
        })
    }

    /// Convert timestamp to chrono date.
    pub fn to_chrono_date(&self) -> Option<NaiveDate> {
        self.to_chrono_datetime().map(|ndt| ndt.date())

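A usage sketch for the new helper (assuming `Timestamp::new_second` and the `chrono_tz` crate as the backing of `TimeZone::Named`; not part of this diff): the same epoch instant renders as a different wall-clock time depending on the zone passed in.

use common_time::timezone::TimeZone;
use common_time::Timestamp;

// 2023-01-01T00:00:00 UTC, in seconds.
let ts = Timestamp::new_second(1672531200);

// No zone given: interpreted as UTC.
assert_eq!(
    ts.to_chrono_datetime_with_timezone(None).unwrap().to_string(),
    "2023-01-01 00:00:00"
);

// Named zone: shifted to local wall-clock time (UTC+8 here).
let tz = TimeZone::Named(chrono_tz::Tz::Asia__Shanghai);
assert_eq!(
    ts.to_chrono_datetime_with_timezone(Some(tz)).unwrap().to_string(),
    "2023-01-01 08:00:00"
);
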
@@ -4,9 +4,6 @@ version.workspace = true
edition.workspace = true
license.workspace = true

[features]
testing = []

[dependencies]
api.workspace = true
arrow-flight.workspace = true
@@ -17,6 +14,7 @@ axum = "0.6"
axum-macros = "0.3"
bytes = "1.1"
catalog.workspace = true
client.workspace = true
common-base.workspace = true
common-catalog.workspace = true
common-config.workspace = true
@@ -61,7 +59,6 @@ servers.workspace = true
session.workspace = true
snafu.workspace = true
sql.workspace = true
storage.workspace = true
store-api.workspace = true
substrait.workspace = true
table.workspace = true
@@ -77,7 +74,9 @@ uuid.workspace = true
[dev-dependencies]
axum-test-helper = { git = "https://github.com/sunng87/axum-test-helper.git", branch = "patch-1" }
client.workspace = true
common-meta = { workspace = true, features = ["testing"] }
common-query.workspace = true
common-test-util.workspace = true
datafusion-common.workspace = true
mito2 = { workspace = true, features = ["test"] }
session.workspace = true

@@ -129,8 +129,12 @@ impl RegionAliveKeeper {
        let (role, region_id) = (region.role().into(), RegionId::from(region.region_id));
        if let Some(handle) = self.find_handle(region_id).await {
            handle.reset_deadline(role, deadline).await;
        } else {
            warn!(
                "Trying to renew the lease for region {region_id}, but the keeper handle was not found!"
            );
            // Else the region alive keeper might be triggered by lagging messages; we can safely ignore it.
        }
        // Else the region alive keeper might be triggered by lagging messages; we can safely ignore it.
    }
}

@@ -373,12 +377,16 @@ impl CountdownTask {
                    countdown.set(tokio::time::sleep_until(first_deadline));
                },
                Some(CountdownCommand::Reset((role, deadline))) => {
                    // The first-time granted regions might be ignored because the `first_deadline` is larger than the `region_lease_timeout`.
                    // Therefore, we set the region writable outside the deadline check.
                    // TODO(weny): Consider setting `first_deadline` to `region_lease_timeout`.
                    let _ = self.region_server.set_writable(self.region_id, role.writable());

                    if countdown.deadline() < deadline {
                        trace!(
                            "Reset deadline of region {region_id} to approximately {} seconds later",
                            (deadline - Instant::now()).as_secs_f32(),
                        );
                        let _ = self.region_server.set_writable(self.region_id, role.writable());
                        countdown.set(tokio::time::sleep_until(deadline));
                    }
                    // Else the countdown could be either:

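The deadline check above only ever extends the countdown, so a lagging lease message cannot shorten an already-granted deadline. A self-contained sketch of that pattern (names are illustrative, not from the codebase):

use tokio::time::{sleep_until, Duration, Instant};

async fn run_countdown(mut resets: tokio::sync::mpsc::Receiver<Instant>) {
    let countdown = sleep_until(Instant::now() + Duration::from_secs(20));
    tokio::pin!(countdown);
    loop {
        tokio::select! {
            Some(deadline) = resets.recv() => {
                // Only move the deadline forward, never backward.
                if countdown.deadline() < deadline {
                    countdown.as_mut().reset(deadline);
                }
            }
            () = &mut countdown => break, // lease expired
        }
    }
}
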
@@ -31,11 +31,6 @@ use serde::{Deserialize, Serialize};
use servers::heartbeat_options::HeartbeatOptions;
use servers::http::HttpOptions;
use servers::Mode;
use storage::config::{
    EngineConfig as StorageEngineConfig, DEFAULT_AUTO_FLUSH_INTERVAL, DEFAULT_MAX_FLUSH_TASKS,
    DEFAULT_PICKER_SCHEDULE_INTERVAL, DEFAULT_REGION_WRITE_BUFFER_SIZE,
};
use storage::scheduler::SchedulerConfig;

pub const DEFAULT_OBJECT_STORE_CACHE_SIZE: ReadableSize = ReadableSize::mb(256);

@@ -68,9 +63,6 @@ pub struct StorageConfig {
    pub data_home: String,
    #[serde(flatten)]
    pub store: ObjectStoreConfig,
    pub compaction: CompactionConfig,
    pub manifest: RegionManifestConfig,
    pub flush: FlushConfig,
}

impl Default for StorageConfig {
@@ -79,9 +71,6 @@ impl Default for StorageConfig {
            global_ttl: None,
            data_home: DEFAULT_DATA_HOME.to_string(),
            store: ObjectStoreConfig::default(),
            compaction: CompactionConfig::default(),
            manifest: RegionManifestConfig::default(),
            flush: FlushConfig::default(),
        }
    }
}
@@ -216,109 +205,6 @@ impl Default for ObjectStoreConfig {
    }
}

/// Options for region manifest
#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)]
#[serde(default)]
pub struct RegionManifestConfig {
    /// Region manifest checkpoint actions margin.
    /// The manifest service creates a checkpoint every `checkpoint_margin` actions.
    pub checkpoint_margin: Option<u16>,
    /// Region manifest logs and checkpoints gc task execution duration.
    #[serde(with = "humantime_serde")]
    pub gc_duration: Option<Duration>,
    /// Whether to compress manifest and checkpoint files with gzip.
    pub compress: bool,
}

impl Default for RegionManifestConfig {
    fn default() -> Self {
        Self {
            checkpoint_margin: Some(10u16),
            gc_duration: Some(Duration::from_secs(600)),
            compress: false,
        }
    }
}

/// Options for table compaction
#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)]
#[serde(default)]
pub struct CompactionConfig {
    /// Max task number that can concurrently run.
    pub max_inflight_tasks: usize,
    /// Max files in level 0 to trigger compaction.
    pub max_files_in_level0: usize,
    /// Max task number for SST purge tasks after compaction.
    pub max_purge_tasks: usize,
}

impl Default for CompactionConfig {
    fn default() -> Self {
        Self {
            max_inflight_tasks: 4,
            max_files_in_level0: 8,
            max_purge_tasks: 32,
        }
    }
}

#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)]
#[serde(default)]
pub struct FlushConfig {
    /// Max inflight flush tasks.
    pub max_flush_tasks: usize,
    /// Default write buffer size for a region.
    pub region_write_buffer_size: ReadableSize,
    /// Interval to schedule the auto-flush picker that finds regions to flush.
    #[serde(with = "humantime_serde")]
    pub picker_schedule_interval: Duration,
    /// Interval to auto flush a region if it has not flushed yet.
    #[serde(with = "humantime_serde")]
    pub auto_flush_interval: Duration,
    /// Global write buffer size for all regions.
    pub global_write_buffer_size: Option<ReadableSize>,
}

impl Default for FlushConfig {
    fn default() -> Self {
        Self {
            max_flush_tasks: DEFAULT_MAX_FLUSH_TASKS,
            region_write_buffer_size: DEFAULT_REGION_WRITE_BUFFER_SIZE,
            picker_schedule_interval: Duration::from_millis(
                DEFAULT_PICKER_SCHEDULE_INTERVAL.into(),
            ),
            auto_flush_interval: Duration::from_millis(DEFAULT_AUTO_FLUSH_INTERVAL.into()),
            global_write_buffer_size: None,
        }
    }
}

impl From<&DatanodeOptions> for SchedulerConfig {
    fn from(value: &DatanodeOptions) -> Self {
        Self {
            max_inflight_tasks: value.storage.compaction.max_inflight_tasks,
        }
    }
}

impl From<&DatanodeOptions> for StorageEngineConfig {
    fn from(value: &DatanodeOptions) -> Self {
        Self {
            compress_manifest: value.storage.manifest.compress,
            manifest_checkpoint_margin: value.storage.manifest.checkpoint_margin,
            manifest_gc_duration: value.storage.manifest.gc_duration,
            max_files_in_l0: value.storage.compaction.max_files_in_level0,
            max_purge_tasks: value.storage.compaction.max_purge_tasks,
            max_flush_tasks: value.storage.flush.max_flush_tasks,
            region_write_buffer_size: value.storage.flush.region_write_buffer_size,
            picker_schedule_interval: value.storage.flush.picker_schedule_interval,
            auto_flush_interval: value.storage.flush.auto_flush_interval,
            global_write_buffer_size: value.storage.flush.global_write_buffer_size,
            global_ttl: value.storage.global_ttl,
        }
    }
}

#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(default)]
pub struct DatanodeOptions {

|
||||
//! Datanode implementation.
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::net::SocketAddr;
|
||||
use std::path::Path;
|
||||
use std::sync::Arc;
|
||||
|
||||
use catalog::kvbackend::MetaKvBackend;
|
||||
use catalog::memory::MemoryCatalogManager;
|
||||
use common_base::Plugins;
|
||||
use common_error::ext::BoxedError;
|
||||
@@ -26,8 +27,9 @@ use common_meta::key::datanode_table::DatanodeTableManager;
|
||||
use common_meta::kv_backend::KvBackendRef;
|
||||
pub use common_procedure::options::ProcedureConfig;
|
||||
use common_runtime::Runtime;
|
||||
use common_telemetry::{error, info};
|
||||
use common_telemetry::{error, info, warn};
|
||||
use file_engine::engine::FileRegionEngine;
|
||||
use futures::future;
|
||||
use futures_util::future::try_join_all;
|
||||
use futures_util::StreamExt;
|
||||
use log_store::raft_engine::log_store::RaftEngineLogStore;
|
||||
@@ -36,6 +38,10 @@ use mito2::engine::MitoEngine;
|
||||
use object_store::manager::{ObjectStoreManager, ObjectStoreManagerRef};
|
||||
use object_store::util::normalize_dir;
|
||||
use query::QueryEngineFactory;
|
||||
use servers::grpc::{GrpcServer, GrpcServerConfig};
|
||||
use servers::http::HttpServerBuilder;
|
||||
use servers::metrics_handler::MetricsHandler;
|
||||
use servers::server::{start_server, ServerHandler, ServerHandlers};
|
||||
use servers::Mode;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use store_api::logstore::LogStore;
|
||||
@@ -48,26 +54,26 @@ use tokio::sync::Notify;
|
||||
|
||||
use crate::config::{DatanodeOptions, RegionEngineConfig};
|
||||
use crate::error::{
|
||||
CreateDirSnafu, GetMetadataSnafu, MissingKvBackendSnafu, MissingMetaClientSnafu,
|
||||
MissingMetasrvOptsSnafu, MissingNodeIdSnafu, OpenLogStoreSnafu, Result, RuntimeResourceSnafu,
|
||||
ShutdownInstanceSnafu,
|
||||
CreateDirSnafu, GetMetadataSnafu, MissingKvBackendSnafu, MissingNodeIdSnafu, OpenLogStoreSnafu,
|
||||
ParseAddrSnafu, Result, RuntimeResourceSnafu, ShutdownInstanceSnafu, ShutdownServerSnafu,
|
||||
StartServerSnafu,
|
||||
};
|
||||
use crate::event_listener::{
|
||||
new_region_server_event_channel, NoopRegionServerEventListener, RegionServerEventListenerRef,
|
||||
RegionServerEventReceiver,
|
||||
};
|
||||
use crate::greptimedb_telemetry::get_greptimedb_telemetry_task;
|
||||
use crate::heartbeat::{new_metasrv_client, HeartbeatTask};
|
||||
use crate::region_server::RegionServer;
|
||||
use crate::server::Services;
|
||||
use crate::heartbeat::HeartbeatTask;
|
||||
use crate::region_server::{DummyTableProviderFactory, RegionServer};
|
||||
use crate::store;
|
||||
|
||||
const OPEN_REGION_PARALLELISM: usize = 16;
|
||||
const REGION_SERVER_SERVICE_NAME: &str = "REGION_SERVER_SERVICE";
|
||||
const DATANODE_HTTP_SERVICE_NAME: &str = "DATANODE_HTTP_SERVICE";
|
||||
|
||||
/// Datanode service.
|
||||
pub struct Datanode {
|
||||
opts: DatanodeOptions,
|
||||
services: Option<Services>,
|
||||
services: ServerHandlers,
|
||||
heartbeat_task: Option<HeartbeatTask>,
|
||||
region_event_receiver: Option<RegionServerEventReceiver>,
|
||||
region_server: RegionServer,
|
||||
@@ -83,10 +89,17 @@ impl Datanode {
|
||||
self.start_heartbeat().await?;
|
||||
self.wait_coordinated().await;
|
||||
|
||||
let _ = self.greptimedb_telemetry_task.start();
|
||||
self.start_telemetry();
|
||||
|
||||
self.start_services().await
|
||||
}
|
||||
|
||||
pub fn start_telemetry(&self) {
|
||||
if let Err(e) = self.greptimedb_telemetry_task.start() {
|
||||
warn!(e; "Failed to start telemetry task!");
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn start_heartbeat(&mut self) -> Result<()> {
|
||||
if let Some(task) = &self.heartbeat_task {
|
||||
// Safety: The event_receiver must exist.
|
||||
@@ -106,19 +119,17 @@ impl Datanode {
|
||||
|
||||
/// Start services of datanode. This method call will block until services are shutdown.
|
||||
pub async fn start_services(&mut self) -> Result<()> {
|
||||
if let Some(service) = self.services.as_mut() {
|
||||
service.start(&self.opts).await
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
let _ = future::try_join_all(self.services.values().map(start_server))
|
||||
.await
|
||||
.context(StartServerSnafu)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn shutdown_services(&self) -> Result<()> {
|
||||
if let Some(service) = self.services.as_ref() {
|
||||
service.shutdown().await
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
let _ = future::try_join_all(self.services.values().map(|server| server.0.shutdown()))
|
||||
.await
|
||||
.context(ShutdownServerSnafu)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn shutdown(&self) -> Result<()> {
|
||||
@@ -150,17 +161,21 @@ pub struct DatanodeBuilder {
|
||||
plugins: Plugins,
|
||||
meta_client: Option<MetaClient>,
|
||||
kv_backend: Option<KvBackendRef>,
|
||||
enable_region_server_service: bool,
|
||||
enable_http_service: bool,
|
||||
}
|
||||
|
||||
impl DatanodeBuilder {
|
||||
/// `kv_backend` is optional. If absent, the builder will try to build one
|
||||
/// by using the given `opts`
|
||||
pub fn new(opts: DatanodeOptions, kv_backend: Option<KvBackendRef>, plugins: Plugins) -> Self {
|
||||
pub fn new(opts: DatanodeOptions, plugins: Plugins) -> Self {
|
||||
Self {
|
||||
opts,
|
||||
plugins,
|
||||
meta_client: None,
|
||||
kv_backend,
|
||||
kv_backend: None,
|
||||
enable_region_server_service: false,
|
||||
enable_http_service: false,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -171,76 +186,63 @@ impl DatanodeBuilder {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn with_kv_backend(self, kv_backend: KvBackendRef) -> Self {
|
||||
Self {
|
||||
kv_backend: Some(kv_backend),
|
||||
..self
|
||||
}
|
||||
}
|
||||
|
||||
pub fn enable_region_server_service(self) -> Self {
|
||||
Self {
|
||||
enable_region_server_service: true,
|
||||
..self
|
||||
}
|
||||
}
|
||||
|
||||
pub fn enable_http_service(self) -> Self {
|
||||
Self {
|
||||
enable_http_service: true,
|
||||
..self
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn build(mut self) -> Result<Datanode> {
|
||||
let mode = &self.opts.mode;
|
||||
|
||||
// build meta client
|
||||
let meta_client = match mode {
|
||||
Mode::Distributed => {
|
||||
let meta_client = if let Some(meta_client) = self.meta_client.take() {
|
||||
meta_client
|
||||
} else {
|
||||
let node_id = self.opts.node_id.context(MissingNodeIdSnafu)?;
|
||||
let meta_client = self.meta_client.take();
|
||||
|
||||
let meta_config = self
|
||||
.opts
|
||||
.meta_client
|
||||
.as_ref()
|
||||
.context(MissingMetasrvOptsSnafu)?;
|
||||
// If metasrv client is provided, we will use it to control the region server.
|
||||
// Otherwise the region server is self-controlled, meaning no heartbeat and immediately
|
||||
// writable upon open.
|
||||
let controlled_by_metasrv = meta_client.is_some();
|
||||
|
||||
new_metasrv_client(node_id, meta_config).await?
|
||||
};
|
||||
Some(meta_client)
|
||||
}
|
||||
Mode::Standalone => None,
|
||||
};
|
||||
|
||||
// build kv-backend
|
||||
let kv_backend = match mode {
|
||||
Mode::Distributed => Arc::new(MetaKvBackend {
|
||||
client: Arc::new(meta_client.clone().context(MissingMetaClientSnafu)?),
|
||||
}),
|
||||
Mode::Standalone => self.kv_backend.clone().context(MissingKvBackendSnafu)?,
|
||||
};
|
||||
let kv_backend = self.kv_backend.take().context(MissingKvBackendSnafu)?;
|
||||
|
||||
// build and initialize region server
|
||||
let log_store = Self::build_log_store(&self.opts).await?;
|
||||
let (region_event_listener, region_event_receiver) = match mode {
|
||||
Mode::Distributed => {
|
||||
let (tx, rx) = new_region_server_event_channel();
|
||||
(Box::new(tx) as RegionServerEventListenerRef, Some(rx))
|
||||
}
|
||||
Mode::Standalone => (
|
||||
Box::new(NoopRegionServerEventListener) as RegionServerEventListenerRef,
|
||||
None,
|
||||
),
|
||||
|
||||
let (region_event_listener, region_event_receiver) = if controlled_by_metasrv {
|
||||
let (tx, rx) = new_region_server_event_channel();
|
||||
(Box::new(tx) as _, Some(rx))
|
||||
} else {
|
||||
(Box::new(NoopRegionServerEventListener) as _, None)
|
||||
};
|
||||
|
||||
let region_server = Self::new_region_server(
|
||||
&self.opts,
|
||||
self.plugins.clone(),
|
||||
log_store,
|
||||
region_event_listener,
|
||||
)
|
||||
.await?;
|
||||
self.initialize_region_server(®ion_server, kv_backend, matches!(mode, Mode::Standalone))
|
||||
let region_server = self
|
||||
.new_region_server(log_store, region_event_listener)
|
||||
.await?;
|
||||
|
||||
let heartbeat_task = match mode {
|
||||
Mode::Distributed => {
|
||||
let meta_client = meta_client.context(MissingMetaClientSnafu)?;
|
||||
self.initialize_region_server(®ion_server, kv_backend, !controlled_by_metasrv)
|
||||
.await?;
|
||||
|
||||
let heartbeat_task =
|
||||
HeartbeatTask::try_new(&self.opts, region_server.clone(), meta_client).await?;
|
||||
Some(heartbeat_task)
|
||||
}
|
||||
Mode::Standalone => None,
|
||||
let heartbeat_task = if let Some(meta_client) = meta_client {
|
||||
Some(HeartbeatTask::try_new(&self.opts, region_server.clone(), meta_client).await?)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let services = match mode {
|
||||
Mode::Distributed => Some(Services::try_new(region_server.clone(), &self.opts).await?),
|
||||
Mode::Standalone => None,
|
||||
};
|
||||
let services = self.create_datanode_services(®ion_server)?;
|
||||
|
||||
let greptimedb_telemetry_task = get_greptimedb_telemetry_task(
|
||||
Some(self.opts.storage.data_home.clone()),
|
||||
@@ -257,7 +259,6 @@ impl DatanodeBuilder {
|
||||
};
|
||||
|
||||
Ok(Datanode {
|
||||
opts: self.opts,
|
||||
services,
|
||||
heartbeat_task,
|
||||
region_server,
|
||||
@@ -268,6 +269,68 @@ impl DatanodeBuilder {
|
||||
})
|
||||
}
|
||||
|
||||
fn create_datanode_services(&self, region_server: &RegionServer) -> Result<ServerHandlers> {
|
||||
let mut services = HashMap::new();
|
||||
|
||||
if self.enable_region_server_service {
|
||||
services.insert(
|
||||
REGION_SERVER_SERVICE_NAME.to_string(),
|
||||
self.create_region_server_service(region_server)?,
|
||||
);
|
||||
}
|
||||
|
||||
if self.enable_http_service {
|
||||
services.insert(
|
||||
DATANODE_HTTP_SERVICE_NAME.to_string(),
|
||||
self.create_http_service()?,
|
||||
);
|
||||
}
|
||||
|
||||
Ok(services)
|
||||
}
|
||||
|
||||
fn create_region_server_service(&self, region_server: &RegionServer) -> Result<ServerHandler> {
|
||||
let opts = &self.opts;
|
||||
|
||||
let config = GrpcServerConfig {
|
||||
max_recv_message_size: opts.rpc_max_recv_message_size.as_bytes() as usize,
|
||||
max_send_message_size: opts.rpc_max_send_message_size.as_bytes() as usize,
|
||||
};
|
||||
|
||||
let server = Box::new(GrpcServer::new(
|
||||
Some(config),
|
||||
None,
|
||||
None,
|
||||
Some(Arc::new(region_server.clone()) as _),
|
||||
Some(Arc::new(region_server.clone()) as _),
|
||||
None,
|
||||
region_server.runtime(),
|
||||
));
|
||||
|
||||
let addr: SocketAddr = opts.rpc_addr.parse().context(ParseAddrSnafu {
|
||||
addr: &opts.rpc_addr,
|
||||
})?;
|
||||
|
||||
Ok((server, addr))
|
||||
}
|
||||
|
||||
fn create_http_service(&self) -> Result<ServerHandler> {
|
||||
let opts = &self.opts;
|
||||
|
||||
let server = Box::new(
|
||||
HttpServerBuilder::new(opts.http.clone())
|
||||
.with_metrics_handler(MetricsHandler)
|
||||
.with_greptime_config_options(opts.to_toml_string())
|
||||
.build(),
|
||||
);
|
||||
|
||||
let addr = opts.http.addr.parse().context(ParseAddrSnafu {
|
||||
addr: &opts.http.addr,
|
||||
})?;
|
||||
|
||||
Ok((server, addr))
|
||||
}
|
||||
|
||||
/// Open all regions belong to this datanode.
|
||||
async fn initialize_region_server(
|
||||
&self,
|
||||
@@ -329,18 +392,19 @@ impl DatanodeBuilder {
|
||||
}
|
||||
|
||||
async fn new_region_server(
|
||||
opts: &DatanodeOptions,
|
||||
plugins: Plugins,
|
||||
&self,
|
||||
log_store: Arc<RaftEngineLogStore>,
|
||||
event_listener: RegionServerEventListenerRef,
|
||||
) -> Result<RegionServer> {
|
||||
let opts = &self.opts;
|
||||
|
||||
let query_engine_factory = QueryEngineFactory::new_with_plugins(
|
||||
// query engine in datanode only executes plan with resolved table source.
|
||||
MemoryCatalogManager::with_default_setup(),
|
||||
None,
|
||||
None,
|
||||
false,
|
||||
plugins,
|
||||
self.plugins.clone(),
|
||||
);
|
||||
let query_engine = query_engine_factory.query_engine();
|
||||
|
||||
@@ -352,8 +416,15 @@ impl DatanodeBuilder {
|
||||
.context(RuntimeResourceSnafu)?,
|
||||
);
|
||||
|
||||
let mut region_server =
|
||||
RegionServer::new(query_engine.clone(), runtime.clone(), event_listener);
|
||||
let table_provider_factory = Arc::new(DummyTableProviderFactory);
|
||||
|
||||
let mut region_server = RegionServer::with_table_provider(
|
||||
query_engine,
|
||||
runtime,
|
||||
event_listener,
|
||||
table_provider_factory,
|
||||
);
|
||||
|
||||
let object_store = store::new_object_store(opts).await?;
|
||||
let object_store_manager = ObjectStoreManager::new(
|
||||
"default", // TODO: use a name which is set in the configuration when #919 is done.
|
||||
@@ -472,7 +543,6 @@ mod tests {
|
||||
node_id: Some(0),
|
||||
..Default::default()
|
||||
},
|
||||
None,
|
||||
Plugins::default(),
|
||||
);
|
||||
|
||||
|
||||
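With the builder reworked, service wiring becomes explicit opt-in; a hedged sketch of standalone use (assumes `opts` and `kv_backend` are already constructed by the embedding process, in a function that returns a Result):

let datanode = DatanodeBuilder::new(opts, Plugins::default())
    .with_kv_backend(kv_backend)
    .enable_region_server_service()
    .enable_http_service()
    .build()
    .await?;
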
@@ -212,9 +212,6 @@ pub enum Error {
    #[snafu(display("Expect KvBackend but not found"))]
    MissingKvBackend { location: Location },

    #[snafu(display("Expect MetaClient but not found"))]
    MissingMetaClient { location: Location },

    #[snafu(display("Invalid SQL, error: {}", msg))]
    InvalidSql { msg: String },

@@ -295,9 +292,6 @@ pub enum Error {
    #[snafu(display("Missing node id in Datanode config"))]
    MissingNodeId { location: Location },

    #[snafu(display("Missing node id option in distributed mode"))]
    MissingMetasrvOpts { location: Location },

    #[snafu(display("Missing required field: {}", name))]
    MissingRequiredField { name: String, location: Location },

@@ -477,13 +471,11 @@ impl ErrorExt for Error {
            | SchemaExists { .. }
            | DatabaseNotFound { .. }
            | MissingNodeId { .. }
            | MissingMetasrvOpts { .. }
            | ColumnNoneDefaultValue { .. }
            | MissingWalDirConfig { .. }
            | PrepareImmutableTable { .. }
            | ColumnDataType { .. }
            | MissingKvBackend { .. }
            | MissingMetaClient { .. } => StatusCode::InvalidArguments,
            | MissingKvBackend { .. } => StatusCode::InvalidArguments,

            EncodeJson { .. } | PayloadNotExist { .. } | Unexpected { .. } => {
                StatusCode::Unexpected

@@ -37,6 +37,7 @@ use crate::alive_keeper::RegionAliveKeeper;
use crate::config::DatanodeOptions;
use crate::error::{self, MetaClientInitSnafu, Result};
use crate::event_listener::RegionServerEventReceiver;
use crate::metrics;
use crate::region_server::RegionServer;

pub(crate) mod handler;
@@ -72,9 +73,9 @@ impl HeartbeatTask {
            opts.heartbeat.interval.as_millis() as u64,
        ));
        let resp_handler_executor = Arc::new(HandlerGroupExecutor::new(vec![
            region_alive_keeper.clone(),
            Arc::new(ParseMailboxMessageHandler),
            Arc::new(RegionHeartbeatResponseHandler::new(region_server.clone())),
            region_alive_keeper.clone(),
        ]));

        Ok(Self {
@@ -101,8 +102,10 @@ impl HeartbeatTask {
        quit_signal: Arc<Notify>,
    ) -> Result<HeartbeatSender> {
        let client_id = meta_client.id();

        let (tx, mut rx) = meta_client.heartbeat().await.context(MetaClientInitSnafu)?;

        let mut last_received_lease = Instant::now();

        let _handle = common_runtime::spawn_bg(async move {
            while let Some(res) = match rx.message().await {
                Ok(m) => m,
@@ -114,6 +117,28 @@ impl HeartbeatTask {
                if let Some(msg) = res.mailbox_message.as_ref() {
                    info!("Received mailbox message: {msg:?}, meta_client id: {client_id:?}");
                }
                if let Some(lease) = res.region_lease.as_ref() {
                    metrics::LAST_RECEIVED_HEARTBEAT_ELAPSED
                        .set(last_received_lease.elapsed().as_millis() as i64);
                    // Resets the timer.
                    last_received_lease = Instant::now();

                    let mut leader_region_lease_count = 0;
                    let mut follower_region_lease_count = 0;
                    for lease in &lease.regions {
                        match lease.role() {
                            RegionRole::Leader => leader_region_lease_count += 1,
                            RegionRole::Follower => follower_region_lease_count += 1,
                        }
                    }

                    metrics::HEARTBEAT_REGION_LEASES
                        .with_label_values(&["leader"])
                        .set(leader_region_lease_count);
                    metrics::HEARTBEAT_REGION_LEASES
                        .with_label_values(&["follower"])
                        .set(follower_region_lease_count);
                }
                let ctx = HeartbeatResponseHandlerContext::new(mailbox.clone(), res);
                if let Err(e) = Self::handle_response(ctx, handler_executor.clone()).await {
                    error!(e; "Error while handling heartbeat response");

@@ -13,54 +13,119 @@
// limitations under the License.

use async_trait::async_trait;
use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
use common_meta::error::{InvalidHeartbeatResponseSnafu, Result as MetaResult};
use common_meta::heartbeat::handler::{
    HandleControl, HeartbeatResponseHandler, HeartbeatResponseHandlerContext,
};
use common_meta::instruction::{Instruction, InstructionReply, OpenRegion, SimpleReply};
use common_meta::instruction::{
    DowngradeRegion, DowngradeRegionReply, Instruction, InstructionReply, OpenRegion, SimpleReply,
};
use common_meta::RegionIdent;
use common_query::Output;
use common_telemetry::error;
use futures::future::BoxFuture;
use snafu::OptionExt;
use store_api::path_utils::region_dir;
use store_api::region_engine::SetReadonlyResponse;
use store_api::region_request::{RegionCloseRequest, RegionOpenRequest, RegionRequest};
use store_api::storage::RegionId;

use crate::error::Result;
use crate::error;
use crate::region_server::RegionServer;

/// Handler for [Instruction::OpenRegion] and [Instruction::CloseRegion].
#[derive(Clone)]
pub struct RegionHeartbeatResponseHandler {
    region_server: RegionServer,
}

/// Handler of the instruction.
pub type InstructionHandler =
    Box<dyn FnOnce(RegionServer) -> BoxFuture<'static, InstructionReply> + Send>;

impl RegionHeartbeatResponseHandler {
    /// Returns the [RegionHeartbeatResponseHandler].
    pub fn new(region_server: RegionServer) -> Self {
        Self { region_server }
    }

    fn instruction_to_request(instruction: Instruction) -> MetaResult<(RegionId, RegionRequest)> {
    /// Builds the [InstructionHandler].
    fn build_handler(instruction: Instruction) -> MetaResult<InstructionHandler> {
        match instruction {
            Instruction::OpenRegion(OpenRegion {
                region_ident,
                region_storage_path,
                options,
            }) => {
                let region_id = Self::region_ident_to_region_id(&region_ident);
                let open_region_req = RegionRequest::Open(RegionOpenRequest {
                    engine: region_ident.engine,
                    region_dir: region_dir(&region_storage_path, region_id),
                    options,
                });
                Ok((region_id, open_region_req))
            }) => Ok(Box::new(|region_server| {
                Box::pin(async move {
                    let region_id = Self::region_ident_to_region_id(&region_ident);
                    let request = RegionRequest::Open(RegionOpenRequest {
                        engine: region_ident.engine,
                        region_dir: region_dir(&region_storage_path, region_id),
                        options,
                    });
                    let result = region_server.handle_request(region_id, request).await;

                    let success = result.is_ok();
                    let error = result.as_ref().map_err(|e| e.to_string()).err();

                    InstructionReply::OpenRegion(SimpleReply {
                        result: success,
                        error,
                    })
                })
            })),
            Instruction::CloseRegion(region_ident) => Ok(Box::new(|region_server| {
                Box::pin(async move {
                    let region_id = Self::region_ident_to_region_id(&region_ident);
                    let request = RegionRequest::Close(RegionCloseRequest {});
                    let result = region_server.handle_request(region_id, request).await;

                    match result {
                        Ok(_) => InstructionReply::CloseRegion(SimpleReply {
                            result: true,
                            error: None,
                        }),
                        Err(error::Error::RegionNotFound { .. }) => {
                            InstructionReply::CloseRegion(SimpleReply {
                                result: true,
                                error: None,
                            })
                        }
                        Err(err) => InstructionReply::CloseRegion(SimpleReply {
                            result: false,
                            error: Some(err.to_string()),
                        }),
                    }
                })
            })),
            Instruction::DowngradeRegion(DowngradeRegion { region_id }) => {
                Ok(Box::new(move |region_server| {
                    Box::pin(async move {
                        match region_server.set_readonly_gracefully(region_id).await {
                            Ok(SetReadonlyResponse::Success { last_entry_id }) => {
                                InstructionReply::DowngradeRegion(DowngradeRegionReply {
                                    last_entry_id,
                                    exists: true,
                                    error: None,
                                })
                            }
                            Ok(SetReadonlyResponse::NotFound) => {
                                InstructionReply::DowngradeRegion(DowngradeRegionReply {
                                    last_entry_id: None,
                                    exists: false,
                                    error: None,
                                })
                            }
                            Err(err) => InstructionReply::DowngradeRegion(DowngradeRegionReply {
                                last_entry_id: None,
                                exists: false,
                                error: Some(err.to_string()),
                            }),
                        }
                    })
                }))
            }
            Instruction::CloseRegion(region_ident) => {
                let region_id = Self::region_ident_to_region_id(&region_ident);
                let close_region_req = RegionRequest::Close(RegionCloseRequest {});
                Ok((region_id, close_region_req))
            Instruction::UpgradeRegion(_) => {
                todo!()
            }
            Instruction::InvalidateTableIdCache(_) | Instruction::InvalidateTableNameCache(_) => {
                InvalidHeartbeatResponseSnafu.fail()
@@ -71,53 +136,6 @@ impl RegionHeartbeatResponseHandler {
    fn region_ident_to_region_id(region_ident: &RegionIdent) -> RegionId {
        RegionId::new(region_ident.table_id, region_ident.region_number)
    }

    fn reply_template_from_instruction(instruction: &Instruction) -> InstructionReply {
        match instruction {
            Instruction::OpenRegion(_) => InstructionReply::OpenRegion(SimpleReply {
                result: false,
                error: None,
            }),
            Instruction::CloseRegion(_) => InstructionReply::CloseRegion(SimpleReply {
                result: false,
                error: None,
            }),
            Instruction::InvalidateTableIdCache(_) | Instruction::InvalidateTableNameCache(_) => {
                InstructionReply::InvalidateTableCache(SimpleReply {
                    result: false,
                    error: None,
                })
            }
        }
    }

    fn fill_reply(mut template: InstructionReply, result: Result<Output>) -> InstructionReply {
        let success = result.is_ok();
        let error = result.as_ref().map_err(|e| e.to_string()).err();
        match &mut template {
            InstructionReply::OpenRegion(reply) => {
                reply.result = success;
                reply.error = error;
            }
            InstructionReply::CloseRegion(reply) => match result {
                Err(e) => {
                    if e.status_code() == StatusCode::RegionNotFound {
                        reply.result = true;
                    }
                }
                _ => {
                    reply.result = success;
                    reply.error = error;
                }
            },
            InstructionReply::InvalidateTableCache(reply) => {
                reply.result = success;
                reply.error = error;
            }
        }

        template
    }
}

#[async_trait]
@@ -125,7 +143,9 @@ impl HeartbeatResponseHandler for RegionHeartbeatResponseHandler {
    fn is_acceptable(&self, ctx: &HeartbeatResponseHandlerContext) -> bool {
        matches!(
            ctx.incoming_message.as_ref(),
            Some((_, Instruction::OpenRegion { .. })) | Some((_, Instruction::CloseRegion { .. }))
            Some((_, Instruction::OpenRegion { .. }))
                | Some((_, Instruction::CloseRegion { .. }))
                | Some((_, Instruction::DowngradeRegion { .. }))
        )
    }

@@ -137,15 +157,11 @@ impl HeartbeatResponseHandler for RegionHeartbeatResponseHandler {

        let mailbox = ctx.mailbox.clone();
        let region_server = self.region_server.clone();
        let reply_template = Self::reply_template_from_instruction(&instruction);
        let (region_id, region_req) = Self::instruction_to_request(instruction)?;
        let handler = Self::build_handler(instruction)?;
        let _handle = common_runtime::spawn_bg(async move {
            let result = region_server.handle_request(region_id, region_req).await;
            let reply = handler(region_server).await;

            if let Err(e) = mailbox
                .send((meta, Self::fill_reply(reply_template, result)))
                .await
            {
            if let Err(e) = mailbox.send((meta, reply)).await {
                error!(e; "Failed to send reply to mailbox");
            }
        });
@@ -153,3 +169,266 @@ impl HeartbeatResponseHandler for RegionHeartbeatResponseHandler {
        Ok(HandleControl::Done)
    }
}

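The `handle` body above shows the dispatch end to end; as a standalone sketch, building and awaiting a handler for a downgrade instruction looks like this (illustrative only, inside this module where `build_handler` is visible; `region_server` is an already-initialized RegionServer and the region id is arbitrary):

let instruction = Instruction::DowngradeRegion(DowngradeRegion {
    region_id: RegionId::new(1024, 1),
});
let handler = RegionHeartbeatResponseHandler::build_handler(instruction)?;
let reply = handler(region_server.clone()).await;
assert!(matches!(reply, InstructionReply::DowngradeRegion(_)));
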
#[cfg(test)]
mod tests {
    use std::assert_matches::assert_matches;
    use std::collections::HashMap;
    use std::sync::Arc;

    use common_meta::heartbeat::mailbox::{
        HeartbeatMailbox, IncomingMessage, MailboxRef, MessageMeta,
    };
    use mito2::config::MitoConfig;
    use mito2::engine::MITO_ENGINE_NAME;
    use mito2::test_util::{CreateRequestBuilder, TestEnv};
    use store_api::region_request::RegionRequest;
    use store_api::storage::RegionId;
    use tokio::sync::mpsc::{self, Receiver};

    use super::*;
    use crate::error;
    use crate::tests::mock_region_server;

    pub struct HeartbeatResponseTestEnv {
        mailbox: MailboxRef,
        receiver: Receiver<(MessageMeta, InstructionReply)>,
    }

    impl HeartbeatResponseTestEnv {
        pub fn new() -> Self {
            let (tx, rx) = mpsc::channel(8);
            let mailbox = Arc::new(HeartbeatMailbox::new(tx));

            HeartbeatResponseTestEnv {
                mailbox,
                receiver: rx,
            }
        }

        pub fn create_handler_ctx(
            &self,
            incoming_message: IncomingMessage,
        ) -> HeartbeatResponseHandlerContext {
            HeartbeatResponseHandlerContext {
                mailbox: self.mailbox.clone(),
                response: Default::default(),
                incoming_message: Some(incoming_message),
            }
        }
    }

    fn close_region_instruction(region_id: RegionId) -> Instruction {
        Instruction::CloseRegion(RegionIdent {
            table_id: region_id.table_id(),
            region_number: region_id.region_number(),
            cluster_id: 1,
            datanode_id: 2,
            engine: MITO_ENGINE_NAME.to_string(),
        })
    }

    fn open_region_instruction(region_id: RegionId, path: &str) -> Instruction {
        Instruction::OpenRegion(OpenRegion::new(
            RegionIdent {
                table_id: region_id.table_id(),
                region_number: region_id.region_number(),
                cluster_id: 1,
                datanode_id: 2,
                engine: MITO_ENGINE_NAME.to_string(),
            },
            path,
            HashMap::new(),
        ))
    }

    #[tokio::test]
    async fn test_close_region() {
        common_telemetry::init_default_ut_logging();

        let mut region_server = mock_region_server();
        let heartbeat_handler = RegionHeartbeatResponseHandler::new(region_server.clone());

        let mut engine_env = TestEnv::with_prefix("close-region");
        let engine = engine_env.create_engine(MitoConfig::default()).await;
        region_server.register_engine(Arc::new(engine));
        let region_id = RegionId::new(1024, 1);

        let builder = CreateRequestBuilder::new();
        let create_req = builder.build();
        region_server
            .handle_request(region_id, RegionRequest::Create(create_req))
            .await
            .unwrap();

        let mut heartbeat_env = HeartbeatResponseTestEnv::new();

        // Should be ok if we try to close it twice.
        for _ in 0..2 {
            let meta = MessageMeta::new_test(1, "test", "dn-1", "me-0");
            let instruction = close_region_instruction(region_id);

            let mut ctx = heartbeat_env.create_handler_ctx((meta, instruction));
            let control = heartbeat_handler.handle(&mut ctx).await.unwrap();
            assert_matches!(control, HandleControl::Done);

            let (_, reply) = heartbeat_env.receiver.recv().await.unwrap();

            if let InstructionReply::CloseRegion(reply) = reply {
                assert!(reply.result);
                assert!(reply.error.is_none());
            } else {
                unreachable!()
            }

            assert_matches!(
                region_server.set_writable(region_id, true).unwrap_err(),
                error::Error::RegionNotFound { .. }
            );
        }
    }

    #[tokio::test]
    async fn test_open_region_ok() {
        common_telemetry::init_default_ut_logging();

        let mut region_server = mock_region_server();
        let heartbeat_handler = RegionHeartbeatResponseHandler::new(region_server.clone());

        let mut engine_env = TestEnv::with_prefix("open-region");
        let engine = engine_env.create_engine(MitoConfig::default()).await;
        region_server.register_engine(Arc::new(engine));
        let region_id = RegionId::new(1024, 1);

        let builder = CreateRequestBuilder::new();
        let mut create_req = builder.build();
        let storage_path = "test";
        create_req.region_dir = region_dir(storage_path, region_id);

        region_server
            .handle_request(region_id, RegionRequest::Create(create_req))
            .await
            .unwrap();

        region_server
            .handle_request(region_id, RegionRequest::Close(RegionCloseRequest {}))
            .await
            .unwrap();
        let mut heartbeat_env = HeartbeatResponseTestEnv::new();

        // Should be ok if we try to open it twice.
        for _ in 0..2 {
            let meta = MessageMeta::new_test(1, "test", "dn-1", "me-0");
            let instruction = open_region_instruction(region_id, storage_path);

            let mut ctx = heartbeat_env.create_handler_ctx((meta, instruction));
            let control = heartbeat_handler.handle(&mut ctx).await.unwrap();
            assert_matches!(control, HandleControl::Done);

            let (_, reply) = heartbeat_env.receiver.recv().await.unwrap();

            if let InstructionReply::OpenRegion(reply) = reply {
                assert!(reply.result);
                assert!(reply.error.is_none());
            } else {
                unreachable!()
            }
        }
    }

    #[tokio::test]
    async fn test_open_not_exists_region() {
        common_telemetry::init_default_ut_logging();

        let mut region_server = mock_region_server();
        let heartbeat_handler = RegionHeartbeatResponseHandler::new(region_server.clone());

        let mut engine_env = TestEnv::with_prefix("open-not-exists-region");
        let engine = engine_env.create_engine(MitoConfig::default()).await;
        region_server.register_engine(Arc::new(engine));
        let region_id = RegionId::new(1024, 1);
        let storage_path = "test";

        let mut heartbeat_env = HeartbeatResponseTestEnv::new();

        let meta = MessageMeta::new_test(1, "test", "dn-1", "me-0");
        let instruction = open_region_instruction(region_id, storage_path);

        let mut ctx = heartbeat_env.create_handler_ctx((meta, instruction));
        let control = heartbeat_handler.handle(&mut ctx).await.unwrap();
        assert_matches!(control, HandleControl::Done);

        let (_, reply) = heartbeat_env.receiver.recv().await.unwrap();

        if let InstructionReply::OpenRegion(reply) = reply {
            assert!(!reply.result);
            assert!(reply.error.is_some());
        } else {
            unreachable!()
        }
    }

    #[tokio::test]
    async fn test_downgrade_region() {
        common_telemetry::init_default_ut_logging();
|
||||
|
||||
let mut region_server = mock_region_server();
|
||||
let heartbeat_handler = RegionHeartbeatResponseHandler::new(region_server.clone());
|
||||
|
||||
let mut engine_env = TestEnv::with_prefix("downgrade-region");
|
||||
let engine = engine_env.create_engine(MitoConfig::default()).await;
|
||||
region_server.register_engine(Arc::new(engine));
|
||||
let region_id = RegionId::new(1024, 1);
|
||||
|
||||
let builder = CreateRequestBuilder::new();
|
||||
let mut create_req = builder.build();
|
||||
let storage_path = "test";
|
||||
create_req.region_dir = region_dir(storage_path, region_id);
|
||||
|
||||
region_server
|
||||
.handle_request(region_id, RegionRequest::Create(create_req))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let mut heartbeat_env = HeartbeatResponseTestEnv::new();
|
||||
|
||||
// Should be ok, if we try to downgrade it twice.
|
||||
for _ in 0..2 {
|
||||
let meta = MessageMeta::new_test(1, "test", "dn-1", "me-0");
|
||||
let instruction = Instruction::DowngradeRegion(DowngradeRegion { region_id });
|
||||
|
||||
let mut ctx = heartbeat_env.create_handler_ctx((meta, instruction));
|
||||
let control = heartbeat_handler.handle(&mut ctx).await.unwrap();
|
||||
assert_matches!(control, HandleControl::Done);
|
||||
|
||||
let (_, reply) = heartbeat_env.receiver.recv().await.unwrap();
|
||||
|
||||
if let InstructionReply::DowngradeRegion(reply) = reply {
|
||||
assert!(reply.exists);
|
||||
assert!(reply.error.is_none());
|
||||
assert_eq!(reply.last_entry_id.unwrap(), 0);
|
||||
} else {
|
||||
unreachable!()
|
||||
}
|
||||
}
|
||||
|
||||
// Downgrades a not exists region.
|
||||
let meta = MessageMeta::new_test(1, "test", "dn-1", "me-0");
|
||||
let instruction = Instruction::DowngradeRegion(DowngradeRegion {
|
||||
region_id: RegionId::new(2048, 1),
|
||||
});
|
||||
let mut ctx = heartbeat_env.create_handler_ctx((meta, instruction));
|
||||
let control = heartbeat_handler.handle(&mut ctx).await.unwrap();
|
||||
assert_matches!(control, HandleControl::Done);
|
||||
|
||||
let (_, reply) = heartbeat_env.receiver.recv().await.unwrap();
|
||||
|
||||
if let InstructionReply::DowngradeRegion(reply) = reply {
|
||||
assert!(!reply.exists);
|
||||
assert!(reply.error.is_none());
|
||||
assert!(reply.last_entry_id.is_none());
|
||||
} else {
|
||||
unreachable!()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
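The harness above reduces to a channel-backed mailbox: the handler pushes `(MessageMeta, InstructionReply)` pairs into an mpsc sender, and the test asserts on whatever arrives at the receiver. A minimal sketch of that pattern with plain tokio primitives; the `Reply` enum here is illustrative, not a GreptimeDB type:

```rust
use tokio::sync::mpsc;

#[derive(Debug, PartialEq)]
enum Reply {
    Ok,
}

#[tokio::main]
async fn main() {
    // The handler half owns the sender; the test half owns the receiver
    // and asserts on whatever reply the handler pushed.
    let (tx, mut rx) = mpsc::channel::<Reply>(8);

    tokio::spawn(async move {
        // A real handler would close/open/downgrade a region here,
        // then report the outcome.
        tx.send(Reply::Ok).await.unwrap();
    });

    assert_eq!(rx.recv().await.unwrap(), Reply::Ok);
}
```
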
@@ -24,8 +24,6 @@ mod greptimedb_telemetry;
pub mod heartbeat;
pub mod metrics;
pub mod region_server;
pub mod server;
mod store;
#[cfg(test)]
#[allow(dead_code)]
mod tests;

@@ -18,6 +18,8 @@ use prometheus::*;
/// Region request type label.
pub const REGION_REQUEST_TYPE: &str = "datanode_region_request_type";

pub const REGION_ROLE: &str = "region_role";

lazy_static! {
    /// The elapsed time of handling a request in the region_server.
    pub static ref HANDLE_REGION_REQUEST_ELAPSED: HistogramVec = register_histogram_vec!(
@@ -26,4 +28,17 @@ lazy_static! {
        &[REGION_REQUEST_TYPE]
    )
    .unwrap();
    /// The elapsed time since the last received heartbeat.
    pub static ref LAST_RECEIVED_HEARTBEAT_ELAPSED: IntGauge = register_int_gauge!(
        "last_received_heartbeat_lease_elapsed",
        "last received heartbeat lease elapsed",
    )
    .unwrap();
    /// The received region leases via heartbeat.
    pub static ref HEARTBEAT_REGION_LEASES: IntGaugeVec = register_int_gauge_vec!(
        "heartbeat_region_leases",
        "received region leases via heartbeat",
        &[REGION_ROLE]
    )
    .unwrap();
}

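These are plain prometheus types, so updating them from the heartbeat path is a one-liner per gauge. A hedged sketch of how a datanode might record them; the function name and label values are assumptions, only the metric types come from the code above:

```rust
use prometheus::{IntGauge, IntGaugeVec};

fn record_heartbeat_metrics(
    elapsed_ms: i64,
    leader_leases: i64,
    follower_leases: i64,
    elapsed: &IntGauge,
    leases: &IntGaugeVec,
) {
    // IntGauge::set overwrites the previous sample.
    elapsed.set(elapsed_ms);
    // The vec is keyed by the REGION_ROLE label declared above; the
    // concrete role names here are illustrative.
    leases.with_label_values(&["leader"]).set(leader_leases);
    leases.with_label_values(&["follower"]).set(follower_leases);
}
```
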
@@ -28,6 +28,8 @@ use common_query::physical_plan::DfPhysicalPlanAdapter;
use common_query::{DfPhysicalPlan, Output};
use common_recordbatch::SendableRecordBatchStream;
use common_runtime::Runtime;
use common_telemetry::tracing::{self, info_span};
use common_telemetry::tracing_context::{FutureExt, TracingContext};
use common_telemetry::{info, warn};
use dashmap::DashMap;
use datafusion::catalog::schema::SchemaProvider;
@@ -47,7 +49,7 @@ use servers::grpc::region_server::RegionServerHandler;
use session::context::{QueryContextBuilder, QueryContextRef};
use snafu::{OptionExt, ResultExt};
use store_api::metadata::RegionMetadataRef;
use store_api::region_engine::{RegionEngineRef, RegionRole};
use store_api::region_engine::{RegionEngineRef, RegionRole, SetReadonlyResponse};
use store_api::region_request::{RegionCloseRequest, RegionRequest};
use store_api::storage::{RegionId, ScanRequest};
use substrait::{DFLogicalSubstraitConvertor, SubstraitPlan};
@@ -77,12 +79,27 @@ impl RegionServer {
        query_engine: QueryEngineRef,
        runtime: Arc<Runtime>,
        event_listener: RegionServerEventListenerRef,
    ) -> Self {
        Self::with_table_provider(
            query_engine,
            runtime,
            event_listener,
            Arc::new(DummyTableProviderFactory),
        )
    }

    pub fn with_table_provider(
        query_engine: QueryEngineRef,
        runtime: Arc<Runtime>,
        event_listener: RegionServerEventListenerRef,
        table_provider_factory: TableProviderFactoryRef,
    ) -> Self {
        Self {
            inner: Arc::new(RegionServerInner::new(
                query_engine,
                runtime,
                event_listener,
                table_provider_factory,
            )),
        }
    }
@@ -99,6 +116,7 @@ impl RegionServer {
        self.inner.handle_request(region_id, request).await
    }

    #[tracing::instrument(skip_all)]
    pub async fn handle_read(&self, request: QueryRequest) -> Result<SendableRecordBatchStream> {
        self.inner.handle_read(request).await
    }
@@ -130,6 +148,19 @@ impl RegionServer {
            .with_context(|_| HandleRegionRequestSnafu { region_id })
    }

    pub async fn set_readonly_gracefully(
        &self,
        region_id: RegionId,
    ) -> Result<SetReadonlyResponse> {
        match self.inner.region_map.get(&region_id) {
            Some(engine) => Ok(engine
                .set_readonly_gracefully(region_id)
                .await
                .with_context(|_| HandleRegionRequestSnafu { region_id })?),
            None => Ok(SetReadonlyResponse::NotFound),
        }
    }

    pub fn runtime(&self) -> Arc<Runtime> {
        self.inner.runtime.clone()
    }
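`set_readonly_gracefully` gives callers such as a region-migration procedure a way to demote a region without dropping it. A hedged usage sketch built only from the signature above; this diff shows just the `NotFound` variant, so everything else is matched generically:

```rust
// Hypothetical caller inside the datanode crate; uses this file's types.
async fn demote_region(region_server: &RegionServer, region_id: RegionId) -> Result<()> {
    match region_server.set_readonly_gracefully(region_id).await? {
        // The region is not on this datanode: nothing to demote.
        SetReadonlyResponse::NotFound => {}
        // Other variants (e.g. a success case carrying a last entry id)
        // are not shown in this diff; real callers would match them here.
        _ => {}
    }
    Ok(())
}
```
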
@@ -154,9 +185,19 @@ impl RegionServerHandler for RegionServer {
            .context(BuildRegionRequestsSnafu)
            .map_err(BoxedError::new)
            .context(ExecuteGrpcRequestSnafu)?;
        let tracing_context = TracingContext::from_current_span();
        let join_tasks = requests.into_iter().map(|(region_id, req)| {
            let self_to_move = self.clone();
            async move { self_to_move.handle_request(region_id, req).await }
            let span = tracing_context.attach(info_span!(
                "RegionServer::handle_region_request",
                region_id = region_id.to_string()
            ));
            async move {
                self_to_move
                    .handle_request(region_id, req)
                    .trace(span)
                    .await
            }
        });

        let results = try_join_all(join_tasks)
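The pattern in this hunk is: capture the tracing context once on the request path, then attach one child span per concurrent region task. A reduced sketch of the same idea using the plain `tracing` and `futures` crates instead of common_telemetry's wrappers:

```rust
use tracing::{info_span, Instrument};

async fn handle_one(region_id: u64) {
    // Real per-region work would live here.
    tracing::info!(region_id, "handled");
}

async fn handle_all(region_ids: Vec<u64>) {
    let tasks = region_ids.into_iter().map(|region_id| {
        // One child span per region keeps concurrent requests
        // distinguishable under the same parent trace.
        let span = info_span!("handle_region_request", region_id);
        handle_one(region_id).instrument(span)
    });
    futures::future::join_all(tasks).await;
}

#[tokio::main]
async fn main() {
    tracing_subscriber::fmt::init();
    handle_all(vec![1, 2, 3]).await;
}
```
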
@@ -198,15 +239,18 @@ impl FlightCraft for RegionServer {
        let ticket = request.into_inner().ticket;
        let request = QueryRequest::decode(ticket.as_ref())
            .context(servers_error::InvalidFlightTicketSnafu)?;
        let trace_id = request
        let tracing_context = request
            .header
            .as_ref()
            .map(|h| h.trace_id)
            .map(|h| TracingContext::from_w3c(&h.tracing_context))
            .unwrap_or_default();

        let result = self.handle_read(request).await?;
        let result = self
            .handle_read(request)
            .trace(tracing_context.attach(info_span!("RegionServer::handle_read")))
            .await?;

        let stream = Box::pin(FlightRecordBatchStream::new(result, trace_id));
        let stream = Box::pin(FlightRecordBatchStream::new(result, tracing_context));
        Ok(Response::new(stream))
    }
}
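The Flight ticket header now carries a W3C trace context string instead of a bare trace id, which is what `TracingContext::from_w3c` parses. A self-contained sketch of the `traceparent` layout that format follows; the field split is the W3C spec, not this PR's code:

```rust
// A W3C `traceparent` value: version - trace id - parent span id - flags.
fn parse_traceparent(header: &str) -> Option<(String, String)> {
    let mut parts = header.split('-');
    let _version = parts.next()?; // "00"
    let trace_id = parts.next()?.to_string(); // 32 hex chars
    let span_id = parts.next()?.to_string(); // 16 hex chars
    let _flags = parts.next()?; // "01" = sampled
    Some((trace_id, span_id))
}

fn main() {
    let header = "00-0af7651916cd43dd8448eb211c80319c-b7ad6b7169203331-01";
    let (trace_id, span_id) = parse_traceparent(header).unwrap();
    assert_eq!(trace_id.len(), 32);
    assert_eq!(span_id.len(), 16);
}
```
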
@@ -217,6 +261,7 @@ struct RegionServerInner {
    query_engine: QueryEngineRef,
    runtime: Arc<Runtime>,
    event_listener: RegionServerEventListenerRef,
    table_provider_factory: TableProviderFactoryRef,
}

impl RegionServerInner {
@@ -224,6 +269,7 @@ impl RegionServerInner {
        query_engine: QueryEngineRef,
        runtime: Arc<Runtime>,
        event_listener: RegionServerEventListenerRef,
        table_provider_factory: TableProviderFactoryRef,
    ) -> Self {
        Self {
            engines: RwLock::new(HashMap::new()),
@@ -231,6 +277,7 @@ impl RegionServerInner {
            query_engine,
            runtime,
            event_listener,
            table_provider_factory,
        }
    }

@@ -283,6 +330,10 @@ impl RegionServerInner {

        let result = engine
            .handle_request(region_id, request)
            .trace(info_span!(
                "RegionEngine::handle_region_request",
                engine_type
            ))
            .await
            .with_context(|_| HandleRegionRequestSnafu { region_id })?;

@@ -326,7 +377,13 @@ impl RegionServerInner {
            .get(&region_id)
            .with_context(|| RegionNotFoundSnafu { region_id })?
            .clone();
        let catalog_list = Arc::new(DummyCatalogList::new(region_id, engine).await?);

        let table_provider = self
            .table_provider_factory
            .create(region_id, engine)
            .await?;

        let catalog_list = Arc::new(DummyCatalogList::with_table_provider(table_provider));

        // Decode the substrait plan to a logical plan and execute it.
        let logical_plan = DFLogicalSubstraitConvertor
@@ -387,31 +444,16 @@ struct DummyCatalogList {
}

impl DummyCatalogList {
    pub async fn new(region_id: RegionId, engine: RegionEngineRef) -> Result<Self> {
        let metadata =
            engine
                .get_metadata(region_id)
                .await
                .with_context(|_| GetRegionMetadataSnafu {
                    engine: engine.name(),
                    region_id,
                })?;
        let table_provider = DummyTableProvider {
            region_id,
            engine,
            metadata,
            scan_request: Default::default(),
        };
    fn with_table_provider(table_provider: Arc<dyn TableProvider>) -> Self {
        let schema_provider = DummySchemaProvider {
            table: table_provider,
        };
        let catalog_provider = DummyCatalogProvider {
            schema: schema_provider,
        };
        let catalog_list = Self {
        Self {
            catalog: catalog_provider,
        };
        Ok(catalog_list)
        }
    }
}

@@ -460,7 +502,7 @@ impl CatalogProvider for DummyCatalogProvider {
/// For [DummyCatalogList].
#[derive(Clone)]
struct DummySchemaProvider {
    table: DummyTableProvider,
    table: Arc<dyn TableProvider>,
}

#[async_trait]
@@ -474,7 +516,7 @@ impl SchemaProvider for DummySchemaProvider {
    }

    async fn table(&self, _name: &str) -> Option<Arc<dyn TableProvider>> {
        Some(Arc::new(self.table.clone()))
        Some(self.table.clone())
    }

    fn table_exist(&self, _name: &str) -> bool {
@@ -535,3 +577,40 @@ impl TableProvider for DummyTableProvider {
        Ok(vec![TableProviderFilterPushDown::Inexact; filters.len()])
    }
}

pub struct DummyTableProviderFactory;

#[async_trait]
impl TableProviderFactory for DummyTableProviderFactory {
    async fn create(
        &self,
        region_id: RegionId,
        engine: RegionEngineRef,
    ) -> Result<Arc<dyn TableProvider>> {
        let metadata =
            engine
                .get_metadata(region_id)
                .await
                .with_context(|_| GetRegionMetadataSnafu {
                    engine: engine.name(),
                    region_id,
                })?;
        Ok(Arc::new(DummyTableProvider {
            region_id,
            engine,
            metadata,
            scan_request: Default::default(),
        }))
    }
}

#[async_trait]
pub trait TableProviderFactory: Send + Sync {
    async fn create(
        &self,
        region_id: RegionId,
        engine: RegionEngineRef,
    ) -> Result<Arc<dyn TableProvider>>;
}

pub type TableProviderFactoryRef = Arc<dyn TableProviderFactory>;

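The factory indirection means anything that can turn a `(region_id, engine)` pair into a `TableProvider` can now be plugged into the region server through `RegionServer::with_table_provider`. A hedged sketch of a custom factory; the wrapper type and its logging are invented for illustration:

```rust
// Hypothetical wrapper that observes every `create` call; `inner` would
// typically be Arc::new(DummyTableProviderFactory).
pub struct LoggingTableProviderFactory {
    inner: TableProviderFactoryRef,
}

#[async_trait]
impl TableProviderFactory for LoggingTableProviderFactory {
    async fn create(
        &self,
        region_id: RegionId,
        engine: RegionEngineRef,
    ) -> Result<Arc<dyn TableProvider>> {
        common_telemetry::info!("creating table provider for region {region_id}");
        self.inner.create(region_id, engine).await
    }
}
```

Such a factory would be wired in as the fourth argument to `RegionServer::with_table_provider`.
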
@@ -1,95 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::net::SocketAddr;
use std::sync::Arc;

use futures::future;
use servers::grpc::{GrpcServer, GrpcServerConfig};
use servers::http::{HttpServer, HttpServerBuilder};
use servers::metrics_handler::MetricsHandler;
use servers::server::Server;
use snafu::ResultExt;

use crate::config::DatanodeOptions;
use crate::error::{
    ParseAddrSnafu, Result, ShutdownServerSnafu, StartServerSnafu, WaitForGrpcServingSnafu,
};
use crate::region_server::RegionServer;

/// All rpc services.
pub struct Services {
    grpc_server: GrpcServer,
    http_server: HttpServer,
}

impl Services {
    pub async fn try_new(region_server: RegionServer, opts: &DatanodeOptions) -> Result<Self> {
        let flight_handler = Some(Arc::new(region_server.clone()) as _);
        let region_server_handler = Some(Arc::new(region_server.clone()) as _);
        let runtime = region_server.runtime();
        let grpc_config = GrpcServerConfig {
            max_recv_message_size: opts.rpc_max_recv_message_size.as_bytes() as usize,
            max_send_message_size: opts.rpc_max_send_message_size.as_bytes() as usize,
        };

        Ok(Self {
            grpc_server: GrpcServer::new(
                Some(grpc_config),
                None,
                None,
                flight_handler,
                region_server_handler,
                None,
                runtime,
            ),
            http_server: HttpServerBuilder::new(opts.http.clone())
                .with_metrics_handler(MetricsHandler)
                .with_greptime_config_options(opts.to_toml_string())
                .build(),
        })
    }

    pub async fn start(&mut self, opts: &DatanodeOptions) -> Result<()> {
        let grpc_addr: SocketAddr = opts.rpc_addr.parse().context(ParseAddrSnafu {
            addr: &opts.rpc_addr,
        })?;
        let http_addr = opts.http.addr.parse().context(ParseAddrSnafu {
            addr: &opts.http.addr,
        })?;
        let grpc = self.grpc_server.start(grpc_addr);
        let http = self.http_server.start(http_addr);
        let _ = future::try_join_all(vec![grpc, http])
            .await
            .context(StartServerSnafu)?;

        self.grpc_server
            .wait_for_serve()
            .await
            .context(WaitForGrpcServingSnafu)?;
        Ok(())
    }

    pub async fn shutdown(&self) -> Result<()> {
        self.grpc_server
            .shutdown()
            .await
            .context(ShutdownServerSnafu)?;
        self.http_server
            .shutdown()
            .await
            .context(ShutdownServerSnafu)?;
        Ok(())
    }
}
@@ -66,23 +66,9 @@ pub(crate) async fn new_object_store(opts: &DatanodeOptions) -> Result<ObjectSto
        .with_error_level(Some("debug"))
        .expect("input error level must be valid"),
    )
    .layer(TracingLayer);

    // In the test environment, multiple datanodes are started in the same process.
    // If each datanode registered its Prometheus metrics on startup, the program
    // would crash because the same metric would be registered repeatedly, so the
    // Prometheus metric layer is disabled in the test environment.
    #[cfg(feature = "testing")]
    return Ok(store);

    #[cfg(not(feature = "testing"))]
    {
        let registry = prometheus::default_registry();
        Ok(
            store.layer(object_store::layers::PrometheusLayer::with_registry(
                registry.clone(),
            )),
        )
    }
    .layer(TracingLayer)
    .layer(object_store::layers::PrometheusMetricsLayer);
    Ok(store)
}

async fn create_object_store_with_cache(

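The replacement collapses the cfg-gated Prometheus registration into a single `PrometheusMetricsLayer` that is safe to apply unconditionally. The layering style itself comes from OpenDAL; a minimal sketch with a stock OpenDAL layer standing in for GreptimeDB's custom ones (API shape assumed from OpenDAL 0.40-era releases):

```rust
use opendal::layers::LoggingLayer;
use opendal::services::Memory;
use opendal::Operator;

fn main() -> opendal::Result<()> {
    // Each `.layer(...)` wraps the operator; every request passes through
    // the layers in order, so a metrics/logging layer sees all calls.
    let op: Operator = Operator::new(Memory::default())?
        .layer(LoggingLayer::default())
        .finish();
    let _ = op;
    Ok(())
}
```
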
@@ -13,19 +13,12 @@
// limitations under the License.

use std::any::Any;
use std::collections::HashMap;
use std::sync::Arc;

use api::v1::meta::HeartbeatResponse;
use async_trait::async_trait;
use common_error::ext::BoxedError;
use common_function::scalars::aggregate::AggregateFunctionMetaRef;
use common_function::scalars::FunctionRef;
use common_meta::heartbeat::handler::{
    HeartbeatResponseHandlerContext, HeartbeatResponseHandlerExecutor,
};
use common_meta::heartbeat::mailbox::{HeartbeatMailbox, MessageMeta};
use common_meta::instruction::{Instruction, OpenRegion, RegionIdent};
use common_query::prelude::ScalarUdf;
use common_query::Output;
use common_recordbatch::SendableRecordBatchStream;
@@ -37,7 +30,7 @@ use query::query_engine::DescribeResult;
use query::QueryEngine;
use session::context::QueryContextRef;
use store_api::metadata::RegionMetadataRef;
use store_api::region_engine::{RegionEngine, RegionRole};
use store_api::region_engine::{RegionEngine, RegionRole, SetReadonlyResponse};
use store_api::region_request::RegionRequest;
use store_api::storage::{RegionId, ScanRequest};
use table::TableRef;
@@ -46,51 +39,6 @@ use tokio::sync::mpsc::{Receiver, Sender};
use crate::event_listener::NoopRegionServerEventListener;
use crate::region_server::RegionServer;

pub fn test_message_meta(id: u64, subject: &str, to: &str, from: &str) -> MessageMeta {
    MessageMeta {
        id,
        subject: subject.to_string(),
        to: to.to_string(),
        from: from.to_string(),
    }
}

async fn handle_instruction(
    executor: Arc<dyn HeartbeatResponseHandlerExecutor>,
    mailbox: Arc<HeartbeatMailbox>,
    instruction: Instruction,
) {
    let response = HeartbeatResponse::default();
    let mut ctx: HeartbeatResponseHandlerContext =
        HeartbeatResponseHandlerContext::new(mailbox, response);
    ctx.incoming_message = Some((test_message_meta(1, "hi", "foo", "bar"), instruction));
    executor.handle(ctx).await.unwrap();
}

fn close_region_instruction() -> Instruction {
    Instruction::CloseRegion(RegionIdent {
        table_id: 1024,
        region_number: 0,
        cluster_id: 1,
        datanode_id: 2,
        engine: "mito2".to_string(),
    })
}

fn open_region_instruction() -> Instruction {
    Instruction::OpenRegion(OpenRegion::new(
        RegionIdent {
            table_id: 1024,
            region_number: 0,
            cluster_id: 1,
            datanode_id: 2,
            engine: "mito2".to_string(),
        },
        "path/dir",
        HashMap::new(),
    ))
}

pub struct MockQueryEngine;

#[async_trait]
@@ -191,6 +139,13 @@ impl RegionEngine for MockRegionEngine {
        Ok(())
    }

    async fn set_readonly_gracefully(
        &self,
        _region_id: RegionId,
    ) -> Result<SetReadonlyResponse, BoxedError> {
        unimplemented!()
    }

    fn role(&self, _region_id: RegionId) -> Option<RegionRole> {
        Some(RegionRole::Leader)
    }

@@ -13,6 +13,7 @@ arrow-array.workspace = true
arrow-schema.workspace = true
arrow.workspace = true
common-base.workspace = true
common-decimal.workspace = true
common-error.workspace = true
common-macro.workspace = true
common-telemetry.workspace = true
@@ -23,6 +24,7 @@ num = "0.4"
num-traits = "0.2"
ordered-float = { version = "3.0", features = ["serde"] }
paste = "1.0"
rust_decimal = "1.32.0"
serde.workspace = true
serde_json = "1.0"
snafu.workspace = true

@@ -19,6 +19,8 @@ use arrow::compute::cast as arrow_array_cast;
use arrow::datatypes::{
    DataType as ArrowDataType, IntervalUnit as ArrowIntervalUnit, TimeUnit as ArrowTimeUnit,
};
use arrow_schema::DECIMAL_DEFAULT_SCALE;
use common_decimal::decimal128::DECIMAL128_MAX_PRECISION;
use common_time::interval::IntervalUnit;
use common_time::timestamp::TimeUnit;
use paste::paste;
@@ -27,13 +29,13 @@ use serde::{Deserialize, Serialize};
use crate::error::{self, Error, Result};
use crate::type_id::LogicalTypeId;
use crate::types::{
    BinaryType, BooleanType, DateTimeType, DateType, DictionaryType, DurationMicrosecondType,
    DurationMillisecondType, DurationNanosecondType, DurationSecondType, DurationType, Float32Type,
    Float64Type, Int16Type, Int32Type, Int64Type, Int8Type, IntervalDayTimeType,
    IntervalMonthDayNanoType, IntervalType, IntervalYearMonthType, ListType, NullType, StringType,
    TimeMillisecondType, TimeType, TimestampMicrosecondType, TimestampMillisecondType,
    TimestampNanosecondType, TimestampSecondType, TimestampType, UInt16Type, UInt32Type,
    UInt64Type, UInt8Type,
    BinaryType, BooleanType, DateTimeType, DateType, Decimal128Type, DictionaryType,
    DurationMicrosecondType, DurationMillisecondType, DurationNanosecondType, DurationSecondType,
    DurationType, Float32Type, Float64Type, Int16Type, Int32Type, Int64Type, Int8Type,
    IntervalDayTimeType, IntervalMonthDayNanoType, IntervalType, IntervalYearMonthType, ListType,
    NullType, StringType, TimeMillisecondType, TimeType, TimestampMicrosecondType,
    TimestampMillisecondType, TimestampNanosecondType, TimestampSecondType, TimestampType,
    UInt16Type, UInt32Type, UInt64Type, UInt8Type,
};
use crate::value::Value;
use crate::vectors::MutableVector;
@@ -56,6 +58,9 @@ pub enum ConcreteDataType {
    Float32(Float32Type),
    Float64(Float64Type),

    // Decimal128 type:
    Decimal128(Decimal128Type),

    // String types:
    Binary(BinaryType),
    String(StringType),
@@ -80,28 +85,48 @@ pub enum ConcreteDataType {
impl fmt::Display for ConcreteDataType {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            ConcreteDataType::Null(_) => write!(f, "Null"),
            ConcreteDataType::Boolean(_) => write!(f, "Boolean"),
            ConcreteDataType::Int8(_) => write!(f, "Int8"),
            ConcreteDataType::Int16(_) => write!(f, "Int16"),
            ConcreteDataType::Int32(_) => write!(f, "Int32"),
            ConcreteDataType::Int64(_) => write!(f, "Int64"),
            ConcreteDataType::UInt8(_) => write!(f, "UInt8"),
            ConcreteDataType::UInt16(_) => write!(f, "UInt16"),
            ConcreteDataType::UInt32(_) => write!(f, "UInt32"),
            ConcreteDataType::UInt64(_) => write!(f, "UInt64"),
            ConcreteDataType::Float32(_) => write!(f, "Float32"),
            ConcreteDataType::Float64(_) => write!(f, "Float64"),
            ConcreteDataType::Binary(_) => write!(f, "Binary"),
            ConcreteDataType::String(_) => write!(f, "String"),
            ConcreteDataType::Date(_) => write!(f, "Date"),
            ConcreteDataType::DateTime(_) => write!(f, "DateTime"),
            ConcreteDataType::Timestamp(_) => write!(f, "Timestamp"),
            ConcreteDataType::Time(_) => write!(f, "Time"),
            ConcreteDataType::List(_) => write!(f, "List"),
            ConcreteDataType::Dictionary(_) => write!(f, "Dictionary"),
            ConcreteDataType::Interval(_) => write!(f, "Interval"),
            ConcreteDataType::Duration(_) => write!(f, "Duration"),
            ConcreteDataType::Null(v) => write!(f, "{}", v.name()),
            ConcreteDataType::Boolean(v) => write!(f, "{}", v.name()),
            ConcreteDataType::Int8(v) => write!(f, "{}", v.name()),
            ConcreteDataType::Int16(v) => write!(f, "{}", v.name()),
            ConcreteDataType::Int32(v) => write!(f, "{}", v.name()),
            ConcreteDataType::Int64(v) => write!(f, "{}", v.name()),
            ConcreteDataType::UInt8(v) => write!(f, "{}", v.name()),
            ConcreteDataType::UInt16(v) => write!(f, "{}", v.name()),
            ConcreteDataType::UInt32(v) => write!(f, "{}", v.name()),
            ConcreteDataType::UInt64(v) => write!(f, "{}", v.name()),
            ConcreteDataType::Float32(v) => write!(f, "{}", v.name()),
            ConcreteDataType::Float64(v) => write!(f, "{}", v.name()),
            ConcreteDataType::Binary(v) => write!(f, "{}", v.name()),
            ConcreteDataType::String(v) => write!(f, "{}", v.name()),
            ConcreteDataType::Date(v) => write!(f, "{}", v.name()),
            ConcreteDataType::DateTime(v) => write!(f, "{}", v.name()),
            ConcreteDataType::Timestamp(t) => match t {
                TimestampType::Second(v) => write!(f, "{}", v.name()),
                TimestampType::Millisecond(v) => write!(f, "{}", v.name()),
                TimestampType::Microsecond(v) => write!(f, "{}", v.name()),
                TimestampType::Nanosecond(v) => write!(f, "{}", v.name()),
            },
            ConcreteDataType::Time(t) => match t {
                TimeType::Second(v) => write!(f, "{}", v.name()),
                TimeType::Millisecond(v) => write!(f, "{}", v.name()),
                TimeType::Microsecond(v) => write!(f, "{}", v.name()),
                TimeType::Nanosecond(v) => write!(f, "{}", v.name()),
            },
            ConcreteDataType::Interval(i) => match i {
                IntervalType::YearMonth(v) => write!(f, "{}", v.name()),
                IntervalType::DayTime(v) => write!(f, "{}", v.name()),
                IntervalType::MonthDayNano(v) => write!(f, "{}", v.name()),
            },
            ConcreteDataType::Duration(d) => match d {
                DurationType::Second(v) => write!(f, "{}", v.name()),
                DurationType::Millisecond(v) => write!(f, "{}", v.name()),
                DurationType::Microsecond(v) => write!(f, "{}", v.name()),
                DurationType::Nanosecond(v) => write!(f, "{}", v.name()),
            },
            ConcreteDataType::Decimal128(v) => write!(f, "{}", v.name()),
            ConcreteDataType::List(v) => write!(f, "{}", v.name()),
            ConcreteDataType::Dictionary(v) => write!(f, "{}", v.name()),
        }
    }
}
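With `Display` deferring to each type's `name()`, unit-bearing types now render their unit and decimals render precision and scale. The expected strings below are taken from the updated tests further down in this diff; the `datatypes` crate path is assumed from this file's own `use` statements:

```rust
use datatypes::data_type::ConcreteDataType;

fn main() {
    assert_eq!(
        ConcreteDataType::timestamp_millisecond_datatype().to_string(),
        "TimestampMillisecond"
    );
    assert_eq!(
        ConcreteDataType::duration_second_datatype().to_string(),
        "DurationSecond"
    );
    assert_eq!(
        ConcreteDataType::decimal128_datatype(10, 2).to_string(),
        "Decimal(10, 2)"
    );
}
```
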
@@ -134,6 +159,7 @@ impl ConcreteDataType {
            | ConcreteDataType::Time(_)
            | ConcreteDataType::Interval(_)
            | ConcreteDataType::Duration(_)
            | ConcreteDataType::Decimal128(_)
        )
    }

@@ -150,6 +176,7 @@ impl ConcreteDataType {
            | ConcreteDataType::Time(_)
            | ConcreteDataType::Interval(_)
            | ConcreteDataType::Duration(_)
            | ConcreteDataType::Decimal128(_)
        )
    }

@@ -183,6 +210,10 @@ impl ConcreteDataType {
        matches!(self, ConcreteDataType::Timestamp(_))
    }

    pub fn is_decimal(&self) -> bool {
        matches!(self, ConcreteDataType::Decimal128(_))
    }

    pub fn numerics() -> Vec<ConcreteDataType> {
        vec![
            ConcreteDataType::int8_datatype(),
@@ -235,6 +266,13 @@ impl ConcreteDataType {
        }
    }

    pub fn as_decimal128(&self) -> Option<Decimal128Type> {
        match self {
            ConcreteDataType::Decimal128(d) => Some(*d),
            _ => None,
        }
    }

    /// Checks if the data type can cast to another data type.
    pub fn can_arrow_type_cast_to(&self, to_type: &ConcreteDataType) -> bool {
        let array = arrow_array::new_empty_array(&self.as_arrow_type());
@@ -292,6 +330,9 @@ impl TryFrom<&ArrowDataType> for ConcreteDataType {
            ArrowDataType::Duration(u) => {
                ConcreteDataType::Duration(DurationType::from_unit(u.into()))
            }
            ArrowDataType::Decimal128(precision, scale) => {
                ConcreteDataType::decimal128_datatype(*precision, *scale)
            }
            _ => {
                return error::UnsupportedArrowTypeSnafu {
                    arrow_type: dt.clone(),
@@ -454,13 +495,21 @@ impl ConcreteDataType {
    ) -> ConcreteDataType {
        ConcreteDataType::Dictionary(DictionaryType::new(key_type, value_type))
    }

    pub fn decimal128_datatype(precision: u8, scale: i8) -> ConcreteDataType {
        ConcreteDataType::Decimal128(Decimal128Type::new(precision, scale))
    }

    pub fn decimal128_default_datatype() -> ConcreteDataType {
        Self::decimal128_datatype(DECIMAL128_MAX_PRECISION, DECIMAL_DEFAULT_SCALE)
    }
}

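Decimal128 now round-trips through the type system: `ArrowDataType::Decimal128(p, s)` maps onto `decimal128_datatype(p, s)`, and the default constructor uses Arrow's maximum precision with the default scale. A short usage sketch grounded only in the methods added above:

```rust
use datatypes::data_type::ConcreteDataType;

fn main() {
    let dt = ConcreteDataType::decimal128_datatype(10, 2);
    assert!(dt.is_decimal());
    // `as_decimal128` hands back the concrete type for callers that need
    // precision/scale; non-decimal types return None.
    assert!(dt.as_decimal128().is_some());
    assert!(ConcreteDataType::int32_datatype().as_decimal128().is_none());

    // The default decimal uses DECIMAL128_MAX_PRECISION / DECIMAL_DEFAULT_SCALE.
    let default = ConcreteDataType::decimal128_default_datatype();
    assert!(default.is_decimal());
}
```
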
/// Data type abstraction.
#[enum_dispatch::enum_dispatch]
pub trait DataType: std::fmt::Debug + Send + Sync {
    /// Name of this data type.
    fn name(&self) -> &str;
    fn name(&self) -> String;

    /// Returns id of the Logical data type.
    fn logical_type_id(&self) -> LogicalTypeId;
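`name()` moves from `&str` to `String` because a decimal's name depends on its precision and scale, so it can no longer be a `'static` literal. A self-contained sketch of the two implementation styles this forces; `Decimal128Type`'s exact formatting is inferred from the test expectation "Decimal(10, 2)":

```rust
trait DataType {
    fn name(&self) -> String;
}

struct BooleanType;
struct Decimal128Type {
    precision: u8,
    scale: i8,
}

impl DataType for BooleanType {
    fn name(&self) -> String {
        // Static names simply allocate on demand now.
        "Boolean".to_string()
    }
}

impl DataType for Decimal128Type {
    fn name(&self) -> String {
        // Dynamic names are the reason the signature changed.
        format!("Decimal({}, {})", self.precision, self.scale)
    }
}

fn main() {
    assert_eq!(BooleanType.name(), "Boolean");
    assert_eq!(Decimal128Type { precision: 10, scale: 2 }.name(), "Decimal(10, 2)");
}
```
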
@@ -491,7 +540,7 @@ mod tests {
    fn test_concrete_type_as_datatype_trait() {
        let concrete_type = ConcreteDataType::boolean_datatype();

        assert_eq!("Boolean", concrete_type.name());
        assert_eq!("Boolean", concrete_type.to_string());
        assert_eq!(Value::Boolean(false), concrete_type.default_value());
        assert_eq!(LogicalTypeId::Boolean, concrete_type.logical_type_id());
        assert_eq!(ArrowDataType::Boolean, concrete_type.as_arrow_type());
@@ -613,6 +662,14 @@ mod tests {
        assert!(ConcreteDataType::boolean_datatype().is_boolean());
    }

    #[test]
    fn test_is_decimal() {
        assert!(!ConcreteDataType::int32_datatype().is_decimal());
        assert!(!ConcreteDataType::float32_datatype().is_decimal());
        assert!(ConcreteDataType::decimal128_datatype(10, 2).is_decimal());
        assert!(ConcreteDataType::decimal128_datatype(18, 6).is_decimal());
    }

    #[test]
    fn test_is_stringifiable() {
        assert!(!ConcreteDataType::int32_datatype().is_stringifiable());
@@ -637,6 +694,7 @@ mod tests {
        assert!(ConcreteDataType::duration_millisecond_datatype().is_stringifiable());
        assert!(ConcreteDataType::duration_microsecond_datatype().is_stringifiable());
        assert!(ConcreteDataType::duration_nanosecond_datatype().is_stringifiable());
        assert!(ConcreteDataType::decimal128_datatype(10, 2).is_stringifiable());
    }

    #[test]
@@ -670,6 +728,8 @@ mod tests {

        assert!(!ConcreteDataType::float32_datatype().is_signed());
        assert!(!ConcreteDataType::float64_datatype().is_signed());

        assert!(ConcreteDataType::decimal128_datatype(10, 2).is_signed());
    }

    #[test]
@@ -695,6 +755,7 @@ mod tests {
        assert!(!ConcreteDataType::duration_millisecond_datatype().is_unsigned());
        assert!(!ConcreteDataType::duration_microsecond_datatype().is_unsigned());
        assert!(!ConcreteDataType::duration_nanosecond_datatype().is_unsigned());
        assert!(!ConcreteDataType::decimal128_datatype(10, 2).is_unsigned());

        assert!(ConcreteDataType::uint8_datatype().is_unsigned());
        assert!(ConcreteDataType::uint16_datatype().is_unsigned());
@@ -723,90 +784,68 @@ mod tests {

    #[test]
    fn test_display_concrete_data_type() {
        assert_eq!(ConcreteDataType::null_datatype().to_string(), "Null");
        assert_eq!(ConcreteDataType::boolean_datatype().to_string(), "Boolean");
        assert_eq!(ConcreteDataType::binary_datatype().to_string(), "Binary");
        assert_eq!(ConcreteDataType::int8_datatype().to_string(), "Int8");
        assert_eq!(ConcreteDataType::int16_datatype().to_string(), "Int16");
        assert_eq!(ConcreteDataType::int32_datatype().to_string(), "Int32");
        assert_eq!(ConcreteDataType::int64_datatype().to_string(), "Int64");
        assert_eq!(ConcreteDataType::uint8_datatype().to_string(), "UInt8");
        assert_eq!(ConcreteDataType::uint16_datatype().to_string(), "UInt16");
        assert_eq!(ConcreteDataType::uint32_datatype().to_string(), "UInt32");
        assert_eq!(ConcreteDataType::uint64_datatype().to_string(), "UInt64");
        assert_eq!(ConcreteDataType::float32_datatype().to_string(), "Float32");
        assert_eq!(ConcreteDataType::float64_datatype().to_string(), "Float64");
        assert_eq!(ConcreteDataType::string_datatype().to_string(), "String");
        assert_eq!(ConcreteDataType::date_datatype().to_string(), "Date");
        assert_eq!(
            ConcreteDataType::from_arrow_type(&ArrowDataType::Null).to_string(),
            "Null"
            ConcreteDataType::timestamp_millisecond_datatype().to_string(),
            "TimestampMillisecond"
        );
        assert_eq!(
            ConcreteDataType::from_arrow_type(&ArrowDataType::Boolean).to_string(),
            "Boolean"
            ConcreteDataType::time_millisecond_datatype().to_string(),
            "TimeMillisecond"
        );
        assert_eq!(
            ConcreteDataType::from_arrow_type(&ArrowDataType::Binary).to_string(),
            "Binary"
        );
        assert_eq!(
            ConcreteDataType::from_arrow_type(&ArrowDataType::LargeBinary).to_string(),
            "Binary"
        );
        assert_eq!(
            ConcreteDataType::from_arrow_type(&ArrowDataType::Int8).to_string(),
            "Int8"
        );
        assert_eq!(
            ConcreteDataType::from_arrow_type(&ArrowDataType::Int16).to_string(),
            "Int16"
        );
        assert_eq!(
            ConcreteDataType::from_arrow_type(&ArrowDataType::Int32).to_string(),
            "Int32"
        );
        assert_eq!(
            ConcreteDataType::from_arrow_type(&ArrowDataType::Int64).to_string(),
            "Int64"
        );
        assert_eq!(
            ConcreteDataType::from_arrow_type(&ArrowDataType::UInt8).to_string(),
            "UInt8"
        );
        assert_eq!(
            ConcreteDataType::from_arrow_type(&ArrowDataType::UInt16).to_string(),
            "UInt16"
        );
        assert_eq!(
            ConcreteDataType::from_arrow_type(&ArrowDataType::UInt32).to_string(),
            "UInt32"
        );
        assert_eq!(
            ConcreteDataType::from_arrow_type(&ArrowDataType::UInt64).to_string(),
            "UInt64"
        );
        assert_eq!(
            ConcreteDataType::from_arrow_type(&ArrowDataType::Float32).to_string(),
            "Float32"
        );
        assert_eq!(
            ConcreteDataType::from_arrow_type(&ArrowDataType::Float64).to_string(),
            "Float64"
        );
        assert_eq!(
            ConcreteDataType::from_arrow_type(&ArrowDataType::Utf8).to_string(),
            "String"
        );
        assert_eq!(
            ConcreteDataType::from_arrow_type(&ArrowDataType::List(Arc::new(Field::new(
                "item",
                ArrowDataType::Int32,
                true,
            ))))
            .to_string(),
            "List"
        );
        assert_eq!(
            ConcreteDataType::from_arrow_type(&ArrowDataType::Date32).to_string(),
            "Date"
        );
        assert_eq!(ConcreteDataType::time_second_datatype().to_string(), "Time");
        assert_eq!(
            ConcreteDataType::from_arrow_type(&ArrowDataType::Interval(
                arrow_schema::IntervalUnit::MonthDayNano,
            ))
            .to_string(),
            "Interval"
            ConcreteDataType::interval_month_day_nano_datatype().to_string(),
            "IntervalMonthDayNano"
        );
        assert_eq!(
            ConcreteDataType::duration_second_datatype().to_string(),
            "Duration"
            "DurationSecond"
        );
        assert_eq!(
            ConcreteDataType::decimal128_datatype(10, 2).to_string(),
            "Decimal(10, 2)"
        );
        // Nested types
        assert_eq!(
            ConcreteDataType::list_datatype(ConcreteDataType::int32_datatype()).to_string(),
            "List<Int32>"
        );
        assert_eq!(
            ConcreteDataType::list_datatype(ConcreteDataType::Dictionary(DictionaryType::new(
                ConcreteDataType::int32_datatype(),
                ConcreteDataType::string_datatype()
            )))
            .to_string(),
            "List<Dictionary<Int32, String>>"
        );
        assert_eq!(
            ConcreteDataType::list_datatype(ConcreteDataType::list_datatype(
                ConcreteDataType::list_datatype(ConcreteDataType::int32_datatype())
            ))
            .to_string(),
            "List<List<List<Int32>>>"
        );
        assert_eq!(
            ConcreteDataType::dictionary_datatype(
                ConcreteDataType::int32_datatype(),
                ConcreteDataType::string_datatype()
            )
            .to_string(),
            "Dictionary<Int32, String>"
        );
    }
}

@@ -122,6 +122,23 @@ pub enum Error {

    #[snafu(display("Failed to unpack value to given type: {}", reason))]
    TryFromValue { reason: String, location: Location },

    #[snafu(display("Failed to specify the precision {} and scale {}", precision, scale))]
    InvalidPrecisionOrScale {
        precision: u8,
        scale: i8,
        #[snafu(source)]
        error: arrow::error::ArrowError,
        location: Location,
    },

    #[snafu(display("Value exceeds the precision {} bound", precision))]
    ValueExceedsPrecision {
        precision: u8,
        #[snafu(source)]
        error: arrow::error::ArrowError,
        location: Location,
    },
}

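Both new variants carry an `ArrowError` source, so call sites can attach them with snafu's generated context selectors. A self-contained sketch of the pattern with a stand-in source error type (the real code wraps `arrow::error::ArrowError`):

```rust
use snafu::{ResultExt, Snafu};

#[derive(Debug, Snafu)]
enum Error {
    #[snafu(display("Value exceeds the precision {} bound", precision))]
    ValueExceedsPrecision {
        precision: u8,
        // Stands in for arrow::error::ArrowError in the real code.
        source: std::num::TryFromIntError,
    },
}

// A stand-in for a fallible arrow decimal check: here we just require the
// value to fit in an i8, as if the precision bounded it.
fn check_value(value: i64, precision: u8) -> Result<(), Error> {
    i8::try_from(value)
        .map(|_| ())
        .context(ValueExceedsPrecisionSnafu { precision })
}

fn main() {
    assert!(check_value(12, 3).is_ok());
    let err = check_value(10_000, 3).unwrap_err();
    assert_eq!(err.to_string(), "Value exceeds the precision 3 bound");
}
```
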
impl ErrorExt for Error {

@@ -14,6 +14,7 @@

use std::any::Any;

use common_decimal::Decimal128;
use common_time::{Date, DateTime};

use crate::types::{
@@ -22,8 +23,8 @@ use crate::types::{
};
use crate::value::{ListValue, ListValueRef, Value};
use crate::vectors::{
    BinaryVector, BooleanVector, DateTimeVector, DateVector, ListVector, MutableVector,
    PrimitiveVector, StringVector, Vector,
    BinaryVector, BooleanVector, DateTimeVector, DateVector, Decimal128Vector, ListVector,
    MutableVector, PrimitiveVector, StringVector, Vector,
};

fn get_iter_capacity<T, I: Iterator<Item = T>>(iter: &I) -> usize {
@@ -277,6 +278,27 @@ impl<'a> ScalarRef<'a> for Date {
    }
}

impl Scalar for Decimal128 {
    type VectorType = Decimal128Vector;
    type RefType<'a> = Decimal128;

    fn as_scalar_ref(&self) -> Self::RefType<'_> {
        *self
    }

    fn upcast_gat<'short, 'long: 'short>(long: Self::RefType<'long>) -> Self::RefType<'short> {
        long
    }
}

impl<'a> ScalarRef<'a> for Decimal128 {
    type ScalarType = Decimal128;

    fn to_owned_scalar(&self) -> Self::ScalarType {
        *self
    }
}

impl Scalar for DateTime {
    type VectorType = DateTimeVector;
    type RefType<'a> = DateTime;
@@ -396,6 +418,13 @@ mod tests {
        assert_eq!(date, date.to_owned_scalar());
    }

    #[test]
    fn test_decimal_scalar() {
        let decimal = Decimal128::new(1, 1, 1);
        assert_eq!(decimal, decimal.as_scalar_ref());
        assert_eq!(decimal, decimal.to_owned_scalar());
    }

    #[test]
    fn test_datetime_scalar() {
        let dt = DateTime::new(123);

@@ -13,7 +13,7 @@
// limitations under the License.

mod column_schema;
mod constraint;
pub mod constraint;
mod raw;

use std::collections::HashMap;

@@ -24,9 +24,9 @@ use crate::value::Value;
use crate::vectors::operations::VectorOp;
use crate::vectors::{TimestampMillisecondVector, VectorRef};

const CURRENT_TIMESTAMP: &str = "current_timestamp";
const CURRENT_TIMESTAMP_FN: &str = "current_timestamp()";
const NOW_FN: &str = "now()";
pub const CURRENT_TIMESTAMP: &str = "current_timestamp";
pub const CURRENT_TIMESTAMP_FN: &str = "current_timestamp()";
pub const NOW_FN: &str = "now()";

/// Column's default constraint.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]

@@ -30,6 +30,8 @@ pub enum LogicalTypeId {
    Float32,
    Float64,

    Decimal128,

    // String types:
    String,
    Binary,
@@ -123,6 +125,7 @@ impl LogicalTypeId {
            LogicalTypeId::DurationMillisecond => ConcreteDataType::duration_millisecond_datatype(),
            LogicalTypeId::DurationMicrosecond => ConcreteDataType::duration_microsecond_datatype(),
            LogicalTypeId::DurationNanosecond => ConcreteDataType::duration_nanosecond_datatype(),
            LogicalTypeId::Decimal128 => ConcreteDataType::decimal128_default_datatype(),
        }
    }
}

@@ -17,6 +17,7 @@ mod boolean_type;
pub mod cast;
mod date_type;
mod datetime_type;
mod decimal_type;
mod dictionary_type;
mod duration_type;
mod interval_type;
@@ -29,9 +30,10 @@ mod timestamp_type;

pub use binary_type::BinaryType;
pub use boolean_type::BooleanType;
pub use cast::cast_with_opt;
pub use cast::{cast, cast_with_opt};
pub use date_type::DateType;
pub use datetime_type::DateTimeType;
pub use decimal_type::Decimal128Type;
pub use dictionary_type::DictionaryType;
pub use duration_type::{
    DurationMicrosecondType, DurationMillisecondType, DurationNanosecondType, DurationSecondType,

@@ -34,8 +34,8 @@ impl BinaryType {
}

impl DataType for BinaryType {
    fn name(&self) -> &str {
        "Binary"
    fn name(&self) -> String {
        "Binary".to_string()
    }

    fn logical_type_id(&self) -> LogicalTypeId {

@@ -34,8 +34,8 @@ impl BooleanType {
}

impl DataType for BooleanType {
    fn name(&self) -> &str {
        "Boolean"
    fn name(&self) -> String {
        "Boolean".to_string()
    }

    fn logical_type_id(&self) -> LogicalTypeId {

@@ -16,6 +16,21 @@ use crate::data_type::{ConcreteDataType, DataType};
use crate::error::{self, Error, Result};
use crate::types::TimeType;
use crate::value::Value;
use crate::vectors::Helper;

/// Casts a value to the destination ConcreteDataType temporarily,
/// keeping the same behavior as arrow-rs.
pub fn cast(src_value: Value, dest_type: &ConcreteDataType) -> Result<Value> {
    if src_value == Value::Null {
        return Ok(Value::Null);
    }
    let src_type = src_value.data_type();
    let scalar_value = src_value.try_to_scalar_value(&src_type)?;
    let new_value = Helper::try_from_scalar_value(scalar_value, 1)?
        .cast(dest_type)?
        .get(0);
    Ok(new_value)
}

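`cast` lifts a `Value` into a one-element vector, casts it with the arrow kernels, and reads the result back out, which is what keeps its semantics aligned with arrow-rs. A hedged usage sketch; crate paths follow this file's own `use` statements, and the i32 to i64 example assumes arrow supports that widening cast (it does):

```rust
use datatypes::data_type::ConcreteDataType;
use datatypes::types::cast;
use datatypes::value::Value;

fn main() {
    // Null short-circuits: no vector round-trip is needed.
    let v = cast(Value::Null, &ConcreteDataType::int64_datatype()).unwrap();
    assert_eq!(v, Value::Null);

    // A concrete value is lifted into a length-1 vector, cast with the
    // arrow kernels, and read back out.
    let v = cast(Value::Int32(42), &ConcreteDataType::int64_datatype()).unwrap();
    assert_eq!(v, Value::Int64(42));
}
```
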
/// Cast options for cast functions.
#[derive(Debug, Default, Clone, PartialEq, Eq, Hash)]
@@ -233,7 +248,7 @@ mod tests {
        assert!(res.is_err());
        assert_eq!(
            res.unwrap_err().to_string(),
            "Type Timestamp with value 1970-01-01 08:00:10+0800 can't be cast to the destination type Int8"
            "Type TimestampSecond with value 1970-01-01 08:00:10+0800 can't be cast to the destination type Int8"
        );
    }

@@ -32,8 +32,8 @@ use crate::vectors::{DateVector, DateVectorBuilder, MutableVector, Vector};
pub struct DateType;

impl DataType for DateType {
    fn name(&self) -> &str {
        "Date"
    fn name(&self) -> String {
        "Date".to_string()
    }

    fn logical_type_id(&self) -> LogicalTypeId {