Mirror of https://github.com/GreptimeTeam/greptimedb.git, synced 2025-12-25 07:30:02 +00:00.

Compare commits: v0.5.0-nig...script_wra (126 commits)
126 commits (abbreviated SHA1):

24f5e56196, c85d569797, e95a8e070c, b71bf11772, ee0a3972fc, 8fb40c66a4, e855f6370e,
fb5dcbc40c, 0d109436b8, cbae03af07, 902e6ead60, f9e7762c5b, 0b421b5177, aa89d9deef,
b3ffe5cd1e, d6ef7a75de, 6344b1e0db, 7d506b3c5f, 96e12e9ee5, a9db80ab1a, 5f5dbe0172,
dac7a41cbd, de416465a6, 58c13739f0, 806400caff, f78dab078c, 7a14db68a6, c26f2f94c0,
781f2422b3, 7e68ecc498, 9ce9421850, c0df2b9086, 29d344ccd2, fe2fc723bc, 2332305b90,
9ccd182109, ae8153515b, cce5edc88e, 616eb04914, 7c53f92e4b, 445bd92c7a, 92a9802343,
abbac46c05, d0d0f091f0, 707a0d5626, e42767d500, ca18ccf7d4, b1d8812806, 7547e7ebdf,
6100cb335a, 0badb3715e, bd9c2f2666, b3edbef1f3, 9e58bba363, 3a4c9f2b45, 64a36e9b36,
33566ea0f0, ff8ab6763b, 00e4bd45f0, 85eebcb16f, 102e43aace, 56fc77e573, 4c76d4d97e,
9e5cdf47d9, bdb677dc52, 99dbb7401c, a7bbd61f28, efc5abfc02, 43a7457e15, 20f01219e9,
dc351a6de9, 5f87b1f714, b9146c88ff, 9558b3c201, da68d8ce4b, 01867adaa7, d9eeeee06e,
4fcda272fb, ce959ddd3f, 730a3faa02, 91820a8006, 500e299e40, ac4b6cd7f0, 3ab494764f,
5608035074, e083b8011c, 06327fba1e, 06da33b1ed, 2aa6ac5731, b28af9443b, 142035340d,
d2cf72e0f1, ae27fbc7f2, 9bd10134dd, 3329da5b72, a24f8c96b3, a691cff0c4, f92b55c745,
a9e5b902fd, 5b978269cc, 3dffc7b62c, 968c872d15, e2a770f8de, dc46e96879, 8f3b299a45,
506e6887f3, 1757061272, 6599bb5a46, 3f981ef2b3, 5cff735e02, f5eede4ce1, 22ee45f3df,
8fd0766754, af7107565a, f02dc0e274, b53537e69b, 0cd6dacb45, a3611516a2, 93f21b188d,
b9a7c2db7e, c62ba79759, 9d029f7337, f1e8afcda9, 9697632888, 69ee2c336c, 1f57c6b1f0
@@ -12,9 +12,4 @@ rustflags = [
     "-Wclippy::print_stdout",
     "-Wclippy::print_stderr",
     "-Wclippy::implicit_clone",
-
-    # It seems clippy has made a false positive decision here when upgrading rust toolchain to
-    # nightly-2023-08-07, we do need it to be borrowed mutably.
-    # Allow it for now; try disallow it when the toolchain is upgraded in the future.
-    "-Aclippy::needless_pass_by_ref_mut",
 ]
35  .github/ISSUE_TEMPLATE/bug_report.yml (vendored)
@@ -41,13 +41,27 @@ body:
|
||||
required: true
|
||||
|
||||
- type: textarea
|
||||
id: what-happened
|
||||
id: reproduce
|
||||
attributes:
|
||||
label: What happened?
|
||||
label: Minimal reproduce step
|
||||
description: |
|
||||
Tell us what happened and also what you would have expected to
|
||||
happen instead.
|
||||
placeholder: "Describe the bug"
|
||||
Please walk us through and provide steps and details on how
|
||||
to reproduce the issue. If possible, provide scripts that we
|
||||
can run to trigger the bug.
|
||||
validations:
|
||||
required: true
|
||||
|
||||
- type: textarea
|
||||
id: expected-manner
|
||||
attributes:
|
||||
label: What did you expect to see?
|
||||
validations:
|
||||
required: true
|
||||
|
||||
- type: textarea
|
||||
id: actual-manner
|
||||
attributes:
|
||||
label: What did you see instead?
|
||||
validations:
|
||||
required: true
|
||||
|
||||
@@ -72,14 +86,3 @@ body:
|
||||
trace. This will be automatically formatted into code, so no
|
||||
need for backticks.
|
||||
render: bash
|
||||
|
||||
- type: textarea
|
||||
id: reproduce
|
||||
attributes:
|
||||
label: How can we reproduce the bug?
|
||||
description: |
|
||||
Please walk us through and provide steps and details on how
|
||||
to reproduce the issue. If possible, provide scripts that we
|
||||
can run to trigger the bug.
|
||||
validations:
|
||||
required: true
|
||||
|
||||
@@ -31,10 +31,12 @@ runs:
|
||||
echo "prerelease=false" >> $GITHUB_ENV
|
||||
echo "makeLatest=true" >> $GITHUB_ENV
|
||||
echo "generateReleaseNotes=false" >> $GITHUB_ENV
|
||||
echo "omitBody=true" >> $GITHUB_ENV
|
||||
else
|
||||
echo "prerelease=true" >> $GITHUB_ENV
|
||||
echo "makeLatest=false" >> $GITHUB_ENV
|
||||
echo "generateReleaseNotes=true" >> $GITHUB_ENV
|
||||
echo "omitBody=false" >> $GITHUB_ENV
|
||||
fi
|
||||
|
||||
- name: Publish release
|
||||
@@ -45,6 +47,7 @@ runs:
|
||||
makeLatest: ${{ env.makeLatest }}
|
||||
tag: ${{ inputs.version }}
|
||||
generateReleaseNotes: ${{ env.generateReleaseNotes }}
|
||||
omitBody: ${{ env.omitBody }} # omitBody is true when the release is a official release.
|
||||
allowUpdates: true
|
||||
artifacts: |
|
||||
**/greptime-*/*
|
||||
|
||||
2  .github/workflows/apidoc.yml (vendored)

@@ -13,7 +13,7 @@ on:
 name: Build API docs
 
 env:
-  RUST_TOOLCHAIN: nightly-2023-08-07
+  RUST_TOOLCHAIN: nightly-2023-10-21
 
 jobs:
   apidoc:
17  .github/workflows/develop.yml (vendored)

@@ -29,7 +29,7 @@ concurrency:
   cancel-in-progress: true
 
 env:
-  RUST_TOOLCHAIN: nightly-2023-08-07
+  RUST_TOOLCHAIN: nightly-2023-10-21
 
 jobs:
   typos:
@@ -42,7 +42,10 @@ jobs:
   check:
     name: Check
     if: github.event.pull_request.draft == false
-    runs-on: ubuntu-20.04
+    runs-on: ${{ matrix.os }}
+    strategy:
+      matrix:
+        os: [ windows-latest-8-cores, ubuntu-20.04 ]
     timeout-minutes: 60
     steps:
       - uses: actions/checkout@v3
@@ -161,15 +164,18 @@ jobs:
|
||||
uses: Swatinem/rust-cache@v2
|
||||
- name: Install latest nextest release
|
||||
uses: taiki-e/install-action@nextest
|
||||
- name: Install cargo-llvm-cov
|
||||
uses: taiki-e/install-action@cargo-llvm-cov
|
||||
- name: Install Python
|
||||
uses: actions/setup-python@v4
|
||||
with:
|
||||
python-version: '3.10'
|
||||
- name: Install PyArrow Package
|
||||
run: pip install pyarrow
|
||||
- name: Install cargo-llvm-cov
|
||||
uses: taiki-e/install-action@cargo-llvm-cov
|
||||
- name: Collect coverage data
|
||||
- name: Setup etcd server
|
||||
working-directory: tests-integration/fixtures/etcd
|
||||
run: docker compose -f docker-compose-standalone.yml up -d --wait
|
||||
- name: Run nextest cases
|
||||
run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F pyo3_backend -F dashboard
|
||||
env:
|
||||
CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=lld"
|
||||
@@ -179,6 +185,7 @@ jobs:
       GT_S3_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
       GT_S3_ACCESS_KEY: ${{ secrets.S3_ACCESS_KEY }}
       GT_S3_REGION: ${{ secrets.S3_REGION }}
+      GT_ETCD_ENDPOINTS: http://127.0.0.1:2379
       UNITTEST_LOG_DIR: "__unittest_logs"
       - name: Codecov upload
         uses: codecov/codecov-action@v2
2  .github/workflows/license.yaml (vendored)

@@ -13,4 +13,4 @@ jobs:
     steps:
       - uses: actions/checkout@v2
       - name: Check License Header
-        uses: apache/skywalking-eyes/header@df70871af1a8109c9a5b1dc824faaf65246c5236
+        uses: korandoru/hawkeye@v3
2  .github/workflows/nightly-ci.yml (vendored)

@@ -12,7 +12,7 @@ concurrency:
   cancel-in-progress: true
 
 env:
-  RUST_TOOLCHAIN: nightly-2023-08-07
+  RUST_TOOLCHAIN: nightly-2023-10-21
 
 jobs:
   sqlness:
2  .github/workflows/release.yml (vendored)

@@ -82,7 +82,7 @@ on:
 # Use env variables to control all the release process.
 env:
   # The arguments of building greptime.
-  RUST_TOOLCHAIN: nightly-2023-08-07
+  RUST_TOOLCHAIN: nightly-2023-10-21
   CARGO_PROFILE: nightly
 
   # Controls whether to run tests, include unit-test, integration-test and sqlness.
@@ -1,14 +0,0 @@
-header:
-  license:
-    spdx-id: Apache-2.0
-    copyright-owner: Greptime Team
-
-  paths:
-    - "**/*.rs"
-    - "**/*.py"
-
-comment: on-failure
-
-dependency:
-  files:
-    - Cargo.toml
@@ -49,6 +49,7 @@ GreptimeDB uses the [Apache 2.0 license](https://github.com/GreptimeTeam/greptim
 ### Before PR
 
 - To ensure that community is free and confident in its ability to use your contributions, please sign the Contributor License Agreement (CLA) which will be incorporated in the pull request process.
+- Make sure all files have proper license header (running `docker run --rm -v $(pwd):/github/workspace ghcr.io/korandoru/hawkeye-native:v3 format` from the project root).
 - Make sure all your codes are formatted and follow the [coding style](https://pingcap.github.io/style-guide/rust/).
 - Make sure all unit tests are passed (using `cargo test --workspace` or [nextest](https://nexte.st/index.html) `cargo nextest run`).
 - Make sure all clippy warnings are fixed (you can check it locally by running `cargo clippy --workspace --all-targets -- -D warnings`).
1482  Cargo.lock (generated)

File diff suppressed because it is too large.
24  Cargo.toml
@@ -43,25 +43,27 @@ members = [
|
||||
"src/partition",
|
||||
"src/plugins",
|
||||
"src/promql",
|
||||
"src/puffin",
|
||||
"src/query",
|
||||
"src/script",
|
||||
"src/servers",
|
||||
"src/session",
|
||||
"src/sql",
|
||||
"src/storage",
|
||||
"src/store-api",
|
||||
"src/table",
|
||||
"src/index",
|
||||
"tests-integration",
|
||||
"tests/runner",
|
||||
]
|
||||
resolver = "2"
|
||||
|
||||
[workspace.package]
|
||||
version = "0.4.2"
|
||||
version = "0.4.4"
|
||||
edition = "2021"
|
||||
license = "Apache-2.0"
|
||||
|
||||
[workspace.dependencies]
|
||||
ahash = { version = "0.8", features = ["compile-time-rng"] }
|
||||
aquamarine = "0.3"
|
||||
arrow = { version = "47.0" }
|
||||
arrow-array = "47.0"
|
||||
@@ -69,7 +71,10 @@ arrow-flight = "47.0"
|
||||
arrow-schema = { version = "47.0", features = ["serde"] }
|
||||
async-stream = "0.3"
|
||||
async-trait = "0.1"
|
||||
base64 = "0.21"
|
||||
bigdecimal = "0.4.2"
|
||||
bitflags = "2.4.1"
|
||||
bytemuck = "1.12"
|
||||
chrono = { version = "0.4", features = ["serde"] }
|
||||
datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
|
||||
datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
|
||||
@@ -80,13 +85,15 @@ datafusion-sql = { git = "https://github.com/apache/arrow-datafusion.git", rev =
|
||||
datafusion-substrait = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
|
||||
derive_builder = "0.12"
|
||||
etcd-client = "0.12"
|
||||
fst = "0.4.7"
|
||||
futures = "0.3"
|
||||
futures-util = "0.3"
|
||||
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "5da72f1cae6b24315e5afc87520aaf7b4d6bb872" }
|
||||
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "b1d403088f02136bcebde53d604f491c260ca8e2" }
|
||||
humantime-serde = "1.1"
|
||||
itertools = "0.10"
|
||||
lazy_static = "1.4"
|
||||
meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "abbd357c1e193cd270ea65ee7652334a150b628f" }
|
||||
mockall = "0.11.4"
|
||||
moka = "0.12"
|
||||
once_cell = "1.18"
|
||||
opentelemetry-proto = { git = "https://github.com/waynexia/opentelemetry-rust.git", rev = "33841b38dda79b15f2024952be5f32533325ca02", features = [
|
||||
@@ -96,23 +103,25 @@ opentelemetry-proto = { git = "https://github.com/waynexia/opentelemetry-rust.gi
|
||||
] }
|
||||
parquet = "47.0"
|
||||
paste = "1.0"
|
||||
pin-project = "1.0"
|
||||
prometheus = { version = "0.13.3", features = ["process"] }
|
||||
prost = "0.12"
|
||||
raft-engine = { git = "https://github.com/tikv/raft-engine.git", rev = "22dfb426cd994602b57725ef080287d3e53db479" }
|
||||
rand = "0.8"
|
||||
regex = "1.8"
|
||||
regex-automata = { version = "0.1", features = ["transducer"] }
|
||||
reqwest = { version = "0.11", default-features = false, features = [
|
||||
"json",
|
||||
"rustls-tls-native-roots",
|
||||
"stream",
|
||||
] }
|
||||
rust_decimal = "1.32.0"
|
||||
rust_decimal = "1.33"
|
||||
serde = { version = "1.0", features = ["derive"] }
|
||||
serde_json = "1.0"
|
||||
smallvec = "1"
|
||||
snafu = { version = "0.7", features = ["backtraces"] }
|
||||
snafu = "0.7"
|
||||
# on branch v0.38.x
|
||||
sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "0fbae07d0c46dc18e3381c406d8b9b8abef6b1fd", features = [
|
||||
sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "6a93567ae38d42be5c8d08b13c8ff4dde26502ef", features = [
|
||||
"visitor",
|
||||
] }
|
||||
strum = { version = "0.25", features = ["derive"] }
|
||||
@@ -122,6 +131,7 @@ tokio-util = { version = "0.7", features = ["io-util", "compat"] }
|
||||
toml = "0.7"
|
||||
tonic = { version = "0.10", features = ["tls"] }
|
||||
uuid = { version = "1", features = ["serde", "v4", "fast-rng"] }
|
||||
|
||||
## workspaces members
|
||||
api = { path = "src/api" }
|
||||
auth = { path = "src/auth" }
|
||||
@@ -132,6 +142,7 @@ common-base = { path = "src/common/base" }
|
||||
common-catalog = { path = "src/common/catalog" }
|
||||
common-config = { path = "src/common/config" }
|
||||
common-datasource = { path = "src/common/datasource" }
|
||||
common-decimal = { path = "src/common/decimal" }
|
||||
common-error = { path = "src/common/error" }
|
||||
common-function = { path = "src/common/function" }
|
||||
common-greptimedb-telemetry = { path = "src/common/greptimedb-telemetry" }
|
||||
@@ -169,7 +180,6 @@ script = { path = "src/script" }
|
||||
servers = { path = "src/servers" }
|
||||
session = { path = "src/session" }
|
||||
sql = { path = "src/sql" }
|
||||
storage = { path = "src/storage" }
|
||||
store-api = { path = "src/store-api" }
|
||||
substrait = { path = "src/common/substrait" }
|
||||
table = { path = "src/table" }
|
||||
|
||||
2  LICENSE

@@ -186,7 +186,7 @@
       same "printed page" as the copyright notice for easier
       identification within third-party archives.
 
-   Copyright 2022 Greptime Team
+   Copyright [yyyy] [name of copyright owner]
 
    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
4  Makefile

@@ -157,11 +157,11 @@ sqlness-test: ## Run sqlness test.
 
 .PHONY: check
 check: ## Cargo check all the targets.
-	cargo check --workspace --all-targets
+	cargo check --workspace --all-targets --all-features
 
 .PHONY: clippy
 clippy: ## Check clippy rules.
-	cargo clippy --workspace --all-targets -F pyo3_backend -- -D warnings
+	cargo clippy --workspace --all-targets --all-features -- -D warnings
 
 .PHONY: fmt-check
 fmt-check: ## Check code format.
13  README.md
@@ -27,14 +27,6 @@
|
||||
<a href="https://greptime.com/slack"><img src="https://img.shields.io/badge/slack-GreptimeDB-0abd59?logo=slack" alt="slack" /></a>
|
||||
</p>
|
||||
|
||||
## Upcoming Event
|
||||
Come and meet us in **KubeCon + CloudNativeCon North America 2023!**
|
||||
<p align="center">
|
||||
<picture>
|
||||
<img alt="KubeCon + CloudNativeCon North Logo" src="./docs/banner/KCCNC_NA_2023_1000x200_Email Banner.png" width="800px">
|
||||
</picture>
|
||||
</p>
|
||||
|
||||
## What is GreptimeDB
|
||||
|
||||
GreptimeDB is an open-source time-series database with a special focus on
|
||||
@@ -108,7 +100,7 @@ Please see the online document site for more installation options and [operation
|
||||
|
||||
### Get started
|
||||
|
||||
Read the [complete getting started guide](https://docs.greptime.com/getting-started/try-out-greptimedb) on our [official document site](https://docs.greptime.com/).
|
||||
Read the [complete getting started guide](https://docs.greptime.com/getting-started/overview) on our [official document site](https://docs.greptime.com/).
|
||||
|
||||
To write and query data, GreptimeDB is compatible with multiple [protocols and clients](https://docs.greptime.com/user-guide/clients/overview).
|
||||
|
||||
@@ -117,7 +109,7 @@ To write and query data, GreptimeDB is compatible with multiple [protocols and c
|
||||
### Installation
|
||||
|
||||
- [Pre-built Binaries](https://greptime.com/download):
|
||||
For Linux and macOS, you can easily download pre-built binaries including official releases and nightly builds that are ready to use.
|
||||
For Linux and macOS, you can easily download pre-built binaries including official releases and nightly builds that are ready to use.
|
||||
In most cases, downloading the version without PyO3 is sufficient. However, if you plan to run scripts in CPython (and use Python packages like NumPy and Pandas), you will need to download the version with PyO3 and install a Python with the same version as the Python in the PyO3 version.
|
||||
We recommend using virtualenv for the installation process to manage multiple Python versions.
|
||||
- [Docker Images](https://hub.docker.com/r/greptime/greptimedb)(**recommended**): pre-built
|
||||
@@ -143,6 +135,7 @@ To write and query data, GreptimeDB is compatible with multiple [protocols and c
|
||||
- [GreptimeDB Java Client](https://github.com/GreptimeTeam/greptimedb-client-java)
|
||||
- [GreptimeDB Python Client](https://github.com/GreptimeTeam/greptimedb-client-py) (WIP)
|
||||
- [GreptimeDB Rust Client](https://github.com/GreptimeTeam/greptimedb-client-rust)
|
||||
- [GreptimeDB JavaScript Client](https://github.com/GreptimeTeam/greptime-js-sdk)
|
||||
|
||||
## Project Status
|
||||
|
||||
|
||||
@@ -8,7 +8,7 @@ license.workspace = true
 arrow.workspace = true
 chrono.workspace = true
 clap = { version = "4.0", features = ["derive"] }
-client = { workspace = true }
+client.workspace = true
 futures-util.workspace = true
 indicatif = "0.17.1"
 itertools.workspace = true
@@ -152,6 +152,7 @@ fn convert_record_batch(record_batch: RecordBatch) -> (Vec<Column>, u32) {
|
||||
.unwrap_or_default(),
|
||||
datatype: datatype.into(),
|
||||
semantic_type: semantic_type as i32,
|
||||
..Default::default()
|
||||
};
|
||||
columns.push(column);
|
||||
}
|
||||
@@ -266,6 +267,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Tag as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDef {
|
||||
name: "tpep_pickup_datetime".to_string(),
|
||||
@@ -274,6 +276,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Timestamp as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDef {
|
||||
name: "tpep_dropoff_datetime".to_string(),
|
||||
@@ -282,6 +285,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDef {
|
||||
name: "passenger_count".to_string(),
|
||||
@@ -290,6 +294,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDef {
|
||||
name: "trip_distance".to_string(),
|
||||
@@ -298,6 +303,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDef {
|
||||
name: "RatecodeID".to_string(),
|
||||
@@ -306,6 +312,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDef {
|
||||
name: "store_and_fwd_flag".to_string(),
|
||||
@@ -314,6 +321,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDef {
|
||||
name: "PULocationID".to_string(),
|
||||
@@ -322,6 +330,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDef {
|
||||
name: "DOLocationID".to_string(),
|
||||
@@ -330,6 +339,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDef {
|
||||
name: "payment_type".to_string(),
|
||||
@@ -338,6 +348,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDef {
|
||||
name: "fare_amount".to_string(),
|
||||
@@ -346,6 +357,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDef {
|
||||
name: "extra".to_string(),
|
||||
@@ -354,6 +366,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDef {
|
||||
name: "mta_tax".to_string(),
|
||||
@@ -362,6 +375,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDef {
|
||||
name: "tip_amount".to_string(),
|
||||
@@ -370,6 +384,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDef {
|
||||
name: "tolls_amount".to_string(),
|
||||
@@ -378,6 +393,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDef {
|
||||
name: "improvement_surcharge".to_string(),
|
||||
@@ -386,6 +402,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDef {
|
||||
name: "total_amount".to_string(),
|
||||
@@ -394,6 +411,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDef {
|
||||
name: "congestion_surcharge".to_string(),
|
||||
@@ -402,6 +420,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDef {
|
||||
name: "airport_fee".to_string(),
|
||||
@@ -410,6 +429,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
],
|
||||
time_index: "tpep_pickup_datetime".to_string(),
|
||||
|
||||
@@ -53,33 +53,6 @@ type = "File"
|
||||
# The local file cache capacity in bytes.
|
||||
# cache_capacity = "256MB"
|
||||
|
||||
# Compaction options, see `standalone.example.toml`.
|
||||
[storage.compaction]
|
||||
max_inflight_tasks = 4
|
||||
max_files_in_level0 = 8
|
||||
max_purge_tasks = 32
|
||||
|
||||
# Storage manifest options
|
||||
[storage.manifest]
|
||||
# Region checkpoint actions margin.
|
||||
# Create a checkpoint every <checkpoint_margin> actions.
|
||||
checkpoint_margin = 10
|
||||
# Region manifest logs and checkpoints gc execution duration
|
||||
gc_duration = '10m'
|
||||
|
||||
# Storage flush options
|
||||
[storage.flush]
|
||||
# Max inflight flush tasks.
|
||||
max_flush_tasks = 8
|
||||
# Default write buffer size for a region.
|
||||
region_write_buffer_size = "32MB"
|
||||
# Interval to check whether a region needs flush.
|
||||
picker_schedule_interval = "5m"
|
||||
# Interval to auto flush a region if it has not flushed yet.
|
||||
auto_flush_interval = "1h"
|
||||
# Global write buffer size for all regions.
|
||||
global_write_buffer_size = "1GB"
|
||||
|
||||
# Mito engine options
|
||||
[[region_engine]]
|
||||
[region_engine.mito]
|
||||
@@ -91,8 +64,8 @@ worker_channel_size = 128
|
||||
worker_request_batch_size = 64
|
||||
# Number of meta action updated to trigger a new checkpoint for the manifest
|
||||
manifest_checkpoint_distance = 10
|
||||
# Manifest compression type
|
||||
manifest_compress_type = "Uncompressed"
|
||||
# Whether to compress manifest and checkpoint file by gzip (default false).
|
||||
compress_manifest = false
|
||||
# Max number of running background jobs
|
||||
max_background_jobs = 4
|
||||
# Interval to auto flush a region if it has not flushed yet.
|
||||
@@ -105,10 +78,12 @@ global_write_buffer_reject_size = "2GB"
|
||||
sst_meta_cache_size = "128MB"
|
||||
# Cache size for vectors and arrow arrays (default 512MB). Setting it to 0 to disable the cache.
|
||||
vector_cache_size = "512MB"
|
||||
# Cache size for pages of SST row groups (default 512MB). Setting it to 0 to disable the cache.
|
||||
page_cache_size = "512MB"
|
||||
# Buffer size for SST writing.
|
||||
sst_write_buffer_size = "8MB"
|
||||
|
||||
# Log options
|
||||
# Log options, see `standalone.example.toml`
|
||||
# [logging]
|
||||
# Specify logs directory.
|
||||
# dir = "/tmp/greptimedb/logs"
|
||||
# Specify the log level [info | debug | error | warn]
|
||||
# level = "info"
|
||||
|
||||
@@ -28,6 +28,13 @@ max_retry_times = 12
 # Initial retry delay of procedures, increases exponentially
 retry_delay = "500ms"
 
+# Failure detectors options.
+[failure_detector]
+threshold = 8.0
+min_std_deviation = "100ms"
+acceptable_heartbeat_pause = "3000ms"
+first_heartbeat_estimate = "1000ms"
+
 # # Datanode options.
 # [datanode]
 # # Datanode client options.
@@ -122,35 +122,35 @@ type = "File"
|
||||
# The local file cache capacity in bytes.
|
||||
# cache_capacity = "256MB"
|
||||
|
||||
# Compaction options.
|
||||
[storage.compaction]
|
||||
# Max task number that can concurrently run.
|
||||
max_inflight_tasks = 4
|
||||
# Max files in level 0 to trigger compaction.
|
||||
max_files_in_level0 = 8
|
||||
# Max task number for SST purge task after compaction.
|
||||
max_purge_tasks = 32
|
||||
|
||||
# Storage manifest options
|
||||
[storage.manifest]
|
||||
# Region checkpoint actions margin.
|
||||
# Create a checkpoint every <checkpoint_margin> actions.
|
||||
checkpoint_margin = 10
|
||||
# Region manifest logs and checkpoints gc execution duration
|
||||
gc_duration = '10m'
|
||||
|
||||
# Storage flush options
|
||||
[storage.flush]
|
||||
# Max inflight flush tasks.
|
||||
max_flush_tasks = 8
|
||||
# Default write buffer size for a region.
|
||||
region_write_buffer_size = "32MB"
|
||||
# Interval to check whether a region needs flush.
|
||||
picker_schedule_interval = "5m"
|
||||
# Mito engine options
|
||||
[[region_engine]]
|
||||
[region_engine.mito]
|
||||
# Number of region workers
|
||||
num_workers = 8
|
||||
# Request channel size of each worker
|
||||
worker_channel_size = 128
|
||||
# Max batch size for a worker to handle requests
|
||||
worker_request_batch_size = 64
|
||||
# Number of meta action updated to trigger a new checkpoint for the manifest
|
||||
manifest_checkpoint_distance = 10
|
||||
# Whether to compress manifest and checkpoint file by gzip (default false).
|
||||
compress_manifest = false
|
||||
# Max number of running background jobs
|
||||
max_background_jobs = 4
|
||||
# Interval to auto flush a region if it has not flushed yet.
|
||||
auto_flush_interval = "1h"
|
||||
# Global write buffer size for all regions.
|
||||
global_write_buffer_size = "1GB"
|
||||
# Global write buffer size threshold to reject write requests (default 2G).
|
||||
global_write_buffer_reject_size = "2GB"
|
||||
# Cache size for SST metadata (default 128MB). Setting it to 0 to disable the cache.
|
||||
sst_meta_cache_size = "128MB"
|
||||
# Cache size for vectors and arrow arrays (default 512MB). Setting it to 0 to disable the cache.
|
||||
vector_cache_size = "512MB"
|
||||
# Cache size for pages of SST row groups (default 512MB). Setting it to 0 to disable the cache.
|
||||
page_cache_size = "512MB"
|
||||
# Buffer size for SST writing.
|
||||
sst_write_buffer_size = "8MB"
|
||||
|
||||
# Log options
|
||||
# [logging]
|
||||
@@ -158,3 +158,9 @@ global_write_buffer_size = "1GB"
 # dir = "/tmp/greptimedb/logs"
 # Specify the log level [info | debug | error | warn]
 # level = "info"
+# Whether to enable tracing; default is false.
+# enable_otlp_tracing = false
+# Tracing exporter endpoint in `ip:port` format; gRPC OTLP is used as the exporter, and the default endpoint is `localhost:4317`.
+# otlp_endpoint = "localhost:4317"
+# The percentage of traces that will be sampled and exported. Valid range `[0, 1]`: 1 means all traces are sampled, 0 means none are; the default value is 1. Ratios > 1 are treated as 1, and fractions < 0 are treated as 0.
+# tracing_sample_ratio = 1.0
|
||||
|
||||
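The `tracing_sample_ratio` comment above amounts to saturating the configured ratio into `[0, 1]`. A minimal Rust sketch of that documented rule (an illustration of the described behavior, not the actual implementation):

```rust
// Sketch of the documented rule: ratios above 1 behave as 1, values below 0 behave as 0.
fn effective_sample_ratio(configured: f64) -> f64 {
    configured.clamp(0.0, 1.0)
}

fn main() {
    assert_eq!(effective_sample_ratio(1.5), 1.0);
    assert_eq!(effective_sample_ratio(-0.2), 0.0);
    assert_eq!(effective_sample_ratio(0.25), 0.25);
}
```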
47  docker/dev-builder/ubuntu/Dockerfile-18.10 (new file)
@@ -0,0 +1,47 @@
|
||||
# Use the legacy glibc 2.28.
|
||||
FROM ubuntu:18.10
|
||||
|
||||
ENV LANG en_US.utf8
|
||||
WORKDIR /greptimedb
|
||||
|
||||
# Use old-releases.ubuntu.com to avoid 404s: https://help.ubuntu.com/community/EOLUpgrades.
|
||||
RUN echo "deb http://old-releases.ubuntu.com/ubuntu/ cosmic main restricted universe multiverse\n\
|
||||
deb http://old-releases.ubuntu.com/ubuntu/ cosmic-updates main restricted universe multiverse\n\
|
||||
deb http://old-releases.ubuntu.com/ubuntu/ cosmic-security main restricted universe multiverse" > /etc/apt/sources.list
|
||||
|
||||
# Install dependencies.
|
||||
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
|
||||
libssl-dev \
|
||||
tzdata \
|
||||
curl \
|
||||
ca-certificates \
|
||||
git \
|
||||
build-essential \
|
||||
unzip \
|
||||
pkg-config
|
||||
|
||||
# Install protoc.
|
||||
ENV PROTOC_VERSION=25.1
|
||||
RUN if [ "$(uname -m)" = "x86_64" ]; then \
|
||||
PROTOC_ZIP=protoc-${PROTOC_VERSION}-linux-x86_64.zip; \
|
||||
elif [ "$(uname -m)" = "aarch64" ]; then \
|
||||
PROTOC_ZIP=protoc-${PROTOC_VERSION}-linux-aarch_64.zip; \
|
||||
else \
|
||||
echo "Unsupported architecture"; exit 1; \
|
||||
fi && \
|
||||
curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOC_VERSION}/${PROTOC_ZIP} && \
|
||||
unzip -o ${PROTOC_ZIP} -d /usr/local bin/protoc && \
|
||||
unzip -o ${PROTOC_ZIP} -d /usr/local 'include/*' && \
|
||||
rm -f ${PROTOC_ZIP}
|
||||
|
||||
# Install Rust.
|
||||
SHELL ["/bin/bash", "-c"]
|
||||
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
|
||||
ENV PATH /root/.cargo/bin/:$PATH
|
||||
|
||||
# Install Rust toolchains.
|
||||
ARG RUST_TOOLCHAIN
|
||||
RUN rustup toolchain install ${RUST_TOOLCHAIN}
|
||||
|
||||
# Install nextest.
|
||||
RUN cargo install cargo-nextest --locked
|
||||
@@ -50,10 +50,10 @@ The concept "Table" in GreptimeDB is a bit "heavy" compared to other time-series
|
||||
```
|
||||
|
||||
The following parts will describe these implementation details:
|
||||
- How to route these metric region tables and how those table are distributed
|
||||
- How to maintain the schema and other metadata of the underlying mito engine table
|
||||
- How to maintain the schema of metric engine table
|
||||
- How the query goes
|
||||
- How to route these metric region tables and how those table are distributed
|
||||
- How to maintain the schema and other metadata of the underlying mito engine table
|
||||
- How to maintain the schema of metric engine table
|
||||
- How the query goes
|
||||
|
||||
## Routing
|
||||
|
||||
|
||||
169  docs/rfcs/2023-11-07-region-migration.md (new file)
@@ -0,0 +1,169 @@
---
Feature Name: Region Migration Procedure
Tracking Issue: https://github.com/GreptimeTeam/greptimedb/issues/2700
Date: 2023-11-03
Author: "Xu Wenkang <wenymedia@gmail.com>"
---

# Summary
This RFC proposes a way to give the Meta Server the ability to move regions between Datanodes.

# Motivation
Typically, we need this ability in the following scenarios:
- Migrate hot-spot Regions to an idle Datanode
- Move failed Regions to an available Datanode

# Details
```mermaid
|
||||
flowchart TD
|
||||
style Start fill:#85CB90,color:#fff
|
||||
style End fill:#85CB90,color:#fff
|
||||
style SelectCandidate fill:#F38488,color:#fff
|
||||
style OpenCandidate fill:#F38488,color:#fff
|
||||
style UpdateMetadataDown fill:#F38488,color:#fff
|
||||
style UpdateMetadataUp fill:#F38488,color:#fff
|
||||
style UpdateMetadataRollback fill:#F38488,color:#fff
|
||||
style DowngradeLeader fill:#F38488,color:#fff
|
||||
style UpgradeCandidate fill:#F38488,color:#fff
|
||||
|
||||
Start[Start]
|
||||
SelectCandidate[Select Candidate]
|
||||
UpdateMetadataDown["`Update Metadata(Down)
|
||||
1. Downgrade Leader
|
||||
`"]
|
||||
DowngradeLeader["`Downgrade Leader
|
||||
1. Become Follower
|
||||
2. Return **last_entry_id**
|
||||
`"]
|
||||
UpgradeCandidate["`Upgrade Candidate
|
||||
1. Replay to **last_entry_id**
|
||||
2. Become Leader
|
||||
`"]
|
||||
UpdateMetadataUp["`Update Metadata(Up)
|
||||
1. Switch Leader
|
||||
2.1. Remove Old Leader(Opt.)
|
||||
2.2. Move Old Leader to Follower(Opt.)
|
||||
`"]
|
||||
UpdateMetadataRollback["`Update Metadata(Rollback)
|
||||
1. Upgrade old Leader
|
||||
`"]
|
||||
End
|
||||
AnyCandidate{Available?}
|
||||
OpenCandidate["Open Candidate"]
|
||||
CloseOldLeader["Close Old Leader"]
|
||||
|
||||
Start
|
||||
--> SelectCandidate
|
||||
--> AnyCandidate
|
||||
--> |Yes| UpdateMetadataDown
|
||||
--> I1["Invalid Frontend Cache"]
|
||||
--> DowngradeLeader
|
||||
--> UpgradeCandidate
|
||||
--> UpdateMetadataUp
|
||||
--> I2["Invalid Frontend Cache"]
|
||||
--> End
|
||||
|
||||
UpgradeCandidate
|
||||
--> UpdateMetadataRollback
|
||||
--> I3["Invalid Frontend Cache"]
|
||||
--> End
|
||||
|
||||
I2
|
||||
--> CloseOldLeader
|
||||
--> End
|
||||
|
||||
AnyCandidate
|
||||
--> |No| OpenCandidate
|
||||
--> UpdateMetadataDown
|
||||
```
|
||||
|
||||
**Only the red nodes persist state after they have succeeded**; the other nodes (excluding the Start and End nodes) do not persist state.

## Steps

**The persistent context:** it is shared by every step and is still available after recovery. It is only updated/stored after a red node has succeeded. A sketch of both contexts follows below.

Values:
- `region_id`: The target leader region.
- `peer`: The target datanode.
- `close_old_leader`: Indicates whether to close the old leader region.
- `leader_may_unreachable`: Used to support the failover procedure.
|
||||
|
||||
**The Volatile context:** it is shared by every step and available while executing (including retries). It is dropped if the procedure runner crashes.
|
||||
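A minimal Rust sketch of the two contexts described in this section (field and type choices are assumptions for illustration, not the actual GreptimeDB definitions):

```rust
/// Survives procedure restarts; only updated/stored after a red (persisting) step succeeds.
struct PersistentContext {
    /// The target leader region.
    region_id: u64,
    /// The target datanode that will receive the region.
    peer: String,
    /// Whether the old leader region should be closed at the end.
    close_old_leader: bool,
    /// Set when the old leader looks unreachable; used by the failover procedure.
    leader_may_unreachable: bool,
}

/// Rebuilt on recovery; dropped if the procedure runner crashes.
#[derive(Default)]
struct VolatileContext {
    /// Example of transient state: the last entry id reported by the downgraded leader.
    last_entry_id: Option<u64>,
}
```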
|
||||
### Select Candidate
|
||||
|
||||
The Persistent state: Selected Candidate Region.
|
||||
|
||||
### Update Metadata(Down)
|
||||
|
||||
**The Persistent context:**
|
||||
- The (latest/updated) `version` of `TableRouteValue`, It will be used in the step of `Update Metadata(Up)`.
|
||||
|
||||
### Downgrade Leader
|
||||
This step sends an instruction via heartbeat and performs:
|
||||
1. Downgrades leader region.
|
||||
2. Retrieves the `last_entry_id` (if available).
|
||||
|
||||
If the target leader region is not found:
|
||||
- Sets `close_old_leader` to true.
|
||||
- Sets `leader_may_unreachable` to true.
|
||||
|
||||
If the target Datanode is unreachable:
|
||||
- Waits for region lease expired.
|
||||
- Sets `close_old_leader` to true.
|
||||
- Sets `leader_may_unreachable` to true.
|
||||
|
||||
**The Persistent context:**
|
||||
None
|
||||
|
||||
**The Persistent state:**
|
||||
- `last_entry_id`
|
||||
|
||||
*Passes to next step.
|
||||
|
||||
|
||||
### Upgrade Candidate
|
||||
This step sends an instruction via heartbeat and performs:
|
||||
1. Replays the WAL to latest(`last_entry_id`).
|
||||
2. Upgrades the candidate region.
|
||||
|
||||
If the target region is not found:
|
||||
- Rollbacks.
|
||||
- Notifies the failover detector if `leader_may_unreachable` == true.
|
||||
- Exits procedure.
|
||||
|
||||
If the target Datanode is unreachable:
|
||||
- Rollbacks.
|
||||
- Notifies the failover detector if `leader_may_unreachable` == true.
|
||||
- Exits procedure.
|
||||
|
||||
**The Persistent context:**
|
||||
None
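The Downgrade Leader and Upgrade Candidate steps above are both driven by instructions delivered to the Datanode over the heartbeat channel. A minimal sketch of what such payloads could look like (names and shapes are assumptions, not the actual GreptimeDB instruction types):

```rust
/// Sketch only: the heartbeat-borne instructions implied by the two steps above.
enum RegionMigrationInstruction {
    /// Ask the old leader to step down and report how far its WAL goes.
    DowngradeRegion { region_id: u64 },
    /// Ask the candidate to replay the WAL up to `last_entry_id` and become leader.
    UpgradeRegion { region_id: u64, last_entry_id: Option<u64> },
}

/// Sketch of the reply the downgraded leader sends back.
struct DowngradeRegionReply {
    /// Last WAL entry id, if the region was found and downgraded.
    last_entry_id: Option<u64>,
    /// Whether the target leader region exists on that Datanode.
    exists: bool,
}
```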
|
||||
|
||||
### Update Metadata(Up)
|
||||
This step performs
|
||||
1. Switches Leader.
|
||||
2. Removes Old Leader(Opt.).
|
||||
3. Moves Old Leader to follower(Opt.).
|
||||
|
||||
The `TableRouteValue` version should equal the `TableRouteValue`'s `version` in Persistent context. Otherwise, verifies whether `TableRouteValue` already updated.
|
||||
|
||||
**The Persistent context:**
|
||||
None
|
||||
|
||||
### Close Old Leader(Opt.)
|
||||
This step sends a close region instruction via heartbeat.
|
||||
|
||||
If the target leader region is not found:
|
||||
- Ignore.
|
||||
|
||||
If the target Datanode is unreachable:
|
||||
- Ignore.
|
||||
|
||||
### Open Candidate(Opt.)
|
||||
This step sends an open region instruction via heartbeat and waits for conditions to be met (typically, the condition is that the `last_entry_id` of the Candidate Region is very close to that of the Leader Region or the latest).
|
||||
|
||||
If the target Datanode is unreachable:
|
||||
- Exits procedure.
|
||||
24  licenserc.toml (new file)
@@ -0,0 +1,24 @@
|
||||
# Copyright 2023 Greptime Team
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
headerPath = "Apache-2.0.txt"
|
||||
|
||||
includes = [
|
||||
"*.rs",
|
||||
"*.py",
|
||||
]
|
||||
|
||||
[properties]
|
||||
inceptionYear = 2023
|
||||
copyrightOwner = "Greptime Team"
|
||||
@@ -1,2 +1,2 @@
 [toolchain]
-channel = "nightly-2023-08-07"
+channel = "nightly-2023-10-21"
157  scripts/run-pyo3-greptime.sh (new executable file)
@@ -0,0 +1,157 @@
|
||||
#!/bin/bash
|
||||
|
||||
# This script configures the environment to run 'greptime' with the required Python version
|
||||
|
||||
# This script should be compatible both in Linux and macOS
|
||||
OS_TYPE="$(uname)"
|
||||
readonly OS_TYPE
|
||||
|
||||
check_command_existence() {
|
||||
command -v "$1" &> /dev/null
|
||||
}
|
||||
|
||||
get_python_version() {
|
||||
case "$OS_TYPE" in
|
||||
Darwin)
|
||||
otool -L $GREPTIME_BIN_PATH | grep -o 'Python.framework/Versions/3.[0-9]\+/Python' | grep -o '3.[0-9]\+'
|
||||
;;
|
||||
Linux)
|
||||
ldd $GREPTIME_BIN_PATH | grep -o 'libpython3\.[0-9]\+' | grep -o '3\.[0-9]\+'
|
||||
;;
|
||||
*)
|
||||
echo "Unsupported OS type: $OS_TYPE"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
setup_virtualenv() {
|
||||
local req_py_version="$1"
|
||||
local env_name="GreptimeTmpVenv$req_py_version"
|
||||
virtualenv --python=python"$req_py_version" "$env_name"
|
||||
source "$env_name/bin/activate"
|
||||
}
|
||||
|
||||
setup_conda_env() {
|
||||
local req_py_version="$1"
|
||||
local conda_base
|
||||
conda_base=$(conda info --base) || { echo "Error obtaining conda base directory"; exit 1; }
|
||||
. "$conda_base/etc/profile.d/conda.sh"
|
||||
|
||||
if ! conda list --name "GreptimeTmpPyO3Env$req_py_version" &> /dev/null; then
|
||||
conda create --yes --name "GreptimeTmpPyO3Env$req_py_version" python="$req_py_version"
|
||||
fi
|
||||
|
||||
conda activate "GreptimeTmpPyO3Env$req_py_version"
|
||||
}
|
||||
|
||||
GREPTIME_BIN_PATH="./greptime"
|
||||
YES="false"
|
||||
|
||||
usage() {
|
||||
echo "Usage:"
|
||||
echo " $0 -f <greptime-bin-path> [-y] <args-pass-to-greptime>"
|
||||
echo "Set $PY_ENV_MAN to 1 to use virtualenv, 2 to use conda"
|
||||
exit 1
|
||||
}
|
||||
|
||||
function parse_args() {
|
||||
while getopts ":f:y" opt; do
|
||||
case $opt in
|
||||
f)
|
||||
GREPTIME_BIN_PATH=$OPTARG
|
||||
;;
|
||||
y)
|
||||
YES="yes"
|
||||
;;
|
||||
\?)
|
||||
echo "Invalid option: -$OPTARG" >&2
|
||||
exit 1
|
||||
;;
|
||||
:)
|
||||
echo "Option -$OPTARG requires an argument." >&2
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
shift $((OPTIND -1))
|
||||
|
||||
REST_ARGS=$*
|
||||
|
||||
if [ -z "$GREPTIME_BIN_PATH" ]; then
|
||||
usage
|
||||
fi
|
||||
|
||||
echo "Run greptime binary at '$GREPTIME_BIN_PATH' (yes=$YES)..."
|
||||
echo "The args pass to greptime: '$REST_ARGS'"
|
||||
}
|
||||
|
||||
# Set library path and pass all arguments to greptime to run it
|
||||
execute_greptime() {
|
||||
if [[ "$OS_TYPE" == "Darwin" ]]; then
|
||||
DYLD_LIBRARY_PATH="${CONDA_PREFIX:-$PREFIX}/lib:${LD_LIBRARY_PATH:-}" $GREPTIME_BIN_PATH $@
|
||||
elif [[ "$OS_TYPE" == "Linux" ]]; then
|
||||
LD_LIBRARY_PATH="${CONDA_PREFIX:-$PREFIX}/lib:${LD_LIBRARY_PATH:-}" $GREPTIME_BIN_PATH $@
|
||||
fi
|
||||
}
|
||||
|
||||
main() {
|
||||
parse_args $@
|
||||
|
||||
local req_py_version
|
||||
req_py_version=$(get_python_version)
|
||||
readonly req_py_version
|
||||
|
||||
if [[ -z "$req_py_version" ]]; then
|
||||
if $GREPTIME_BIN_PATH --version &> /dev/null; then
|
||||
$GREPTIME_BIN_PATH $REST_ARGS
|
||||
else
|
||||
echo "The 'greptime' binary is not valid or encountered an error."
|
||||
$GREPTIME_BIN_PATH --version
|
||||
exit 1
|
||||
fi
|
||||
return
|
||||
fi
|
||||
|
||||
echo "The required version of Python shared library is $req_py_version"
|
||||
|
||||
# if YES exist, assign it to yn, else read from stdin
|
||||
if [[ -z "$YES" ]]; then
|
||||
echo "Now this script will try to install or find correct Python Version"
|
||||
echo "Do you want to continue? (yes/no): "
|
||||
read -r yn
|
||||
else
|
||||
yn="$YES"
|
||||
fi
|
||||
case $yn in
|
||||
[Yy]* ) ;;
|
||||
[Nn]* ) exit;;
|
||||
* ) echo "Please answer yes or no.";;
|
||||
esac
|
||||
|
||||
# if USE_ENV exist, assign it to option
|
||||
# else read from stdin
|
||||
if [[ -z "$PY_ENV_MAN" ]]; then
|
||||
echo "Do you want to use virtualenv or conda? (virtualenv(1)/conda(2)): "
|
||||
read -r option
|
||||
else
|
||||
option="$PY_ENV_MAN"
|
||||
fi
|
||||
|
||||
case $option in
|
||||
1)
|
||||
setup_virtualenv "$req_py_version"
|
||||
;;
|
||||
2)
|
||||
setup_conda_env "$req_py_version"
|
||||
;;
|
||||
*)
|
||||
echo "Please input 1 or 2"; exit 1
|
||||
;;
|
||||
esac
|
||||
|
||||
execute_greptime $REST_ARGS
|
||||
}
|
||||
|
||||
main "$@"
|
||||
@@ -5,14 +5,16 @@ edition.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[dependencies]
|
||||
common-base = { workspace = true }
|
||||
common-error = { workspace = true }
|
||||
common-macro = { workspace = true }
|
||||
common-time = { workspace = true }
|
||||
datatypes = { workspace = true }
|
||||
common-base.workspace = true
|
||||
common-decimal.workspace = true
|
||||
common-error.workspace = true
|
||||
common-macro.workspace = true
|
||||
common-time.workspace = true
|
||||
datatypes.workspace = true
|
||||
greptime-proto.workspace = true
|
||||
paste = "1.0"
|
||||
prost.workspace = true
|
||||
snafu = { version = "0.7", features = ["backtraces"] }
|
||||
snafu.workspace = true
|
||||
tonic.workspace = true
|
||||
|
||||
[build-dependencies]
|
||||
|
||||
@@ -15,6 +15,8 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_base::BitVec;
|
||||
use common_decimal::decimal128::{DECIMAL128_DEFAULT_SCALE, DECIMAL128_MAX_PRECISION};
|
||||
use common_decimal::Decimal128;
|
||||
use common_time::interval::IntervalUnit;
|
||||
use common_time::time::Time;
|
||||
use common_time::timestamp::TimeUnit;
|
||||
@@ -26,47 +28,71 @@ use datatypes::types::{
|
||||
};
|
||||
use datatypes::value::{OrderedF32, OrderedF64, Value};
|
||||
use datatypes::vectors::{
|
||||
BinaryVector, BooleanVector, DateTimeVector, DateVector, DurationMicrosecondVector,
|
||||
DurationMillisecondVector, DurationNanosecondVector, DurationSecondVector, Float32Vector,
|
||||
Float64Vector, Int32Vector, Int64Vector, IntervalDayTimeVector, IntervalMonthDayNanoVector,
|
||||
IntervalYearMonthVector, PrimitiveVector, StringVector, TimeMicrosecondVector,
|
||||
TimeMillisecondVector, TimeNanosecondVector, TimeSecondVector, TimestampMicrosecondVector,
|
||||
TimestampMillisecondVector, TimestampNanosecondVector, TimestampSecondVector, UInt32Vector,
|
||||
UInt64Vector, VectorRef,
|
||||
BinaryVector, BooleanVector, DateTimeVector, DateVector, Decimal128Vector,
|
||||
DurationMicrosecondVector, DurationMillisecondVector, DurationNanosecondVector,
|
||||
DurationSecondVector, Float32Vector, Float64Vector, Int32Vector, Int64Vector,
|
||||
IntervalDayTimeVector, IntervalMonthDayNanoVector, IntervalYearMonthVector, PrimitiveVector,
|
||||
StringVector, TimeMicrosecondVector, TimeMillisecondVector, TimeNanosecondVector,
|
||||
TimeSecondVector, TimestampMicrosecondVector, TimestampMillisecondVector,
|
||||
TimestampNanosecondVector, TimestampSecondVector, UInt32Vector, UInt64Vector, VectorRef,
|
||||
};
|
||||
use greptime_proto::v1;
|
||||
use greptime_proto::v1::column_data_type_extension::TypeExt;
|
||||
use greptime_proto::v1::ddl_request::Expr;
|
||||
use greptime_proto::v1::greptime_request::Request;
|
||||
use greptime_proto::v1::query_request::Query;
|
||||
use greptime_proto::v1::value::ValueData;
|
||||
use greptime_proto::v1::{self, DdlRequest, IntervalMonthDayNano, QueryRequest, Row, SemanticType};
|
||||
use greptime_proto::v1::{
|
||||
ColumnDataTypeExtension, DdlRequest, DecimalTypeExtension, QueryRequest, Row, SemanticType,
|
||||
};
|
||||
use paste::paste;
|
||||
use snafu::prelude::*;
|
||||
|
||||
use crate::error::{self, Result};
|
||||
use crate::v1::column::Values;
|
||||
use crate::v1::{Column, ColumnDataType, Value as GrpcValue};
|
||||
|
||||
#[derive(Debug, PartialEq, Eq)]
|
||||
pub struct ColumnDataTypeWrapper(ColumnDataType);
|
||||
/// ColumnDataTypeWrapper is a wrapper of ColumnDataType and ColumnDataTypeExtension.
|
||||
/// It could be used to convert with ConcreteDataType.
|
||||
#[derive(Debug, PartialEq)]
|
||||
pub struct ColumnDataTypeWrapper {
|
||||
datatype: ColumnDataType,
|
||||
datatype_ext: Option<ColumnDataTypeExtension>,
|
||||
}
|
||||
|
||||
impl ColumnDataTypeWrapper {
|
||||
pub fn try_new(datatype: i32) -> Result<Self> {
|
||||
/// Try to create a ColumnDataTypeWrapper from i32(ColumnDataType) and ColumnDataTypeExtension.
|
||||
pub fn try_new(datatype: i32, datatype_ext: Option<ColumnDataTypeExtension>) -> Result<Self> {
|
||||
let datatype = ColumnDataType::try_from(datatype)
|
||||
.context(error::UnknownColumnDataTypeSnafu { datatype })?;
|
||||
Ok(Self(datatype))
|
||||
Ok(Self {
|
||||
datatype,
|
||||
datatype_ext,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn new(datatype: ColumnDataType) -> Self {
|
||||
Self(datatype)
|
||||
/// Create a ColumnDataTypeWrapper from ColumnDataType and ColumnDataTypeExtension.
|
||||
pub fn new(datatype: ColumnDataType, datatype_ext: Option<ColumnDataTypeExtension>) -> Self {
|
||||
Self {
|
||||
datatype,
|
||||
datatype_ext,
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the ColumnDataType.
|
||||
pub fn datatype(&self) -> ColumnDataType {
|
||||
self.0
|
||||
self.datatype
|
||||
}
|
||||
|
||||
/// Get a tuple of ColumnDataType and ColumnDataTypeExtension.
|
||||
pub fn to_parts(&self) -> (ColumnDataType, Option<ColumnDataTypeExtension>) {
|
||||
(self.datatype, self.datatype_ext.clone())
|
||||
}
|
||||
}
|
||||
|
||||
impl From<ColumnDataTypeWrapper> for ConcreteDataType {
|
||||
fn from(datatype: ColumnDataTypeWrapper) -> Self {
|
||||
match datatype.0 {
|
||||
fn from(datatype_wrapper: ColumnDataTypeWrapper) -> Self {
|
||||
match datatype_wrapper.datatype {
|
||||
ColumnDataType::Boolean => ConcreteDataType::boolean_datatype(),
|
||||
ColumnDataType::Int8 => ConcreteDataType::int8_datatype(),
|
||||
ColumnDataType::Int16 => ConcreteDataType::int16_datatype(),
|
||||
@@ -109,6 +135,100 @@ impl From<ColumnDataTypeWrapper> for ConcreteDataType {
|
||||
ConcreteDataType::duration_microsecond_datatype()
|
||||
}
|
||||
ColumnDataType::DurationNanosecond => ConcreteDataType::duration_nanosecond_datatype(),
|
||||
ColumnDataType::Decimal128 => {
|
||||
if let Some(TypeExt::DecimalType(d)) = datatype_wrapper
|
||||
.datatype_ext
|
||||
.as_ref()
|
||||
.and_then(|datatype_ext| datatype_ext.type_ext.as_ref())
|
||||
{
|
||||
ConcreteDataType::decimal128_datatype(d.precision as u8, d.scale as i8)
|
||||
} else {
|
||||
ConcreteDataType::decimal128_default_datatype()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// This macro is used to generate datatype functions
|
||||
/// with lower style for ColumnDataTypeWrapper.
|
||||
///
|
||||
///
|
||||
/// For example: we can use `ColumnDataTypeWrapper::int8_datatype()`,
|
||||
/// to get a ColumnDataTypeWrapper with datatype `ColumnDataType::Int8`.
|
||||
macro_rules! impl_column_type_functions {
|
||||
($($Type: ident), +) => {
|
||||
paste! {
|
||||
impl ColumnDataTypeWrapper {
|
||||
$(
|
||||
pub fn [<$Type:lower _datatype>]() -> ColumnDataTypeWrapper {
|
||||
ColumnDataTypeWrapper {
|
||||
datatype: ColumnDataType::$Type,
|
||||
datatype_ext: None,
|
||||
}
|
||||
}
|
||||
)+
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// This macro is used to generate datatype functions
|
||||
/// with snake style for ColumnDataTypeWrapper.
|
||||
///
|
||||
///
|
||||
/// For example: we can use `ColumnDataTypeWrapper::duration_second_datatype()`,
|
||||
/// to get a ColumnDataTypeWrapper with datatype `ColumnDataType::DurationSecond`.
|
||||
macro_rules! impl_column_type_functions_with_snake {
|
||||
($($TypeName: ident), +) => {
|
||||
paste!{
|
||||
impl ColumnDataTypeWrapper {
|
||||
$(
|
||||
pub fn [<$TypeName:snake _datatype>]() -> ColumnDataTypeWrapper {
|
||||
ColumnDataTypeWrapper {
|
||||
datatype: ColumnDataType::$TypeName,
|
||||
datatype_ext: None,
|
||||
}
|
||||
}
|
||||
)+
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
impl_column_type_functions!(
|
||||
Boolean, Uint8, Uint16, Uint32, Uint64, Int8, Int16, Int32, Int64, Float32, Float64, Binary,
|
||||
Date, Datetime, String
|
||||
);
|
||||
|
||||
impl_column_type_functions_with_snake!(
|
||||
TimestampSecond,
|
||||
TimestampMillisecond,
|
||||
TimestampMicrosecond,
|
||||
TimestampNanosecond,
|
||||
TimeSecond,
|
||||
TimeMillisecond,
|
||||
TimeMicrosecond,
|
||||
TimeNanosecond,
|
||||
IntervalYearMonth,
|
||||
IntervalDayTime,
|
||||
IntervalMonthDayNano,
|
||||
DurationSecond,
|
||||
DurationMillisecond,
|
||||
DurationMicrosecond,
|
||||
DurationNanosecond
|
||||
);
|
||||
|
||||
impl ColumnDataTypeWrapper {
|
||||
pub fn decimal128_datatype(precision: i32, scale: i32) -> Self {
|
||||
ColumnDataTypeWrapper {
|
||||
datatype: ColumnDataType::Decimal128,
|
||||
datatype_ext: Some(ColumnDataTypeExtension {
|
||||
type_ext: Some(TypeExt::DecimalType(DecimalTypeExtension {
|
||||
precision,
|
||||
scale,
|
||||
})),
|
||||
}),
|
||||
}
|
||||
}
|
||||
}
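As a usage sketch (illustrative only; it relies on the items defined in this file rather than being a standalone program), build a decimal column type whose precision and scale travel in the extension, then convert it back to a `ConcreteDataType`:

```rust
// Illustrative sketch, not part of this change.
let wrapper = ColumnDataTypeWrapper::decimal128_datatype(38, 10);
let (datatype, ext) = wrapper.to_parts();
assert_eq!(datatype, ColumnDataType::Decimal128);
let concrete: ConcreteDataType = ColumnDataTypeWrapper::new(datatype, ext).into();
assert_eq!(concrete, ConcreteDataType::decimal128_datatype(38, 10));
```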
|
||||
@@ -117,7 +237,7 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
|
||||
type Error = error::Error;
|
||||
|
||||
fn try_from(datatype: ConcreteDataType) -> Result<Self> {
|
||||
let datatype = ColumnDataTypeWrapper(match datatype {
|
||||
let column_datatype = match datatype {
|
||||
ConcreteDataType::Boolean(_) => ColumnDataType::Boolean,
|
||||
ConcreteDataType::Int8(_) => ColumnDataType::Int8,
|
||||
ConcreteDataType::Int16(_) => ColumnDataType::Int16,
|
||||
@@ -156,13 +276,30 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
|
||||
DurationType::Microsecond(_) => ColumnDataType::DurationMicrosecond,
|
||||
DurationType::Nanosecond(_) => ColumnDataType::DurationNanosecond,
|
||||
},
|
||||
ConcreteDataType::Decimal128(_) => ColumnDataType::Decimal128,
|
||||
ConcreteDataType::Null(_)
|
||||
| ConcreteDataType::List(_)
|
||||
| ConcreteDataType::Dictionary(_) => {
|
||||
return error::IntoColumnDataTypeSnafu { from: datatype }.fail()
|
||||
}
|
||||
});
|
||||
Ok(datatype)
|
||||
};
|
||||
let datatype_extension = match column_datatype {
|
||||
ColumnDataType::Decimal128 => {
|
||||
datatype
|
||||
.as_decimal128()
|
||||
.map(|decimal_type| ColumnDataTypeExtension {
|
||||
type_ext: Some(TypeExt::DecimalType(DecimalTypeExtension {
|
||||
precision: decimal_type.precision() as i32,
|
||||
scale: decimal_type.scale() as i32,
|
||||
})),
|
||||
})
|
||||
}
|
||||
_ => None,
|
||||
};
|
||||
Ok(Self {
|
||||
datatype: column_datatype,
|
||||
datatype_ext: datatype_extension,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -288,6 +425,10 @@ pub fn values_with_capacity(datatype: ColumnDataType, capacity: usize) -> Values
|
||||
duration_nanosecond_values: Vec::with_capacity(capacity),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDataType::Decimal128 => Values {
|
||||
decimal128_values: Vec::with_capacity(capacity),
|
||||
..Default::default()
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
@@ -341,6 +482,7 @@ pub fn push_vals(column: &mut Column, origin_count: usize, vector: VectorRef) {
|
||||
TimeUnit::Microsecond => values.duration_microsecond_values.push(val.value()),
|
||||
TimeUnit::Nanosecond => values.duration_nanosecond_values.push(val.value()),
|
||||
},
|
||||
Value::Decimal128(val) => values.decimal128_values.push(convert_to_pb_decimal128(val)),
|
||||
Value::List(_) => unreachable!(),
|
||||
});
|
||||
column.null_mask = null_mask.into_vec();
|
||||
@@ -381,17 +523,29 @@ fn ddl_request_type(request: &DdlRequest) -> &'static str {
}

/// Converts an i128 value to google protobuf type [IntervalMonthDayNano].
pub fn convert_i128_to_interval(v: i128) -> IntervalMonthDayNano {
pub fn convert_i128_to_interval(v: i128) -> v1::IntervalMonthDayNano {
    let interval = Interval::from_i128(v);
    let (months, days, nanoseconds) = interval.to_month_day_nano();
    IntervalMonthDayNano {
    v1::IntervalMonthDayNano {
        months,
        days,
        nanoseconds,
    }
}

pub fn pb_value_to_value_ref(value: &v1::Value) -> ValueRef {
/// Convert common decimal128 to grpc decimal128 without precision and scale.
pub fn convert_to_pb_decimal128(v: Decimal128) -> v1::Decimal128 {
    let value = v.val();
    v1::Decimal128 {
        hi: (value >> 64) as i64,
        lo: value as i64,
    }
}

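The hi/lo split above is plain two's-complement arithmetic. A minimal sketch follows; the helper names split_i128 and join_i128 are illustrative only and not part of this patch, but the split matches convert_to_pb_decimal128 and the join shows why the pair round-trips, including negative values.

fn split_i128(value: i128) -> (i64, i64) {
    // Same arithmetic as convert_to_pb_decimal128: the upper 64 bits become `hi`,
    // the lower 64 bits (reinterpreted as i64) become `lo`.
    ((value >> 64) as i64, value as i64)
}

fn join_i128(hi: i64, lo: i64) -> i128 {
    // Widen `hi` back into the upper half; treat `lo` as an unsigned bit pattern
    // so a negative `lo` does not sign-extend into the upper half.
    ((hi as i128) << 64) | (lo as u64 as i128)
}

fn main() {
    // Small positive values keep hi == 0, as test_convert_to_pb_decimal128 below checks.
    assert_eq!(split_i128(123), (0, 123));
    let (hi, lo) = split_i128(-1);
    assert_eq!((hi, lo), (-1, -1));
    assert_eq!(join_i128(hi, lo), -1);
}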
pub fn pb_value_to_value_ref<'a>(
    value: &'a v1::Value,
    datatype_ext: &'a Option<ColumnDataTypeExtension>,
) -> ValueRef<'a> {
    let Some(value) = &value.value_data else {
        return ValueRef::Null;
    };
@@ -436,6 +590,28 @@ pub fn pb_value_to_value_ref(value: &v1::Value) -> ValueRef {
        ValueData::DurationMillisecondValue(v) => ValueRef::Duration(Duration::new_millisecond(*v)),
        ValueData::DurationMicrosecondValue(v) => ValueRef::Duration(Duration::new_microsecond(*v)),
        ValueData::DurationNanosecondValue(v) => ValueRef::Duration(Duration::new_nanosecond(*v)),
        ValueData::Decimal128Value(v) => {
            // get precision and scale from datatype_extension
            if let Some(TypeExt::DecimalType(d)) = datatype_ext
                .as_ref()
                .and_then(|column_ext| column_ext.type_ext.as_ref())
            {
                ValueRef::Decimal128(Decimal128::from_value_precision_scale(
                    v.hi,
                    v.lo,
                    d.precision as u8,
                    d.scale as i8,
                ))
            } else {
                // If the precision and scale are not set, use the default value.
                ValueRef::Decimal128(Decimal128::from_value_precision_scale(
                    v.hi,
                    v.lo,
                    DECIMAL128_MAX_PRECISION,
                    DECIMAL128_DEFAULT_SCALE,
                ))
            }
        }
    }
}

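A hedged usage sketch of the new signature (type paths are abbreviated; the concrete imports come from the generated v1 protobuf module): with a decimal extension the value is read back with the column's precision and scale, without one it falls back to the defaults named above.

let pb = v1::Value {
    value_data: Some(ValueData::Decimal128Value(v1::Decimal128 { hi: 0, lo: 1230 })),
};
let ext = Some(ColumnDataTypeExtension {
    type_ext: Some(TypeExt::DecimalType(DecimalTypeExtension { precision: 10, scale: 2 })),
});
// Interpreted as decimal(10, 2), i.e. 12.30, when the extension is present ...
let with_ext = pb_value_to_value_ref(&pb, &ext);
// ... and as decimal(DECIMAL128_MAX_PRECISION, DECIMAL128_DEFAULT_SCALE) otherwise.
let without_ext = pb_value_to_value_ref(&pb, &None);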
@@ -522,6 +698,11 @@ pub fn pb_values_to_vector_ref(data_type: &ConcreteDataType, values: Values) ->
                values.duration_nanosecond_values,
            )),
        },
        ConcreteDataType::Decimal128(d) => Arc::new(Decimal128Vector::from_values(
            values.decimal128_values.iter().map(|x| {
                Decimal128::from_value_precision_scale(x.hi, x.lo, d.precision(), d.scale()).into()
            }),
        )),
        ConcreteDataType::Null(_) | ConcreteDataType::List(_) | ConcreteDataType::Dictionary(_) => {
            unreachable!()
        }
@@ -692,6 +873,18 @@ pub fn pb_values_to_values(data_type: &ConcreteDataType, values: Values) -> Vec<
            .into_iter()
            .map(|v| Value::Duration(Duration::new_nanosecond(v)))
            .collect(),
        ConcreteDataType::Decimal128(d) => values
            .decimal128_values
            .into_iter()
            .map(|v| {
                Value::Decimal128(Decimal128::from_value_precision_scale(
                    v.hi,
                    v.lo,
                    d.precision(),
                    d.scale(),
                ))
            })
            .collect(),
        ConcreteDataType::Null(_) | ConcreteDataType::List(_) | ConcreteDataType::Dictionary(_) => {
            unreachable!()
        }
@@ -704,12 +897,14 @@ pub fn is_semantic_type_eq(type_value: i32, semantic_type: SemanticType) -> bool
}

/// Returns true if the pb type value is valid.
pub fn is_column_type_value_eq(type_value: i32, expect_type: &ConcreteDataType) -> bool {
    let Ok(column_type) = ColumnDataType::try_from(type_value) else {
        return false;
    };

    is_column_type_eq(column_type, expect_type)
pub fn is_column_type_value_eq(
    type_value: i32,
    type_extension: Option<ColumnDataTypeExtension>,
    expect_type: &ConcreteDataType,
) -> bool {
    ColumnDataTypeWrapper::try_new(type_value, type_extension)
        .map(|wrapper| ConcreteDataType::from(wrapper) == *expect_type)
        .unwrap_or(false)
}

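For a column that carries a type extension, the equality check now has to see that extension too. A small hedged sketch (mirroring the boolean case exercised in the tests further down; not taken from the patch itself):

let ext = Some(ColumnDataTypeExtension {
    type_ext: Some(TypeExt::DecimalType(DecimalTypeExtension { precision: 10, scale: 2 })),
});
assert!(is_column_type_value_eq(
    ColumnDataType::Decimal128 as i32,
    ext,
    &ConcreteDataType::decimal128_datatype(10, 2),
));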
/// Convert value into proto's value.
@@ -816,13 +1011,19 @@ pub fn to_proto_value(value: Value) -> Option<v1::Value> {
                value_data: Some(ValueData::DurationNanosecondValue(v.value())),
            },
        },
        Value::Decimal128(v) => {
            let (hi, lo) = v.split_value();
            v1::Value {
                value_data: Some(ValueData::Decimal128Value(v1::Decimal128 { hi, lo })),
            }
        }
        Value::List(_) => return None,
    };

    Some(proto_value)
}

/// Returns the [ColumnDataType] of the value.
/// Returns the [ColumnDataTypeWrapper] of the value.
///
/// If value is null, returns `None`.
pub fn proto_value_type(value: &v1::Value) -> Option<ColumnDataType> {
@@ -857,65 +1058,11 @@ pub fn proto_value_type(value: &v1::Value) -> Option<ColumnDataType> {
        ValueData::DurationMillisecondValue(_) => ColumnDataType::DurationMillisecond,
        ValueData::DurationMicrosecondValue(_) => ColumnDataType::DurationMicrosecond,
        ValueData::DurationNanosecondValue(_) => ColumnDataType::DurationNanosecond,
        ValueData::Decimal128Value(_) => ColumnDataType::Decimal128,
    };
    Some(value_type)
}

/// Convert [ConcreteDataType] to [ColumnDataType].
|
||||
pub fn to_column_data_type(data_type: &ConcreteDataType) -> Option<ColumnDataType> {
|
||||
let column_data_type = match data_type {
|
||||
ConcreteDataType::Boolean(_) => ColumnDataType::Boolean,
|
||||
ConcreteDataType::Int8(_) => ColumnDataType::Int8,
|
||||
ConcreteDataType::Int16(_) => ColumnDataType::Int16,
|
||||
ConcreteDataType::Int32(_) => ColumnDataType::Int32,
|
||||
ConcreteDataType::Int64(_) => ColumnDataType::Int64,
|
||||
ConcreteDataType::UInt8(_) => ColumnDataType::Uint8,
|
||||
ConcreteDataType::UInt16(_) => ColumnDataType::Uint16,
|
||||
ConcreteDataType::UInt32(_) => ColumnDataType::Uint32,
|
||||
ConcreteDataType::UInt64(_) => ColumnDataType::Uint64,
|
||||
ConcreteDataType::Float32(_) => ColumnDataType::Float32,
|
||||
ConcreteDataType::Float64(_) => ColumnDataType::Float64,
|
||||
ConcreteDataType::Binary(_) => ColumnDataType::Binary,
|
||||
ConcreteDataType::String(_) => ColumnDataType::String,
|
||||
ConcreteDataType::Date(_) => ColumnDataType::Date,
|
||||
ConcreteDataType::DateTime(_) => ColumnDataType::Datetime,
|
||||
ConcreteDataType::Timestamp(TimestampType::Second(_)) => ColumnDataType::TimestampSecond,
|
||||
ConcreteDataType::Timestamp(TimestampType::Millisecond(_)) => {
|
||||
ColumnDataType::TimestampMillisecond
|
||||
}
|
||||
ConcreteDataType::Timestamp(TimestampType::Microsecond(_)) => {
|
||||
ColumnDataType::TimestampMicrosecond
|
||||
}
|
||||
ConcreteDataType::Timestamp(TimestampType::Nanosecond(_)) => {
|
||||
ColumnDataType::TimestampNanosecond
|
||||
}
|
||||
ConcreteDataType::Time(TimeType::Second(_)) => ColumnDataType::TimeSecond,
|
||||
ConcreteDataType::Time(TimeType::Millisecond(_)) => ColumnDataType::TimeMillisecond,
|
||||
ConcreteDataType::Time(TimeType::Microsecond(_)) => ColumnDataType::TimeMicrosecond,
|
||||
ConcreteDataType::Time(TimeType::Nanosecond(_)) => ColumnDataType::TimeNanosecond,
|
||||
ConcreteDataType::Duration(DurationType::Second(_)) => ColumnDataType::DurationSecond,
|
||||
ConcreteDataType::Duration(DurationType::Millisecond(_)) => {
|
||||
ColumnDataType::DurationMillisecond
|
||||
}
|
||||
ConcreteDataType::Duration(DurationType::Microsecond(_)) => {
|
||||
ColumnDataType::DurationMicrosecond
|
||||
}
|
||||
ConcreteDataType::Duration(DurationType::Nanosecond(_)) => {
|
||||
ColumnDataType::DurationNanosecond
|
||||
}
|
||||
ConcreteDataType::Interval(IntervalType::YearMonth(_)) => ColumnDataType::IntervalYearMonth,
|
||||
ConcreteDataType::Interval(IntervalType::MonthDayNano(_)) => {
|
||||
ColumnDataType::IntervalMonthDayNano
|
||||
}
|
||||
ConcreteDataType::Interval(IntervalType::DayTime(_)) => ColumnDataType::IntervalDayTime,
|
||||
ConcreteDataType::Null(_) | ConcreteDataType::List(_) | ConcreteDataType::Dictionary(_) => {
|
||||
return None
|
||||
}
|
||||
};
|
||||
|
||||
Some(column_data_type)
|
||||
}
|
||||
|
||||
pub fn vectors_to_rows<'a>(
|
||||
columns: impl Iterator<Item = &'a VectorRef>,
|
||||
row_count: usize,
|
||||
@@ -974,20 +1121,15 @@ pub fn value_to_grpc_value(value: Value) -> GrpcValue {
|
||||
TimeUnit::Microsecond => ValueData::DurationMicrosecondValue(v.value()),
|
||||
TimeUnit::Nanosecond => ValueData::DurationNanosecondValue(v.value()),
|
||||
}),
|
||||
Value::Decimal128(v) => {
|
||||
let (hi, lo) = v.split_value();
|
||||
Some(ValueData::Decimal128Value(v1::Decimal128 { hi, lo }))
|
||||
}
|
||||
Value::List(_) => unreachable!(),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns true if the column type is equal to expected type.
|
||||
fn is_column_type_eq(column_type: ColumnDataType, expect_type: &ConcreteDataType) -> bool {
|
||||
if let Some(expect) = to_column_data_type(expect_type) {
|
||||
column_type == expect
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
@@ -1081,189 +1223,204 @@ mod tests {
|
||||
let values = values_with_capacity(ColumnDataType::DurationMillisecond, 2);
|
||||
let values = values.duration_millisecond_values;
|
||||
assert_eq!(2, values.capacity());
|
||||
|
||||
let values = values_with_capacity(ColumnDataType::Decimal128, 2);
|
||||
let values = values.decimal128_values;
|
||||
assert_eq!(2, values.capacity());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_concrete_datatype_from_column_datatype() {
|
||||
assert_eq!(
|
||||
ConcreteDataType::boolean_datatype(),
|
||||
ColumnDataTypeWrapper(ColumnDataType::Boolean).into()
|
||||
ColumnDataTypeWrapper::boolean_datatype().into()
|
||||
);
|
||||
assert_eq!(
|
||||
ConcreteDataType::int8_datatype(),
|
||||
ColumnDataTypeWrapper(ColumnDataType::Int8).into()
|
||||
ColumnDataTypeWrapper::int8_datatype().into()
|
||||
);
|
||||
assert_eq!(
|
||||
ConcreteDataType::int16_datatype(),
|
||||
ColumnDataTypeWrapper(ColumnDataType::Int16).into()
|
||||
ColumnDataTypeWrapper::int16_datatype().into()
|
||||
);
|
||||
assert_eq!(
|
||||
ConcreteDataType::int32_datatype(),
|
||||
ColumnDataTypeWrapper(ColumnDataType::Int32).into()
|
||||
ColumnDataTypeWrapper::int32_datatype().into()
|
||||
);
|
||||
assert_eq!(
|
||||
ConcreteDataType::int64_datatype(),
|
||||
ColumnDataTypeWrapper(ColumnDataType::Int64).into()
|
||||
ColumnDataTypeWrapper::int64_datatype().into()
|
||||
);
|
||||
assert_eq!(
|
||||
ConcreteDataType::uint8_datatype(),
|
||||
ColumnDataTypeWrapper(ColumnDataType::Uint8).into()
|
||||
ColumnDataTypeWrapper::uint8_datatype().into()
|
||||
);
|
||||
assert_eq!(
|
||||
ConcreteDataType::uint16_datatype(),
|
||||
ColumnDataTypeWrapper(ColumnDataType::Uint16).into()
|
||||
ColumnDataTypeWrapper::uint16_datatype().into()
|
||||
);
|
||||
assert_eq!(
|
||||
ConcreteDataType::uint32_datatype(),
|
||||
ColumnDataTypeWrapper(ColumnDataType::Uint32).into()
|
||||
ColumnDataTypeWrapper::uint32_datatype().into()
|
||||
);
|
||||
assert_eq!(
|
||||
ConcreteDataType::uint64_datatype(),
|
||||
ColumnDataTypeWrapper(ColumnDataType::Uint64).into()
|
||||
ColumnDataTypeWrapper::uint64_datatype().into()
|
||||
);
|
||||
assert_eq!(
|
||||
ConcreteDataType::float32_datatype(),
|
||||
ColumnDataTypeWrapper(ColumnDataType::Float32).into()
|
||||
ColumnDataTypeWrapper::float32_datatype().into()
|
||||
);
|
||||
assert_eq!(
|
||||
ConcreteDataType::float64_datatype(),
|
||||
ColumnDataTypeWrapper(ColumnDataType::Float64).into()
|
||||
ColumnDataTypeWrapper::float64_datatype().into()
|
||||
);
|
||||
assert_eq!(
|
||||
ConcreteDataType::binary_datatype(),
|
||||
ColumnDataTypeWrapper(ColumnDataType::Binary).into()
|
||||
ColumnDataTypeWrapper::binary_datatype().into()
|
||||
);
|
||||
assert_eq!(
|
||||
ConcreteDataType::string_datatype(),
|
||||
ColumnDataTypeWrapper(ColumnDataType::String).into()
|
||||
ColumnDataTypeWrapper::string_datatype().into()
|
||||
);
|
||||
assert_eq!(
|
||||
ConcreteDataType::date_datatype(),
|
||||
ColumnDataTypeWrapper(ColumnDataType::Date).into()
|
||||
ColumnDataTypeWrapper::date_datatype().into()
|
||||
);
|
||||
assert_eq!(
|
||||
ConcreteDataType::datetime_datatype(),
|
||||
ColumnDataTypeWrapper(ColumnDataType::Datetime).into()
|
||||
ColumnDataTypeWrapper::datetime_datatype().into()
|
||||
);
|
||||
assert_eq!(
|
||||
ConcreteDataType::timestamp_millisecond_datatype(),
|
||||
ColumnDataTypeWrapper(ColumnDataType::TimestampMillisecond).into()
|
||||
ColumnDataTypeWrapper::timestamp_millisecond_datatype().into()
|
||||
);
|
||||
assert_eq!(
|
||||
ConcreteDataType::time_datatype(TimeUnit::Millisecond),
|
||||
ColumnDataTypeWrapper(ColumnDataType::TimeMillisecond).into()
|
||||
ColumnDataTypeWrapper::time_millisecond_datatype().into()
|
||||
);
|
||||
assert_eq!(
|
||||
ConcreteDataType::interval_datatype(IntervalUnit::DayTime),
|
||||
ColumnDataTypeWrapper(ColumnDataType::IntervalDayTime).into()
|
||||
ColumnDataTypeWrapper::interval_day_time_datatype().into()
|
||||
);
|
||||
assert_eq!(
|
||||
ConcreteDataType::interval_datatype(IntervalUnit::YearMonth),
|
||||
ColumnDataTypeWrapper(ColumnDataType::IntervalYearMonth).into()
|
||||
ColumnDataTypeWrapper::interval_year_month_datatype().into()
|
||||
);
|
||||
assert_eq!(
|
||||
ConcreteDataType::interval_datatype(IntervalUnit::MonthDayNano),
|
||||
ColumnDataTypeWrapper(ColumnDataType::IntervalMonthDayNano).into()
|
||||
ColumnDataTypeWrapper::interval_month_day_nano_datatype().into()
|
||||
);
|
||||
assert_eq!(
|
||||
ConcreteDataType::duration_millisecond_datatype(),
|
||||
ColumnDataTypeWrapper(ColumnDataType::DurationMillisecond).into()
|
||||
ColumnDataTypeWrapper::duration_millisecond_datatype().into()
|
||||
);
|
||||
assert_eq!(
|
||||
ConcreteDataType::decimal128_datatype(10, 2),
|
||||
ColumnDataTypeWrapper::decimal128_datatype(10, 2).into()
|
||||
)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_column_datatype_from_concrete_datatype() {
|
||||
assert_eq!(
|
||||
ColumnDataTypeWrapper(ColumnDataType::Boolean),
|
||||
ColumnDataTypeWrapper::boolean_datatype(),
|
||||
ConcreteDataType::boolean_datatype().try_into().unwrap()
|
||||
);
|
||||
assert_eq!(
|
||||
ColumnDataTypeWrapper(ColumnDataType::Int8),
|
||||
ColumnDataTypeWrapper::int8_datatype(),
|
||||
ConcreteDataType::int8_datatype().try_into().unwrap()
|
||||
);
|
||||
assert_eq!(
|
||||
ColumnDataTypeWrapper(ColumnDataType::Int16),
|
||||
ColumnDataTypeWrapper::int16_datatype(),
|
||||
ConcreteDataType::int16_datatype().try_into().unwrap()
|
||||
);
|
||||
assert_eq!(
|
||||
ColumnDataTypeWrapper(ColumnDataType::Int32),
|
||||
ColumnDataTypeWrapper::int32_datatype(),
|
||||
ConcreteDataType::int32_datatype().try_into().unwrap()
|
||||
);
|
||||
assert_eq!(
|
||||
ColumnDataTypeWrapper(ColumnDataType::Int64),
|
||||
ColumnDataTypeWrapper::int64_datatype(),
|
||||
ConcreteDataType::int64_datatype().try_into().unwrap()
|
||||
);
|
||||
assert_eq!(
|
||||
ColumnDataTypeWrapper(ColumnDataType::Uint8),
|
||||
ColumnDataTypeWrapper::uint8_datatype(),
|
||||
ConcreteDataType::uint8_datatype().try_into().unwrap()
|
||||
);
|
||||
assert_eq!(
|
||||
ColumnDataTypeWrapper(ColumnDataType::Uint16),
|
||||
ColumnDataTypeWrapper::uint16_datatype(),
|
||||
ConcreteDataType::uint16_datatype().try_into().unwrap()
|
||||
);
|
||||
assert_eq!(
|
||||
ColumnDataTypeWrapper(ColumnDataType::Uint32),
|
||||
ColumnDataTypeWrapper::uint32_datatype(),
|
||||
ConcreteDataType::uint32_datatype().try_into().unwrap()
|
||||
);
|
||||
assert_eq!(
|
||||
ColumnDataTypeWrapper(ColumnDataType::Uint64),
|
||||
ColumnDataTypeWrapper::uint64_datatype(),
|
||||
ConcreteDataType::uint64_datatype().try_into().unwrap()
|
||||
);
|
||||
assert_eq!(
|
||||
ColumnDataTypeWrapper(ColumnDataType::Float32),
|
||||
ColumnDataTypeWrapper::float32_datatype(),
|
||||
ConcreteDataType::float32_datatype().try_into().unwrap()
|
||||
);
|
||||
assert_eq!(
|
||||
ColumnDataTypeWrapper(ColumnDataType::Float64),
|
||||
ColumnDataTypeWrapper::float64_datatype(),
|
||||
ConcreteDataType::float64_datatype().try_into().unwrap()
|
||||
);
|
||||
assert_eq!(
|
||||
ColumnDataTypeWrapper(ColumnDataType::Binary),
|
||||
ColumnDataTypeWrapper::binary_datatype(),
|
||||
ConcreteDataType::binary_datatype().try_into().unwrap()
|
||||
);
|
||||
assert_eq!(
|
||||
ColumnDataTypeWrapper(ColumnDataType::String),
|
||||
ColumnDataTypeWrapper::string_datatype(),
|
||||
ConcreteDataType::string_datatype().try_into().unwrap()
|
||||
);
|
||||
assert_eq!(
|
||||
ColumnDataTypeWrapper(ColumnDataType::Date),
|
||||
ColumnDataTypeWrapper::date_datatype(),
|
||||
ConcreteDataType::date_datatype().try_into().unwrap()
|
||||
);
|
||||
assert_eq!(
|
||||
ColumnDataTypeWrapper(ColumnDataType::Datetime),
|
||||
ColumnDataTypeWrapper::datetime_datatype(),
|
||||
ConcreteDataType::datetime_datatype().try_into().unwrap()
|
||||
);
|
||||
assert_eq!(
|
||||
ColumnDataTypeWrapper(ColumnDataType::TimestampMillisecond),
|
||||
ColumnDataTypeWrapper::timestamp_millisecond_datatype(),
|
||||
ConcreteDataType::timestamp_millisecond_datatype()
|
||||
.try_into()
|
||||
.unwrap()
|
||||
);
|
||||
assert_eq!(
|
||||
ColumnDataTypeWrapper(ColumnDataType::IntervalYearMonth),
|
||||
ColumnDataTypeWrapper::interval_year_month_datatype(),
|
||||
ConcreteDataType::interval_datatype(IntervalUnit::YearMonth)
|
||||
.try_into()
|
||||
.unwrap()
|
||||
);
|
||||
assert_eq!(
|
||||
ColumnDataTypeWrapper(ColumnDataType::IntervalDayTime),
|
||||
ColumnDataTypeWrapper::interval_day_time_datatype(),
|
||||
ConcreteDataType::interval_datatype(IntervalUnit::DayTime)
|
||||
.try_into()
|
||||
.unwrap()
|
||||
);
|
||||
assert_eq!(
|
||||
ColumnDataTypeWrapper(ColumnDataType::IntervalMonthDayNano),
|
||||
ColumnDataTypeWrapper::interval_month_day_nano_datatype(),
|
||||
ConcreteDataType::interval_datatype(IntervalUnit::MonthDayNano)
|
||||
.try_into()
|
||||
.unwrap()
|
||||
);
|
||||
assert_eq!(
|
||||
ColumnDataTypeWrapper(ColumnDataType::DurationMillisecond),
|
||||
ColumnDataTypeWrapper::duration_millisecond_datatype(),
|
||||
ConcreteDataType::duration_millisecond_datatype()
|
||||
.try_into()
|
||||
.unwrap()
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
ColumnDataTypeWrapper::decimal128_datatype(10, 2),
|
||||
ConcreteDataType::decimal128_datatype(10, 2)
|
||||
.try_into()
|
||||
.unwrap()
|
||||
);
|
||||
|
||||
let result: Result<ColumnDataTypeWrapper> = ConcreteDataType::null_datatype().try_into();
|
||||
assert!(result.is_err());
|
||||
assert_eq!(
|
||||
@@ -1290,6 +1447,7 @@ mod tests {
|
||||
}),
|
||||
null_mask: vec![],
|
||||
datatype: 0,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let vector = Arc::new(TimestampNanosecondVector::from_vec(vec![1, 2, 3]));
|
||||
@@ -1331,6 +1489,7 @@ mod tests {
|
||||
}),
|
||||
null_mask: vec![],
|
||||
datatype: 0,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let vector = Arc::new(TimeNanosecondVector::from_vec(vec![1, 2, 3]));
|
||||
@@ -1372,6 +1531,7 @@ mod tests {
|
||||
}),
|
||||
null_mask: vec![],
|
||||
datatype: 0,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let vector = Arc::new(IntervalYearMonthVector::from_vec(vec![1, 2, 3]));
|
||||
@@ -1416,6 +1576,7 @@ mod tests {
|
||||
}),
|
||||
null_mask: vec![],
|
||||
datatype: 0,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let vector = Arc::new(DurationNanosecondVector::from_vec(vec![1, 2, 3]));
|
||||
@@ -1460,6 +1621,7 @@ mod tests {
|
||||
}),
|
||||
null_mask: vec![2],
|
||||
datatype: ColumnDataType::Boolean as i32,
|
||||
..Default::default()
|
||||
};
|
||||
let row_count = 4;
|
||||
|
||||
@@ -1617,17 +1779,17 @@ mod tests {
|
||||
&ConcreteDataType::Interval(IntervalType::MonthDayNano(IntervalMonthDayNanoType)),
|
||||
Values {
|
||||
interval_month_day_nano_values: vec![
|
||||
IntervalMonthDayNano {
|
||||
v1::IntervalMonthDayNano {
|
||||
months: 1,
|
||||
days: 2,
|
||||
nanoseconds: 3,
|
||||
},
|
||||
IntervalMonthDayNano {
|
||||
v1::IntervalMonthDayNano {
|
||||
months: 5,
|
||||
days: 6,
|
||||
nanoseconds: 7,
|
||||
},
|
||||
IntervalMonthDayNano {
|
||||
v1::IntervalMonthDayNano {
|
||||
months: 9,
|
||||
days: 10,
|
||||
nanoseconds: 11,
|
||||
@@ -1859,4 +2021,33 @@ mod tests {
|
||||
assert_eq!(values[6], ValueData::DateValue(30));
|
||||
assert_eq!(values[7], ValueData::StringValue("c".to_string()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_is_column_type_value_eq() {
|
||||
// test column type eq
|
||||
let column1 = Column {
|
||||
column_name: "test".to_string(),
|
||||
semantic_type: 0,
|
||||
values: Some(Values {
|
||||
bool_values: vec![false, true, true],
|
||||
..Default::default()
|
||||
}),
|
||||
null_mask: vec![2],
|
||||
datatype: ColumnDataType::Boolean as i32,
|
||||
datatype_extension: None,
|
||||
};
|
||||
assert!(is_column_type_value_eq(
|
||||
column1.datatype,
|
||||
column1.datatype_extension,
|
||||
&ConcreteDataType::boolean_datatype(),
|
||||
));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_convert_to_pb_decimal128() {
|
||||
let decimal = Decimal128::new(123, 3, 1);
|
||||
let pb_decimal = convert_to_pb_decimal128(decimal);
|
||||
assert_eq!(pb_decimal.lo, 123);
|
||||
assert_eq!(pb_decimal.hi, 0);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -22,7 +22,10 @@ use crate::helper::ColumnDataTypeWrapper;
|
||||
use crate::v1::ColumnDef;
|
||||
|
||||
pub fn try_as_column_schema(column_def: &ColumnDef) -> Result<ColumnSchema> {
|
||||
let data_type = ColumnDataTypeWrapper::try_new(column_def.data_type)?;
|
||||
let data_type = ColumnDataTypeWrapper::try_new(
|
||||
column_def.data_type,
|
||||
column_def.datatype_extension.clone(),
|
||||
)?;
|
||||
|
||||
let constraint = if column_def.default_constraint.is_empty() {
|
||||
None
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
|
||||
@@ -4,13 +4,14 @@
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use secrecy::ExposeSecret;
|
||||
|
||||
use crate::error::{
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
|
||||
@@ -8,28 +8,28 @@ license.workspace = true
|
||||
testing = []
|
||||
|
||||
[dependencies]
|
||||
api = { workspace = true }
|
||||
api.workspace = true
|
||||
arc-swap = "1.0"
|
||||
arrow-schema.workspace = true
|
||||
async-stream.workspace = true
|
||||
async-trait = "0.1"
|
||||
common-catalog = { workspace = true }
|
||||
common-error = { workspace = true }
|
||||
common-grpc = { workspace = true }
|
||||
common-macro = { workspace = true }
|
||||
common-meta = { workspace = true }
|
||||
common-query = { workspace = true }
|
||||
common-recordbatch = { workspace = true }
|
||||
common-runtime = { workspace = true }
|
||||
common-telemetry = { workspace = true }
|
||||
common-time = { workspace = true }
|
||||
common-catalog.workspace = true
|
||||
common-error.workspace = true
|
||||
common-grpc.workspace = true
|
||||
common-macro.workspace = true
|
||||
common-meta.workspace = true
|
||||
common-query.workspace = true
|
||||
common-recordbatch.workspace = true
|
||||
common-runtime.workspace = true
|
||||
common-telemetry.workspace = true
|
||||
common-time.workspace = true
|
||||
dashmap = "5.4"
|
||||
datafusion.workspace = true
|
||||
datatypes = { workspace = true }
|
||||
datatypes.workspace = true
|
||||
futures = "0.3"
|
||||
futures-util.workspace = true
|
||||
lazy_static.workspace = true
|
||||
meta-client = { workspace = true }
|
||||
meta-client.workspace = true
|
||||
moka = { workspace = true, features = ["future"] }
|
||||
parking_lot = "0.12"
|
||||
partition.workspace = true
|
||||
@@ -37,17 +37,16 @@ prometheus.workspace = true
|
||||
regex.workspace = true
|
||||
serde.workspace = true
|
||||
serde_json = "1.0"
|
||||
session = { workspace = true }
|
||||
snafu = { version = "0.7", features = ["backtraces"] }
|
||||
store-api = { workspace = true }
|
||||
table = { workspace = true }
|
||||
session.workspace = true
|
||||
snafu.workspace = true
|
||||
store-api.workspace = true
|
||||
table.workspace = true
|
||||
tokio.workspace = true
|
||||
|
||||
[dev-dependencies]
|
||||
catalog = { workspace = true, features = ["testing"] }
|
||||
chrono.workspace = true
|
||||
common-test-util = { workspace = true }
|
||||
log-store = { workspace = true }
|
||||
object-store = { workspace = true }
|
||||
storage = { workspace = true }
|
||||
common-test-util.workspace = true
|
||||
log-store.workspace = true
|
||||
object-store.workspace = true
|
||||
tokio.workspace = true
|
||||
|
||||
@@ -180,7 +180,7 @@ pub enum Error {
|
||||
source: table::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display(""))]
|
||||
#[snafu(display("Internal error"))]
|
||||
Internal {
|
||||
location: Location,
|
||||
source: BoxedError,
|
||||
@@ -216,7 +216,7 @@ pub enum Error {
|
||||
#[snafu(display("Illegal access to catalog: {} and schema: {}", catalog, schema))]
|
||||
QueryAccessDenied { catalog: String, schema: String },
|
||||
|
||||
#[snafu(display(""))]
|
||||
#[snafu(display("DataFusion error"))]
|
||||
Datafusion {
|
||||
#[snafu(source)]
|
||||
error: DataFusionError,
|
||||
|
||||
@@ -202,7 +202,7 @@ impl InformationSchemaColumnsBuilder {
|
||||
&schema_name,
|
||||
&table_name,
|
||||
&column.name,
|
||||
column.data_type.name(),
|
||||
&column.data_type.name(),
|
||||
semantic_type,
|
||||
);
|
||||
}
|
||||
|
||||
@@ -19,7 +19,6 @@ use std::sync::{Arc, Weak};
|
||||
use common_catalog::consts::{DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, NUMBERS_TABLE_ID};
|
||||
use common_error::ext::BoxedError;
|
||||
use common_meta::cache_invalidator::{CacheInvalidator, CacheInvalidatorRef, Context};
|
||||
use common_meta::datanode_manager::DatanodeManagerRef;
|
||||
use common_meta::error::Result as MetaResult;
|
||||
use common_meta::key::catalog_name::CatalogNameKey;
|
||||
use common_meta::key::schema_name::SchemaNameKey;
|
||||
@@ -55,7 +54,6 @@ pub struct KvBackendCatalogManager {
|
||||
cache_invalidator: CacheInvalidatorRef,
|
||||
partition_manager: PartitionRuleManagerRef,
|
||||
table_metadata_manager: TableMetadataManagerRef,
|
||||
datanode_manager: DatanodeManagerRef,
|
||||
/// A sub-CatalogManager that handles system tables
|
||||
system_catalog: SystemCatalog,
|
||||
}
|
||||
@@ -76,16 +74,11 @@ impl CacheInvalidator for KvBackendCatalogManager {
|
||||
}
|
||||
|
||||
impl KvBackendCatalogManager {
|
||||
pub fn new(
|
||||
backend: KvBackendRef,
|
||||
cache_invalidator: CacheInvalidatorRef,
|
||||
datanode_manager: DatanodeManagerRef,
|
||||
) -> Arc<Self> {
|
||||
pub fn new(backend: KvBackendRef, cache_invalidator: CacheInvalidatorRef) -> Arc<Self> {
|
||||
Arc::new_cyclic(|me| Self {
|
||||
partition_manager: Arc::new(PartitionRuleManager::new(backend.clone())),
|
||||
table_metadata_manager: Arc::new(TableMetadataManager::new(backend)),
|
||||
cache_invalidator,
|
||||
datanode_manager,
|
||||
system_catalog: SystemCatalog {
|
||||
catalog_manager: me.clone(),
|
||||
},
|
||||
@@ -99,10 +92,6 @@ impl KvBackendCatalogManager {
|
||||
pub fn table_metadata_manager_ref(&self) -> &TableMetadataManagerRef {
|
||||
&self.table_metadata_manager
|
||||
}
|
||||
|
||||
pub fn datanode_manager(&self) -> DatanodeManagerRef {
|
||||
self.datanode_manager.clone()
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
|
||||
@@ -8,22 +8,22 @@ license.workspace = true
|
||||
testing = []
|
||||
|
||||
[dependencies]
|
||||
api = { workspace = true }
|
||||
api.workspace = true
|
||||
arrow-flight.workspace = true
|
||||
async-stream.workspace = true
|
||||
async-trait.workspace = true
|
||||
common-base = { workspace = true }
|
||||
common-catalog = { workspace = true }
|
||||
common-error = { workspace = true }
|
||||
common-grpc = { workspace = true }
|
||||
common-macro = { workspace = true }
|
||||
common-meta = { workspace = true }
|
||||
common-query = { workspace = true }
|
||||
common-recordbatch = { workspace = true }
|
||||
common-telemetry = { workspace = true }
|
||||
common-time = { workspace = true }
|
||||
common-base.workspace = true
|
||||
common-catalog.workspace = true
|
||||
common-error.workspace = true
|
||||
common-grpc.workspace = true
|
||||
common-macro.workspace = true
|
||||
common-meta.workspace = true
|
||||
common-query.workspace = true
|
||||
common-recordbatch.workspace = true
|
||||
common-telemetry.workspace = true
|
||||
common-time.workspace = true
|
||||
datafusion.workspace = true
|
||||
datatypes = { workspace = true }
|
||||
datatypes.workspace = true
|
||||
derive_builder.workspace = true
|
||||
enum_dispatch = "0.3"
|
||||
futures-util.workspace = true
|
||||
@@ -33,17 +33,17 @@ parking_lot = "0.12"
|
||||
prometheus.workspace = true
|
||||
prost.workspace = true
|
||||
rand.workspace = true
|
||||
session = { workspace = true }
|
||||
session.workspace = true
|
||||
snafu.workspace = true
|
||||
tokio-stream = { version = "0.1", features = ["net"] }
|
||||
tokio.workspace = true
|
||||
tonic.workspace = true
|
||||
|
||||
[dev-dependencies]
|
||||
common-grpc-expr = { workspace = true }
|
||||
datanode = { workspace = true }
|
||||
common-grpc-expr.workspace = true
|
||||
datanode.workspace = true
|
||||
derive-new = "0.5"
|
||||
substrait = { workspace = true }
|
||||
substrait.workspace = true
|
||||
tracing = "0.1"
|
||||
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
|
||||
|
||||
|
||||
@@ -46,6 +46,7 @@ async fn run() {
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Timestamp as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDef {
|
||||
name: "key".to_string(),
|
||||
@@ -54,6 +55,7 @@ async fn run() {
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Tag as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDef {
|
||||
name: "value".to_string(),
|
||||
@@ -62,6 +64,7 @@ async fn run() {
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
],
|
||||
time_index: "timestamp".to_string(),
|
||||
@@ -78,7 +81,7 @@ async fn run() {
|
||||
|
||||
let logical = mock_logical_plan();
|
||||
event!(Level::INFO, "plan size: {:#?}", logical.len());
|
||||
let result = db.logical_plan(logical, 0).await.unwrap();
|
||||
let result = db.logical_plan(logical).await.unwrap();
|
||||
|
||||
event!(Level::INFO, "result: {:#?}", result);
|
||||
}
|
||||
|
||||
@@ -29,6 +29,7 @@ use common_query::Output;
|
||||
use common_recordbatch::error::ExternalSnafu;
|
||||
use common_recordbatch::RecordBatchStreamAdaptor;
|
||||
use common_telemetry::logging;
|
||||
use common_telemetry::tracing_context::W3cTrace;
|
||||
use futures_util::StreamExt;
|
||||
use prost::Message;
|
||||
use snafu::{ensure, ResultExt};
|
||||
@@ -147,21 +148,21 @@ impl Database {
|
||||
|
||||
async fn handle(&self, request: Request) -> Result<u32> {
|
||||
let mut client = self.client.make_database_client()?.inner;
|
||||
let request = self.to_rpc_request(request, 0);
|
||||
let request = self.to_rpc_request(request);
|
||||
let response = client.handle(request).await?.into_inner();
|
||||
from_grpc_response(response)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn to_rpc_request(&self, request: Request, trace_id: u64) -> GreptimeRequest {
|
||||
fn to_rpc_request(&self, request: Request) -> GreptimeRequest {
|
||||
GreptimeRequest {
|
||||
header: Some(RequestHeader {
|
||||
catalog: self.catalog.clone(),
|
||||
schema: self.schema.clone(),
|
||||
authorization: self.ctx.auth_header.clone(),
|
||||
dbname: self.dbname.clone(),
|
||||
trace_id,
|
||||
span_id: 0,
|
||||
// TODO(Taylor-lagrange): add client grpc tracing
|
||||
tracing_context: W3cTrace::new(),
|
||||
}),
|
||||
request: Some(request),
|
||||
}
|
||||
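With the explicit trace_id argument dropped, the trace now travels in RequestHeader.tracing_context. A hedged sketch of what that means for call sites (the surrounding setup is assumed, not shown in the patch):

// Before this change: db.logical_plan(plan.to_vec(), trace_id).await
// After: the extra argument simply disappears; the header carries a W3C trace context.
let output = db.logical_plan(plan.to_vec()).await?;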
@@ -172,23 +173,17 @@ impl Database {
|
||||
S: AsRef<str>,
|
||||
{
|
||||
let _timer = metrics::METRIC_GRPC_SQL.start_timer();
|
||||
self.do_get(
|
||||
Request::Query(QueryRequest {
|
||||
query: Some(Query::Sql(sql.as_ref().to_string())),
|
||||
}),
|
||||
0,
|
||||
)
|
||||
self.do_get(Request::Query(QueryRequest {
|
||||
query: Some(Query::Sql(sql.as_ref().to_string())),
|
||||
}))
|
||||
.await
|
||||
}
|
||||
|
||||
pub async fn logical_plan(&self, logical_plan: Vec<u8>, trace_id: u64) -> Result<Output> {
|
||||
pub async fn logical_plan(&self, logical_plan: Vec<u8>) -> Result<Output> {
|
||||
let _timer = metrics::METRIC_GRPC_LOGICAL_PLAN.start_timer();
|
||||
self.do_get(
|
||||
Request::Query(QueryRequest {
|
||||
query: Some(Query::LogicalPlan(logical_plan)),
|
||||
}),
|
||||
trace_id,
|
||||
)
|
||||
self.do_get(Request::Query(QueryRequest {
|
||||
query: Some(Query::LogicalPlan(logical_plan)),
|
||||
}))
|
||||
.await
|
||||
}
|
||||
|
||||
@@ -200,68 +195,53 @@ impl Database {
|
||||
step: &str,
|
||||
) -> Result<Output> {
|
||||
let _timer = metrics::METRIC_GRPC_PROMQL_RANGE_QUERY.start_timer();
|
||||
self.do_get(
|
||||
Request::Query(QueryRequest {
|
||||
query: Some(Query::PromRangeQuery(PromRangeQuery {
|
||||
query: promql.to_string(),
|
||||
start: start.to_string(),
|
||||
end: end.to_string(),
|
||||
step: step.to_string(),
|
||||
})),
|
||||
}),
|
||||
0,
|
||||
)
|
||||
self.do_get(Request::Query(QueryRequest {
|
||||
query: Some(Query::PromRangeQuery(PromRangeQuery {
|
||||
query: promql.to_string(),
|
||||
start: start.to_string(),
|
||||
end: end.to_string(),
|
||||
step: step.to_string(),
|
||||
})),
|
||||
}))
|
||||
.await
|
||||
}
|
||||
|
||||
pub async fn create(&self, expr: CreateTableExpr) -> Result<Output> {
|
||||
let _timer = metrics::METRIC_GRPC_CREATE_TABLE.start_timer();
|
||||
self.do_get(
|
||||
Request::Ddl(DdlRequest {
|
||||
expr: Some(DdlExpr::CreateTable(expr)),
|
||||
}),
|
||||
0,
|
||||
)
|
||||
self.do_get(Request::Ddl(DdlRequest {
|
||||
expr: Some(DdlExpr::CreateTable(expr)),
|
||||
}))
|
||||
.await
|
||||
}
|
||||
|
||||
pub async fn alter(&self, expr: AlterExpr) -> Result<Output> {
|
||||
let _timer = metrics::METRIC_GRPC_ALTER.start_timer();
|
||||
self.do_get(
|
||||
Request::Ddl(DdlRequest {
|
||||
expr: Some(DdlExpr::Alter(expr)),
|
||||
}),
|
||||
0,
|
||||
)
|
||||
self.do_get(Request::Ddl(DdlRequest {
|
||||
expr: Some(DdlExpr::Alter(expr)),
|
||||
}))
|
||||
.await
|
||||
}
|
||||
|
||||
pub async fn drop_table(&self, expr: DropTableExpr) -> Result<Output> {
|
||||
let _timer = metrics::METRIC_GRPC_DROP_TABLE.start_timer();
|
||||
self.do_get(
|
||||
Request::Ddl(DdlRequest {
|
||||
expr: Some(DdlExpr::DropTable(expr)),
|
||||
}),
|
||||
0,
|
||||
)
|
||||
self.do_get(Request::Ddl(DdlRequest {
|
||||
expr: Some(DdlExpr::DropTable(expr)),
|
||||
}))
|
||||
.await
|
||||
}
|
||||
|
||||
pub async fn truncate_table(&self, expr: TruncateTableExpr) -> Result<Output> {
|
||||
let _timer = metrics::METRIC_GRPC_TRUNCATE_TABLE.start_timer();
|
||||
self.do_get(
|
||||
Request::Ddl(DdlRequest {
|
||||
expr: Some(DdlExpr::TruncateTable(expr)),
|
||||
}),
|
||||
0,
|
||||
)
|
||||
self.do_get(Request::Ddl(DdlRequest {
|
||||
expr: Some(DdlExpr::TruncateTable(expr)),
|
||||
}))
|
||||
.await
|
||||
}
|
||||
|
||||
async fn do_get(&self, request: Request, trace_id: u64) -> Result<Output> {
|
||||
async fn do_get(&self, request: Request) -> Result<Output> {
|
||||
// FIXME(paomian): should be added some labels for metrics
|
||||
let _timer = metrics::METRIC_GRPC_DO_GET.start_timer();
|
||||
let request = self.to_rpc_request(request, trace_id);
|
||||
let request = self.to_rpc_request(request);
|
||||
let request = Ticket {
|
||||
ticket: request.encode_to_vec().into(),
|
||||
};
|
||||
|
||||
@@ -16,50 +16,50 @@ tokio-console = ["common-telemetry/tokio-console"]
|
||||
anymap = "1.0.0-beta.2"
|
||||
async-trait.workspace = true
|
||||
auth.workspace = true
|
||||
catalog = { workspace = true }
|
||||
catalog.workspace = true
|
||||
chrono.workspace = true
|
||||
clap = { version = "3.1", features = ["derive"] }
|
||||
client = { workspace = true }
|
||||
common-base = { workspace = true }
|
||||
common-catalog = { workspace = true }
|
||||
common-config = { workspace = true }
|
||||
common-error = { workspace = true }
|
||||
common-macro = { workspace = true }
|
||||
common-meta = { workspace = true }
|
||||
common-procedure = { workspace = true }
|
||||
common-query = { workspace = true }
|
||||
common-recordbatch = { workspace = true }
|
||||
clap = { version = "4.4", features = ["derive"] }
|
||||
client.workspace = true
|
||||
common-base.workspace = true
|
||||
common-catalog.workspace = true
|
||||
common-config.workspace = true
|
||||
common-error.workspace = true
|
||||
common-macro.workspace = true
|
||||
common-meta.workspace = true
|
||||
common-procedure.workspace = true
|
||||
common-query.workspace = true
|
||||
common-recordbatch.workspace = true
|
||||
common-telemetry = { workspace = true, features = [
|
||||
"deadlock_detection",
|
||||
] }
|
||||
config = "0.13"
|
||||
datanode = { workspace = true }
|
||||
datatypes = { workspace = true }
|
||||
datanode.workspace = true
|
||||
datatypes.workspace = true
|
||||
either = "1.8"
|
||||
etcd-client.workspace = true
|
||||
file-engine = { workspace = true }
|
||||
frontend = { workspace = true }
|
||||
file-engine.workspace = true
|
||||
frontend.workspace = true
|
||||
futures.workspace = true
|
||||
lazy_static.workspace = true
|
||||
meta-client = { workspace = true }
|
||||
meta-srv = { workspace = true }
|
||||
mito2 = { workspace = true }
|
||||
meta-client.workspace = true
|
||||
meta-srv.workspace = true
|
||||
mito2.workspace = true
|
||||
nu-ansi-term = "0.46"
|
||||
partition = { workspace = true }
|
||||
partition.workspace = true
|
||||
plugins.workspace = true
|
||||
prometheus.workspace = true
|
||||
prost.workspace = true
|
||||
query = { workspace = true }
|
||||
query.workspace = true
|
||||
rand.workspace = true
|
||||
regex.workspace = true
|
||||
rustyline = "10.1"
|
||||
serde.workspace = true
|
||||
serde_json.workspace = true
|
||||
servers = { workspace = true }
|
||||
session = { workspace = true }
|
||||
servers.workspace = true
|
||||
session.workspace = true
|
||||
snafu.workspace = true
|
||||
substrait = { workspace = true }
|
||||
table = { workspace = true }
|
||||
substrait.workspace = true
|
||||
table.workspace = true
|
||||
tokio.workspace = true
|
||||
toml.workspace = true
|
||||
|
||||
@@ -67,7 +67,7 @@ toml.workspace = true
|
||||
tikv-jemallocator = "0.5"
|
||||
|
||||
[dev-dependencies]
|
||||
common-test-util = { workspace = true }
|
||||
common-test-util.workspace = true
|
||||
serde.workspace = true
|
||||
temp-env = "0.3"
|
||||
|
||||
@@ -75,4 +75,4 @@ temp-env = "0.3"
|
||||
rexpect = "0.5"
|
||||
|
||||
[build-dependencies]
|
||||
common-version = { workspace = true }
|
||||
common-version.workspace = true
|
||||
|
||||
@@ -208,7 +208,8 @@ async fn main() -> Result<()> {
|
||||
};
|
||||
|
||||
common_telemetry::set_panic_hook();
|
||||
let _guard = common_telemetry::init_global_logging(app_name, logging_opts, tracing_opts);
|
||||
let _guard =
|
||||
common_telemetry::init_global_logging(app_name, logging_opts, tracing_opts, opts.node_id());
|
||||
|
||||
// Report app version as gauge.
|
||||
APP_VERSION
|
||||
|
||||
@@ -17,7 +17,6 @@ use std::sync::Arc;
|
||||
use std::time::Instant;
|
||||
|
||||
use catalog::kvbackend::{CachedMetaKvBackend, KvBackendCatalogManager};
|
||||
use client::client_manager::DatanodeClients;
|
||||
use client::{Client, Database, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use common_base::Plugins;
|
||||
use common_error::ext::ErrorExt;
|
||||
@@ -176,7 +175,7 @@ impl Repl {
|
||||
.encode(&plan)
|
||||
.context(SubstraitEncodeLogicalPlanSnafu)?;
|
||||
|
||||
self.database.logical_plan(plan.to_vec(), 0).await
|
||||
self.database.logical_plan(plan.to_vec()).await
|
||||
} else {
|
||||
self.database.sql(&sql).await
|
||||
}
|
||||
@@ -250,13 +249,8 @@ async fn create_query_engine(meta_addr: &str) -> Result<DatafusionQueryEngine> {
|
||||
|
||||
let cached_meta_backend = Arc::new(CachedMetaKvBackend::new(meta_client.clone()));
|
||||
|
||||
let datanode_clients = Arc::new(DatanodeClients::default());
|
||||
|
||||
let catalog_list = KvBackendCatalogManager::new(
|
||||
cached_meta_backend.clone(),
|
||||
cached_meta_backend.clone(),
|
||||
datanode_clients,
|
||||
);
|
||||
let catalog_list =
|
||||
KvBackendCatalogManager::new(cached_meta_backend.clone(), cached_meta_backend);
|
||||
let plugins: Plugins = Default::default();
|
||||
let state = Arc::new(QueryEngineState::new(
|
||||
catalog_list,
|
||||
|
||||
@@ -12,15 +12,17 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use catalog::kvbackend::MetaKvBackend;
|
||||
use clap::Parser;
|
||||
use common_telemetry::logging;
|
||||
use datanode::config::DatanodeOptions;
|
||||
use datanode::datanode::{Datanode, DatanodeBuilder};
|
||||
use meta_client::MetaClientOptions;
|
||||
use servers::Mode;
|
||||
use snafu::ResultExt;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
|
||||
use crate::error::{MissingConfigSnafu, Result, ShutdownDatanodeSnafu, StartDatanodeSnafu};
|
||||
use crate::options::{Options, TopLevelOptions};
|
||||
@@ -89,7 +91,7 @@ struct StartCommand {
|
||||
rpc_addr: Option<String>,
|
||||
#[clap(long)]
|
||||
rpc_hostname: Option<String>,
|
||||
#[clap(long, multiple = true, value_delimiter = ',')]
|
||||
#[clap(long, value_delimiter = ',', num_args = 1..)]
|
||||
metasrv_addr: Option<Vec<String>>,
|
||||
#[clap(short, long)]
|
||||
config_file: Option<String>,
|
||||
@@ -177,7 +179,27 @@ impl StartCommand {
|
||||
logging::info!("Datanode start command: {:#?}", self);
|
||||
logging::info!("Datanode options: {:#?}", opts);
|
||||
|
||||
let datanode = DatanodeBuilder::new(opts, None, plugins)
|
||||
let node_id = opts
|
||||
.node_id
|
||||
.context(MissingConfigSnafu { msg: "'node_id'" })?;
|
||||
|
||||
let meta_config = opts.meta_client.as_ref().context(MissingConfigSnafu {
|
||||
msg: "'meta_client_options'",
|
||||
})?;
|
||||
|
||||
let meta_client = datanode::heartbeat::new_metasrv_client(node_id, meta_config)
|
||||
.await
|
||||
.context(StartDatanodeSnafu)?;
|
||||
|
||||
let meta_backend = Arc::new(MetaKvBackend {
|
||||
client: Arc::new(meta_client.clone()),
|
||||
});
|
||||
|
||||
let datanode = DatanodeBuilder::new(opts, plugins)
|
||||
.with_meta_client(meta_client)
|
||||
.with_kv_backend(meta_backend)
|
||||
.enable_region_server_service()
|
||||
.enable_http_service()
|
||||
.build()
|
||||
.await
|
||||
.context(StartDatanodeSnafu)?;
|
||||
@@ -191,9 +213,8 @@ mod tests {
|
||||
use std::io::Write;
|
||||
use std::time::Duration;
|
||||
|
||||
use common_base::readable_size::ReadableSize;
|
||||
use common_test_util::temp_dir::create_named_temp_file;
|
||||
use datanode::config::{CompactionConfig, FileConfig, ObjectStoreConfig, RegionManifestConfig};
|
||||
use datanode::config::{FileConfig, ObjectStoreConfig};
|
||||
use servers::heartbeat_options::HeartbeatOptions;
|
||||
use servers::Mode;
|
||||
|
||||
@@ -233,16 +254,6 @@ mod tests {
|
||||
type = "File"
|
||||
data_home = "/tmp/greptimedb/"
|
||||
|
||||
[storage.compaction]
|
||||
max_inflight_tasks = 3
|
||||
max_files_in_level0 = 7
|
||||
max_purge_tasks = 32
|
||||
|
||||
[storage.manifest]
|
||||
checkpoint_margin = 9
|
||||
gc_duration = '7s'
|
||||
compress = true
|
||||
|
||||
[logging]
|
||||
level = "debug"
|
||||
dir = "/tmp/greptimedb/test/logs"
|
||||
@@ -295,24 +306,6 @@ mod tests {
|
||||
ObjectStoreConfig::File(FileConfig { .. })
|
||||
));
|
||||
|
||||
assert_eq!(
|
||||
CompactionConfig {
|
||||
max_inflight_tasks: 3,
|
||||
max_files_in_level0: 7,
|
||||
max_purge_tasks: 32,
|
||||
sst_write_buffer_size: ReadableSize::mb(8),
|
||||
},
|
||||
options.storage.compaction,
|
||||
);
|
||||
assert_eq!(
|
||||
RegionManifestConfig {
|
||||
checkpoint_margin: Some(9),
|
||||
gc_duration: Some(Duration::from_secs(7)),
|
||||
compress: true
|
||||
},
|
||||
options.storage.manifest,
|
||||
);
|
||||
|
||||
assert_eq!("debug", options.logging.level.unwrap());
|
||||
assert_eq!("/tmp/greptimedb/test/logs".to_string(), options.logging.dir);
|
||||
}
|
||||
@@ -389,18 +382,12 @@ mod tests {
|
||||
file_size = "1GB"
|
||||
purge_threshold = "50GB"
|
||||
purge_interval = "10m"
|
||||
read_batch_size = 128
|
||||
sync_write = false
|
||||
|
||||
[storage]
|
||||
type = "File"
|
||||
data_home = "/tmp/greptimedb/"
|
||||
|
||||
[storage.compaction]
|
||||
max_inflight_tasks = 3
|
||||
max_files_in_level0 = 7
|
||||
max_purge_tasks = 32
|
||||
|
||||
[logging]
|
||||
level = "debug"
|
||||
dir = "/tmp/greptimedb/test/logs"
|
||||
@@ -411,26 +398,24 @@ mod tests {
|
||||
temp_env::with_vars(
|
||||
[
|
||||
(
|
||||
// storage.manifest.gc_duration = 9s
|
||||
// wal.purge_interval = 1m
|
||||
[
|
||||
env_prefix.to_string(),
|
||||
"storage".to_uppercase(),
|
||||
"manifest".to_uppercase(),
|
||||
"gc_duration".to_uppercase(),
|
||||
"wal".to_uppercase(),
|
||||
"purge_interval".to_uppercase(),
|
||||
]
|
||||
.join(ENV_VAR_SEP),
|
||||
Some("9s"),
|
||||
Some("1m"),
|
||||
),
|
||||
(
|
||||
// storage.compaction.max_purge_tasks = 99
|
||||
// wal.read_batch_size = 100
|
||||
[
|
||||
env_prefix.to_string(),
|
||||
"storage".to_uppercase(),
|
||||
"compaction".to_uppercase(),
|
||||
"max_purge_tasks".to_uppercase(),
|
||||
"wal".to_uppercase(),
|
||||
"read_batch_size".to_uppercase(),
|
||||
]
|
||||
.join(ENV_VAR_SEP),
|
||||
Some("99"),
|
||||
Some("100"),
|
||||
),
|
||||
(
|
||||
// meta_client.metasrv_addrs = 127.0.0.1:3001,127.0.0.1:3002,127.0.0.1:3003
|
||||
@@ -458,10 +443,7 @@ mod tests {
|
||||
};
|
||||
|
||||
// Should be read from env, env > default values.
|
||||
assert_eq!(
|
||||
opts.storage.manifest.gc_duration,
|
||||
Some(Duration::from_secs(9))
|
||||
);
|
||||
assert_eq!(opts.wal.read_batch_size, 100,);
|
||||
assert_eq!(
|
||||
opts.meta_client.unwrap().metasrv_addrs,
|
||||
vec![
|
||||
@@ -472,19 +454,13 @@ mod tests {
|
||||
);
|
||||
|
||||
// Should be read from config file, config file > env > default values.
|
||||
assert_eq!(opts.storage.compaction.max_purge_tasks, 32);
|
||||
assert_eq!(opts.wal.purge_interval, Duration::from_secs(60 * 10));
|
||||
|
||||
// Should be read from cli, cli > config file > env > default values.
|
||||
assert_eq!(opts.wal.dir.unwrap(), "/other/wal/dir");
|
||||
|
||||
// Should be default value.
|
||||
assert_eq!(
|
||||
opts.storage.manifest.checkpoint_margin,
|
||||
DatanodeOptions::default()
|
||||
.storage
|
||||
.manifest
|
||||
.checkpoint_margin
|
||||
);
|
||||
assert_eq!(opts.http.addr, DatanodeOptions::default().http.addr);
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
@@ -37,6 +37,12 @@ pub enum Error {
|
||||
source: common_meta::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to init DDL manager"))]
|
||||
InitDdlManager {
|
||||
location: Location,
|
||||
source: common_meta::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to start procedure manager"))]
|
||||
StartProcedureManager {
|
||||
location: Location,
|
||||
@@ -240,9 +246,11 @@ impl ErrorExt for Error {
|
||||
Error::ShutdownMetaServer { source, .. } => source.status_code(),
|
||||
Error::BuildMetaServer { source, .. } => source.status_code(),
|
||||
Error::UnsupportedSelectorType { source, .. } => source.status_code(),
|
||||
Error::IterStream { source, .. } | Error::InitMetadata { source, .. } => {
|
||||
source.status_code()
|
||||
}
|
||||
|
||||
Error::IterStream { source, .. }
|
||||
| Error::InitMetadata { source, .. }
|
||||
| Error::InitDdlManager { source, .. } => source.status_code(),
|
||||
|
||||
Error::ConnectServer { source, .. } => source.status_code(),
|
||||
Error::MissingConfig { .. }
|
||||
| Error::LoadLayeredConfig { .. }
|
||||
@@ -253,6 +261,7 @@ impl ErrorExt for Error {
|
||||
| Error::CreateDir { .. }
|
||||
| Error::EmptyResult { .. }
|
||||
| Error::InvalidDatabaseName { .. } => StatusCode::InvalidArguments,
|
||||
|
||||
Error::StartProcedureManager { source, .. }
|
||||
| Error::StopProcedureManager { source, .. } => source.status_code(),
|
||||
Error::ReplCreation { .. } | Error::Readline { .. } => StatusCode::Internal,
|
||||
|
||||
@@ -12,18 +12,26 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use catalog::kvbackend::CachedMetaKvBackend;
|
||||
use clap::Parser;
|
||||
use client::client_manager::DatanodeClients;
|
||||
use common_meta::heartbeat::handler::parse_mailbox_message::ParseMailboxMessageHandler;
|
||||
use common_meta::heartbeat::handler::HandlerGroupExecutor;
|
||||
use common_telemetry::logging;
|
||||
use frontend::frontend::FrontendOptions;
|
||||
use frontend::heartbeat::handler::invalidate_table_cache::InvalidateTableCacheHandler;
|
||||
use frontend::heartbeat::HeartbeatTask;
|
||||
use frontend::instance::builder::FrontendBuilder;
|
||||
use frontend::instance::{FrontendInstance, Instance as FeInstance};
|
||||
use meta_client::MetaClientOptions;
|
||||
use servers::tls::{TlsMode, TlsOption};
|
||||
use servers::Mode;
|
||||
use snafu::ResultExt;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
|
||||
use crate::error::{self, Result, StartFrontendSnafu};
|
||||
use crate::error::{self, MissingConfigSnafu, Result, StartFrontendSnafu};
|
||||
use crate::options::{Options, TopLevelOptions};
|
||||
|
||||
pub struct Instance {
|
||||
@@ -100,7 +108,7 @@ pub struct StartCommand {
|
||||
config_file: Option<String>,
|
||||
#[clap(short, long)]
|
||||
influxdb_enable: Option<bool>,
|
||||
#[clap(long, multiple = true, value_delimiter = ',')]
|
||||
#[clap(long, value_delimiter = ',', num_args = 1..)]
|
||||
metasrv_addr: Option<Vec<String>>,
|
||||
#[clap(long)]
|
||||
tls_mode: Option<TlsMode>,
|
||||
@@ -196,10 +204,38 @@ impl StartCommand {
|
||||
logging::info!("Frontend start command: {:#?}", self);
|
||||
logging::info!("Frontend options: {:#?}", opts);
|
||||
|
||||
let mut instance = FeInstance::try_new_distributed(&opts, plugins.clone())
|
||||
let meta_client_options = opts.meta_client.as_ref().context(MissingConfigSnafu {
|
||||
msg: "'meta_client'",
|
||||
})?;
|
||||
let meta_client = FeInstance::create_meta_client(meta_client_options)
|
||||
.await
|
||||
.context(StartFrontendSnafu)?;
|
||||
|
||||
let meta_backend = Arc::new(CachedMetaKvBackend::new(meta_client.clone()));
|
||||
|
||||
let executor = HandlerGroupExecutor::new(vec![
|
||||
Arc::new(ParseMailboxMessageHandler),
|
||||
Arc::new(InvalidateTableCacheHandler::new(meta_backend.clone())),
|
||||
]);
|
||||
|
||||
let heartbeat_task = HeartbeatTask::new(
|
||||
meta_client.clone(),
|
||||
opts.heartbeat.clone(),
|
||||
Arc::new(executor),
|
||||
);
|
||||
|
||||
let mut instance = FrontendBuilder::new(
|
||||
meta_backend.clone(),
|
||||
Arc::new(DatanodeClients::default()),
|
||||
meta_client,
|
||||
)
|
||||
.with_cache_invalidator(meta_backend)
|
||||
.with_plugin(plugins)
|
||||
.with_heartbeat_task(heartbeat_task)
|
||||
.try_build()
|
||||
.await
|
||||
.context(StartFrontendSnafu)?;
|
||||
|
||||
instance
|
||||
.build_servers(opts)
|
||||
.await
|
||||
|
||||
@@ -100,6 +100,9 @@ struct StartCommand {
|
||||
http_timeout: Option<u64>,
|
||||
#[clap(long, default_value = "GREPTIMEDB_METASRV")]
|
||||
env_prefix: String,
|
||||
/// The working home directory of this metasrv instance.
|
||||
#[clap(long)]
|
||||
data_home: Option<String>,
|
||||
}
|
||||
|
||||
impl StartCommand {
|
||||
@@ -152,6 +155,10 @@ impl StartCommand {
|
||||
opts.http.timeout = Duration::from_secs(http_timeout);
|
||||
}
|
||||
|
||||
if let Some(data_home) = &self.data_home {
|
||||
opts.data_home = data_home.clone();
|
||||
}
|
||||
|
||||
// Disable dashboard in metasrv.
|
||||
opts.http.disable_dashboard = true;
|
||||
|
||||
@@ -166,7 +173,12 @@ impl StartCommand {
|
||||
logging::info!("MetaSrv start command: {:#?}", self);
|
||||
logging::info!("MetaSrv options: {:#?}", opts);
|
||||
|
||||
let instance = MetaSrvInstance::new(opts, plugins)
|
||||
let builder = meta_srv::bootstrap::metasrv_builder(&opts, plugins.clone(), None)
|
||||
.await
|
||||
.context(error::BuildMetaServerSnafu)?;
|
||||
let metasrv = builder.build().await.context(error::BuildMetaServerSnafu)?;
|
||||
|
||||
let instance = MetaSrvInstance::new(opts, plugins, metasrv)
|
||||
.await
|
||||
.context(error::BuildMetaServerSnafu)?;
|
||||
|
||||
@@ -216,6 +228,12 @@ mod tests {
|
||||
[logging]
|
||||
level = "debug"
|
||||
dir = "/tmp/greptimedb/test/logs"
|
||||
|
||||
[failure_detector]
|
||||
threshold = 8.0
|
||||
min_std_deviation = "100ms"
|
||||
acceptable_heartbeat_pause = "3000ms"
|
||||
first_heartbeat_estimate = "1000ms"
|
||||
"#;
|
||||
write!(file, "{}", toml_str).unwrap();
|
||||
|
||||
@@ -234,6 +252,25 @@ mod tests {
|
||||
assert_eq!(SelectorType::LeaseBased, options.selector);
|
||||
assert_eq!("debug", options.logging.level.as_ref().unwrap());
|
||||
assert_eq!("/tmp/greptimedb/test/logs".to_string(), options.logging.dir);
|
||||
assert_eq!(8.0, options.failure_detector.threshold);
|
||||
assert_eq!(
|
||||
100.0,
|
||||
options.failure_detector.min_std_deviation.as_millis() as f32
|
||||
);
|
||||
assert_eq!(
|
||||
3000,
|
||||
options
|
||||
.failure_detector
|
||||
.acceptable_heartbeat_pause
|
||||
.as_millis()
|
||||
);
|
||||
assert_eq!(
|
||||
1000,
|
||||
options
|
||||
.failure_detector
|
||||
.first_heartbeat_estimate
|
||||
.as_millis()
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
||||
@@ -28,7 +28,7 @@ pub const ENV_VAR_SEP: &str = "__";
|
||||
pub const ENV_LIST_SEP: &str = ",";
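A minimal sketch (not part of the diff) of how a nested config key is spelled as an environment variable using `ENV_VAR_SEP`; the `GREPTIMEDB_DATANODE` prefix is only an assumed example here, the real prefix comes from each command's `env_prefix` option, and the key construction mirrors the options tests later in this diff:

```rust
// Hypothetical illustration: build the env var name the same way the options tests do.
fn env_override_key_sketch() -> String {
    let env_prefix = "GREPTIMEDB_DATANODE"; // assumed prefix for illustration
    let key = [
        env_prefix.to_string(),
        "storage".to_uppercase(),
        "manifest".to_uppercase(),
        "checkpoint_margin".to_uppercase(),
    ]
    .join(ENV_VAR_SEP); // ENV_VAR_SEP is "__"
    assert_eq!(key, "GREPTIMEDB_DATANODE__STORAGE__MANIFEST__CHECKPOINT_MARGIN");
    // Exporting this variable as "99" overrides `storage.manifest.checkpoint_margin` from the TOML file.
    key
}
```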
/// Options combined from datanode, frontend and metasrv.
|
||||
#[derive(Serialize)]
|
||||
#[derive(Serialize, Debug)]
|
||||
pub struct MixOptions {
|
||||
pub data_home: String,
|
||||
pub procedure: ProcedureConfig,
|
||||
@@ -133,12 +133,20 @@ impl Options {
|
||||
|
||||
Ok(opts)
|
||||
}
|
||||
|
||||
pub fn node_id(&self) -> Option<String> {
|
||||
match self {
|
||||
Options::Metasrv(_) | Options::Cli(_) => None,
|
||||
Options::Datanode(opt) => opt.node_id.map(|x| x.to_string()),
|
||||
Options::Frontend(opt) => opt.node_id.clone(),
|
||||
Options::Standalone(opt) => opt.frontend.node_id.clone(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::io::Write;
|
||||
use std::time::Duration;
|
||||
|
||||
use common_test_util::temp_dir::create_named_temp_file;
|
||||
use datanode::config::{DatanodeOptions, ObjectStoreConfig};
|
||||
@@ -170,11 +178,6 @@ mod tests {
|
||||
read_batch_size = 128
|
||||
sync_write = false
|
||||
|
||||
[storage.compaction]
|
||||
max_inflight_tasks = 3
|
||||
max_files_in_level0 = 7
|
||||
max_purge_tasks = 32
|
||||
|
||||
[logging]
|
||||
level = "debug"
|
||||
dir = "/tmp/greptimedb/test/logs"
|
||||
@@ -185,17 +188,6 @@ mod tests {
|
||||
temp_env::with_vars(
|
||||
// The following environment variables will be used to override the values in the config file.
|
||||
[
|
||||
(
|
||||
// storage.manifest.checkpoint_margin = 99
|
||||
[
|
||||
env_prefix.to_string(),
|
||||
"storage".to_uppercase(),
|
||||
"manifest".to_uppercase(),
|
||||
"checkpoint_margin".to_uppercase(),
|
||||
]
|
||||
.join(ENV_VAR_SEP),
|
||||
Some("99"),
|
||||
),
|
||||
(
|
||||
// storage.type = S3
|
||||
[
|
||||
@@ -216,17 +208,6 @@ mod tests {
|
||||
.join(ENV_VAR_SEP),
|
||||
Some("mybucket"),
|
||||
),
|
||||
(
|
||||
// storage.manifest.gc_duration = 42s
|
||||
[
|
||||
env_prefix.to_string(),
|
||||
"storage".to_uppercase(),
|
||||
"manifest".to_uppercase(),
|
||||
"gc_duration".to_uppercase(),
|
||||
]
|
||||
.join(ENV_VAR_SEP),
|
||||
Some("42s"),
|
||||
),
|
||||
(
|
||||
// wal.dir = /other/wal/dir
|
||||
[
|
||||
@@ -257,17 +238,12 @@ mod tests {
|
||||
.unwrap();
|
||||
|
||||
// Check the configs from environment variables.
|
||||
assert_eq!(opts.storage.manifest.checkpoint_margin, Some(99));
|
||||
match opts.storage.store {
|
||||
ObjectStoreConfig::S3(s3_config) => {
|
||||
assert_eq!(s3_config.bucket, "mybucket".to_string());
|
||||
}
|
||||
_ => panic!("unexpected store type"),
|
||||
}
|
||||
assert_eq!(
|
||||
opts.storage.manifest.gc_duration,
|
||||
Some(Duration::from_secs(42))
|
||||
);
|
||||
assert_eq!(
|
||||
opts.meta_client.unwrap().metasrv_addrs,
|
||||
vec![
|
||||
|
||||
@@ -15,21 +15,23 @@
|
||||
use std::sync::Arc;
|
||||
use std::{fs, path};
|
||||
|
||||
use catalog::kvbackend::KvBackendCatalogManager;
|
||||
use catalog::CatalogManagerRef;
|
||||
use clap::Parser;
|
||||
use common_base::Plugins;
|
||||
use common_config::{metadata_store_dir, KvBackendConfig, WalConfig};
|
||||
use common_meta::cache_invalidator::DummyKvCacheInvalidator;
|
||||
use common_meta::cache_invalidator::DummyCacheInvalidator;
|
||||
use common_meta::datanode_manager::DatanodeManagerRef;
|
||||
use common_meta::ddl::DdlTaskExecutorRef;
|
||||
use common_meta::ddl_manager::DdlManager;
|
||||
use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
|
||||
use common_meta::kv_backend::KvBackendRef;
|
||||
use common_procedure::ProcedureManagerRef;
|
||||
use common_telemetry::info;
|
||||
use common_telemetry::logging::LoggingOptions;
|
||||
use datanode::config::{DatanodeOptions, ProcedureConfig, RegionEngineConfig, StorageConfig};
|
||||
use datanode::datanode::{Datanode, DatanodeBuilder};
|
||||
use datanode::region_server::RegionServer;
|
||||
use file_engine::config::EngineConfig as FileEngineConfig;
|
||||
use frontend::frontend::FrontendOptions;
|
||||
use frontend::instance::builder::FrontendBuilder;
|
||||
use frontend::instance::standalone::StandaloneTableMetadataCreator;
|
||||
use frontend::instance::{FrontendInstance, Instance as FeInstance, StandaloneDatanodeManager};
|
||||
use frontend::service_config::{
|
||||
GrpcOptions, InfluxdbOptions, MysqlOptions, OpentsdbOptions, PostgresOptions, PromStoreOptions,
|
||||
@@ -42,9 +44,9 @@ use servers::Mode;
|
||||
use snafu::ResultExt;
|
||||
|
||||
use crate::error::{
|
||||
CreateDirSnafu, IllegalConfigSnafu, InitMetadataSnafu, Result, ShutdownDatanodeSnafu,
|
||||
ShutdownFrontendSnafu, StartDatanodeSnafu, StartFrontendSnafu, StartProcedureManagerSnafu,
|
||||
StopProcedureManagerSnafu,
|
||||
CreateDirSnafu, IllegalConfigSnafu, InitDdlManagerSnafu, InitMetadataSnafu, Result,
|
||||
ShutdownDatanodeSnafu, ShutdownFrontendSnafu, StartDatanodeSnafu, StartFrontendSnafu,
|
||||
StartProcedureManagerSnafu, StopProcedureManagerSnafu,
|
||||
};
|
||||
use crate::options::{MixOptions, Options, TopLevelOptions};
|
||||
|
||||
@@ -156,6 +158,7 @@ impl StandaloneOptions {
|
||||
wal: self.wal,
|
||||
storage: self.storage,
|
||||
region_engine: self.region_engine,
|
||||
rpc_addr: self.grpc.addr,
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
@@ -169,9 +172,7 @@ pub struct Instance {
|
||||
|
||||
impl Instance {
|
||||
pub async fn start(&mut self) -> Result<()> {
|
||||
// Start the datanode instance before starting services, to avoid requests coming in before internal components are started.
|
||||
self.datanode.start().await.context(StartDatanodeSnafu)?;
|
||||
info!("Datanode instance started");
|
||||
self.datanode.start_telemetry();
|
||||
|
||||
self.procedure_manager
|
||||
.start()
|
||||
@@ -229,6 +230,9 @@ struct StartCommand {
|
||||
user_provider: Option<String>,
|
||||
#[clap(long, default_value = "GREPTIMEDB_STANDALONE")]
|
||||
env_prefix: String,
|
||||
/// The working home directory of this standalone instance.
|
||||
#[clap(long)]
|
||||
data_home: Option<String>,
|
||||
}
|
||||
|
||||
impl StartCommand {
|
||||
@@ -259,6 +263,10 @@ impl StartCommand {
|
||||
opts.http.addr = addr.clone()
|
||||
}
|
||||
|
||||
if let Some(data_home) = &self.data_home {
|
||||
opts.storage.data_home = data_home.clone();
|
||||
}
|
||||
|
||||
if let Some(addr) = &self.rpc_addr {
|
||||
// the frontend grpc addr conflicts with the datanode's default grpc addr
|
||||
let datanode_grpc_addr = DatanodeOptions::default().rpc_addr;
|
||||
@@ -316,19 +324,17 @@ impl StartCommand {
|
||||
#[allow(unused_variables)]
|
||||
#[allow(clippy::diverging_sub_expression)]
|
||||
async fn build(self, opts: MixOptions) -> Result<Instance> {
|
||||
let mut fe_opts = opts.frontend.clone();
|
||||
#[allow(clippy::unnecessary_mut_passed)]
|
||||
let fe_opts = opts.frontend.clone();
|
||||
let fe_plugins = plugins::setup_frontend_plugins(&fe_opts)
|
||||
let fe_plugins = plugins::setup_frontend_plugins(&mut fe_opts) // the mutable reference is required here, do not change it
|
||||
.await
|
||||
.context(StartFrontendSnafu)?;
|
||||
|
||||
let dn_opts = opts.datanode.clone();
|
||||
|
||||
info!("Standalone start command: {:#?}", self);
|
||||
info!(
|
||||
"Standalone frontend options: {:#?}, datanode options: {:#?}",
|
||||
fe_opts, dn_opts
|
||||
);
|
||||
|
||||
info!("Building standalone instance with {opts:#?}");
|
||||
|
||||
// Ensure the data_home directory exists.
|
||||
fs::create_dir_all(path::Path::new(&opts.data_home)).context(CreateDirSnafu {
|
||||
@@ -344,38 +350,25 @@ impl StartCommand {
|
||||
.await
|
||||
.context(StartFrontendSnafu)?;
|
||||
|
||||
let datanode = DatanodeBuilder::new(
|
||||
dn_opts.clone(),
|
||||
Some(kv_backend.clone()),
|
||||
Default::default(),
|
||||
)
|
||||
.build()
|
||||
.await
|
||||
.context(StartDatanodeSnafu)?;
|
||||
let region_server = datanode.region_server();
|
||||
let builder =
|
||||
DatanodeBuilder::new(dn_opts, fe_plugins.clone()).with_kv_backend(kv_backend.clone());
|
||||
let datanode = builder.build().await.context(StartDatanodeSnafu)?;
|
||||
|
||||
let catalog_manager = KvBackendCatalogManager::new(
|
||||
let datanode_manager = Arc::new(StandaloneDatanodeManager(datanode.region_server()));
|
||||
|
||||
let ddl_task_executor = Self::create_ddl_task_executor(
|
||||
kv_backend.clone(),
|
||||
Arc::new(DummyKvCacheInvalidator),
|
||||
Arc::new(StandaloneDatanodeManager(region_server.clone())),
|
||||
);
|
||||
|
||||
catalog_manager
|
||||
.table_metadata_manager_ref()
|
||||
.init()
|
||||
.await
|
||||
.context(InitMetadataSnafu)?;
|
||||
|
||||
// TODO: build frontend instance like in distributed mode
|
||||
let mut frontend = build_frontend(
|
||||
fe_plugins,
|
||||
kv_backend,
|
||||
procedure_manager.clone(),
|
||||
catalog_manager,
|
||||
region_server,
|
||||
datanode_manager.clone(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let mut frontend = FrontendBuilder::new(kv_backend, datanode_manager, ddl_task_executor)
|
||||
.with_plugin(fe_plugins)
|
||||
.try_build()
|
||||
.await
|
||||
.context(StartFrontendSnafu)?;
|
||||
|
||||
frontend
|
||||
.build_servers(opts)
|
||||
.await
|
||||
@@ -387,26 +380,41 @@ impl StartCommand {
|
||||
procedure_manager,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// Build frontend instance in standalone mode
|
||||
async fn build_frontend(
|
||||
plugins: Plugins,
|
||||
kv_backend: KvBackendRef,
|
||||
procedure_manager: ProcedureManagerRef,
|
||||
catalog_manager: CatalogManagerRef,
|
||||
region_server: RegionServer,
|
||||
) -> Result<FeInstance> {
|
||||
let frontend_instance = FeInstance::try_new_standalone(
|
||||
kv_backend,
|
||||
procedure_manager,
|
||||
catalog_manager,
|
||||
plugins,
|
||||
region_server,
|
||||
)
|
||||
.await
|
||||
.context(StartFrontendSnafu)?;
|
||||
Ok(frontend_instance)
|
||||
async fn create_ddl_task_executor(
|
||||
kv_backend: KvBackendRef,
|
||||
procedure_manager: ProcedureManagerRef,
|
||||
datanode_manager: DatanodeManagerRef,
|
||||
) -> Result<DdlTaskExecutorRef> {
|
||||
let table_metadata_manager =
|
||||
Self::create_table_metadata_manager(kv_backend.clone()).await?;
|
||||
|
||||
let ddl_task_executor: DdlTaskExecutorRef = Arc::new(
|
||||
DdlManager::try_new(
|
||||
procedure_manager,
|
||||
datanode_manager,
|
||||
Arc::new(DummyCacheInvalidator),
|
||||
table_metadata_manager,
|
||||
Arc::new(StandaloneTableMetadataCreator::new(kv_backend)),
|
||||
)
|
||||
.context(InitDdlManagerSnafu)?,
|
||||
);
|
||||
|
||||
Ok(ddl_task_executor)
|
||||
}
|
||||
|
||||
async fn create_table_metadata_manager(
|
||||
kv_backend: KvBackendRef,
|
||||
) -> Result<TableMetadataManagerRef> {
|
||||
let table_metadata_manager = Arc::new(TableMetadataManager::new(kv_backend));
|
||||
|
||||
table_metadata_manager
|
||||
.init()
|
||||
.await
|
||||
.context(InitMetadataSnafu)?;
|
||||
|
||||
Ok(table_metadata_manager)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
|
||||
@@ -8,8 +8,8 @@ license.workspace = true
|
||||
anymap = "1.0.0-beta.2"
|
||||
bitvec = "1.0"
|
||||
bytes = { version = "1.1", features = ["serde"] }
|
||||
common-error = { workspace = true }
|
||||
common-macro = { workspace = true }
|
||||
common-error.workspace = true
|
||||
common-macro.workspace = true
|
||||
paste = "1.0"
|
||||
serde = { version = "1.0", features = ["derive"] }
|
||||
snafu.workspace = true
|
||||
|
||||
@@ -5,11 +5,9 @@ edition.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[dependencies]
|
||||
common-error = { workspace = true }
|
||||
common-macro = { workspace = true }
|
||||
serde.workspace = true
|
||||
serde_json = "1.0"
|
||||
snafu = { version = "0.7", features = ["backtraces"] }
|
||||
common-error.workspace = true
|
||||
common-macro.workspace = true
|
||||
snafu.workspace = true
|
||||
|
||||
[dev-dependencies]
|
||||
chrono.workspace = true
|
||||
|
||||
@@ -17,15 +17,17 @@ async-compression = { version = "0.3", features = [
|
||||
] }
|
||||
async-trait.workspace = true
|
||||
bytes = "1.1"
|
||||
common-error = { workspace = true }
|
||||
common-macro = { workspace = true }
|
||||
common-runtime = { workspace = true }
|
||||
common-error.workspace = true
|
||||
common-macro.workspace = true
|
||||
common-runtime.workspace = true
|
||||
datafusion.workspace = true
|
||||
datatypes.workspace = true
|
||||
derive_builder.workspace = true
|
||||
futures.workspace = true
|
||||
lazy_static.workspace = true
|
||||
object-store = { workspace = true }
|
||||
object-store.workspace = true
|
||||
orc-rust = "0.2"
|
||||
parquet.workspace = true
|
||||
paste = "1.0"
|
||||
regex = "1.7"
|
||||
serde.workspace = true
|
||||
@@ -36,4 +38,4 @@ tokio.workspace = true
|
||||
url = "2.3"
|
||||
|
||||
[dev-dependencies]
|
||||
common-test-util = { workspace = true }
|
||||
common-test-util.workspace = true
|
||||
|
||||
@@ -26,7 +26,9 @@ use tokio::io::{AsyncRead, AsyncWriteExt, BufReader};
|
||||
use tokio_util::io::{ReaderStream, StreamReader};
|
||||
|
||||
use crate::error::{self, Error, Result};
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EnumIter, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
pub enum CompressionType {
|
||||
/// Gzip-ed file
|
||||
Gzip,
|
||||
|
||||
@@ -166,6 +166,14 @@ pub enum Error {
|
||||
|
||||
#[snafu(display("Buffered writer closed"))]
|
||||
BufferedWriterClosed { location: Location },
|
||||
|
||||
#[snafu(display("Failed to write parquet file, path: {}", path))]
|
||||
WriteParquet {
|
||||
path: String,
|
||||
location: Location,
|
||||
#[snafu(source)]
|
||||
error: parquet::errors::ParquetError,
|
||||
},
|
||||
}
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
@@ -178,7 +186,8 @@ impl ErrorExt for Error {
|
||||
| ListObjects { .. }
|
||||
| ReadObject { .. }
|
||||
| WriteObject { .. }
|
||||
| AsyncWrite { .. } => StatusCode::StorageUnavailable,
|
||||
| AsyncWrite { .. }
|
||||
| WriteParquet { .. } => StatusCode::StorageUnavailable,
|
||||
|
||||
UnsupportedBackendProtocol { .. }
|
||||
| UnsupportedCompressionType { .. }
|
||||
@@ -231,6 +240,7 @@ impl ErrorExt for Error {
|
||||
InvalidConnection { location, .. } => Some(*location),
|
||||
UnsupportedCompressionType { location, .. } => Some(*location),
|
||||
UnsupportedFormat { location, .. } => Some(*location),
|
||||
WriteParquet { location, .. } => Some(*location),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -12,11 +12,13 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::future::Future;
|
||||
use std::pin::Pin;
|
||||
use std::result;
|
||||
use std::sync::Arc;
|
||||
|
||||
use arrow::record_batch::RecordBatch;
|
||||
use arrow_schema::Schema;
|
||||
use arrow_schema::{Schema, SchemaRef};
|
||||
use async_trait::async_trait;
|
||||
use datafusion::datasource::physical_plan::{FileMeta, ParquetFileReaderFactory};
|
||||
use datafusion::error::Result as DatafusionResult;
|
||||
@@ -26,11 +28,15 @@ use datafusion::parquet::errors::{ParquetError, Result as ParquetResult};
|
||||
use datafusion::parquet::file::metadata::ParquetMetaData;
|
||||
use datafusion::parquet::format::FileMetaData;
|
||||
use datafusion::physical_plan::metrics::ExecutionPlanMetricsSet;
|
||||
use datafusion::physical_plan::SendableRecordBatchStream;
|
||||
use futures::future::BoxFuture;
|
||||
use futures::StreamExt;
|
||||
use object_store::{ObjectStore, Reader};
|
||||
use parquet::basic::{Compression, ZstdLevel};
|
||||
use parquet::file::properties::WriterProperties;
|
||||
use snafu::ResultExt;
|
||||
|
||||
use crate::buffered_writer::{ArrowWriterCloser, DfRecordBatchEncoder};
|
||||
use crate::buffered_writer::{ArrowWriterCloser, DfRecordBatchEncoder, LazyBufferedWriter};
|
||||
use crate::error::{self, Result};
|
||||
use crate::file_format::FileFormat;
|
||||
use crate::share_buffer::SharedBuffer;
|
||||
@@ -156,6 +162,103 @@ impl ArrowWriterCloser for ArrowWriter<SharedBuffer> {
|
||||
}
|
||||
}
|
||||
|
||||
/// Parquet writer that buffers row groups in memory and writes the buffered data to the
/// underlying storage in chunks to reduce memory consumption.
|
||||
pub struct BufferedWriter {
|
||||
inner: InnerBufferedWriter,
|
||||
}
|
||||
|
||||
type InnerBufferedWriter = LazyBufferedWriter<
|
||||
object_store::Writer,
|
||||
ArrowWriter<SharedBuffer>,
|
||||
Box<
|
||||
dyn FnMut(
|
||||
String,
|
||||
)
|
||||
-> Pin<Box<dyn Future<Output = error::Result<object_store::Writer>> + Send>>
|
||||
+ Send,
|
||||
>,
|
||||
>;
|
||||
|
||||
impl BufferedWriter {
|
||||
pub async fn try_new(
|
||||
path: String,
|
||||
store: ObjectStore,
|
||||
arrow_schema: SchemaRef,
|
||||
props: Option<WriterProperties>,
|
||||
buffer_threshold: usize,
|
||||
) -> error::Result<Self> {
|
||||
let buffer = SharedBuffer::with_capacity(buffer_threshold);
|
||||
|
||||
let arrow_writer = ArrowWriter::try_new(buffer.clone(), arrow_schema.clone(), props)
|
||||
.context(error::WriteParquetSnafu { path: &path })?;
|
||||
|
||||
Ok(Self {
|
||||
inner: LazyBufferedWriter::new(
|
||||
buffer_threshold,
|
||||
buffer,
|
||||
arrow_writer,
|
||||
&path,
|
||||
Box::new(move |path| {
|
||||
let store = store.clone();
|
||||
Box::pin(async move {
|
||||
store
|
||||
.writer(&path)
|
||||
.await
|
||||
.context(error::WriteObjectSnafu { path })
|
||||
})
|
||||
}),
|
||||
),
|
||||
})
|
||||
}
|
||||
|
||||
/// Write a record batch to the stream writer.
|
||||
pub async fn write(&mut self, arrow_batch: &RecordBatch) -> error::Result<()> {
|
||||
self.inner.write(arrow_batch).await?;
|
||||
self.inner.try_flush(false).await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Close parquet writer.
|
||||
///
|
||||
/// Return the file metadata and the number of bytes written.
|
||||
pub async fn close(self) -> error::Result<(FileMetaData, u64)> {
|
||||
self.inner.close_with_arrow_writer().await
|
||||
}
|
||||
}
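A minimal usage sketch of the `BufferedWriter` above (not part of the diff). It assumes the imports already present in this module (`ObjectStore`, `SchemaRef`, `RecordBatch`, `error`), and the destination path and buffer threshold are made-up values:

```rust
// Hypothetical example: buffer record batches and flush them to object storage in chunks.
async fn write_batches_example(
    store: ObjectStore,
    schema: SchemaRef,
    batches: &[RecordBatch],
) -> error::Result<()> {
    let mut writer = BufferedWriter::try_new(
        "example/data.parquet".to_string(), // assumed destination path
        store,
        schema,
        None,            // default WriterProperties
        8 * 1024 * 1024, // flush roughly every 8 MiB of buffered data
    )
    .await?;

    for batch in batches {
        // Each write may trigger an intermediate flush once the buffer threshold is reached.
        writer.write(batch).await?;
    }

    // Finish the parquet file; `close` returns the file metadata and the bytes written.
    let (_file_meta, _bytes_written) = writer.close().await?;
    Ok(())
}
```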
/// Output the stream to a parquet file.
|
||||
///
|
||||
/// Returns the number of rows written.
|
||||
pub async fn stream_to_parquet(
|
||||
mut stream: SendableRecordBatchStream,
|
||||
store: ObjectStore,
|
||||
path: &str,
|
||||
threshold: usize,
|
||||
) -> Result<usize> {
|
||||
let write_props = WriterProperties::builder()
|
||||
.set_compression(Compression::ZSTD(ZstdLevel::default()))
|
||||
.build();
|
||||
let schema = stream.schema();
|
||||
let mut buffered_writer = BufferedWriter::try_new(
|
||||
path.to_string(),
|
||||
store,
|
||||
schema,
|
||||
Some(write_props),
|
||||
threshold,
|
||||
)
|
||||
.await?;
|
||||
let mut rows_written = 0;
|
||||
while let Some(batch) = stream.next().await {
|
||||
let batch = batch.context(error::ReadRecordBatchSnafu)?;
|
||||
buffered_writer.write(&batch).await?;
|
||||
rows_written += batch.num_rows();
|
||||
}
|
||||
buffered_writer.close().await?;
|
||||
Ok(rows_written)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use common_test_util::find_workspace_path;
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
|
||||
@@ -1,15 +1,15 @@
|
||||
[package]
|
||||
name = "decimal"
|
||||
name = "common-decimal"
|
||||
version.workspace = true
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[dependencies]
|
||||
arrow.workspace = true
|
||||
bigdecimal = { workspace = true }
|
||||
common-error = { workspace = true }
|
||||
common-macro = { workspace = true }
|
||||
rust_decimal = { workspace = true }
|
||||
bigdecimal.workspace = true
|
||||
common-error.workspace = true
|
||||
common-macro.workspace = true
|
||||
rust_decimal.workspace = true
|
||||
serde.workspace = true
|
||||
serde_json = "1.0"
|
||||
snafu.workspace = true
|
||||
|
||||
@@ -43,7 +43,7 @@ const BYTES_TO_OVERFLOW_RUST_DECIMAL: usize = 28;
|
||||
/// **precision**: the total number of digits in the number; its range is \[1, 38\].
///
/// **scale**: the number of digits to the right of the decimal point; its range is \[0, precision\].
|
||||
#[derive(Debug, Default, Eq, Copy, Clone, Serialize, Deserialize)]
|
||||
#[derive(Debug, Eq, Copy, Clone, Serialize, Deserialize)]
|
||||
pub struct Decimal128 {
|
||||
value: i128,
|
||||
precision: u8,
|
||||
@@ -51,8 +51,18 @@ pub struct Decimal128 {
|
||||
}
|
||||
|
||||
impl Decimal128 {
|
||||
/// Create a new Decimal128 from i128, precision and scale.
|
||||
pub fn new_unchecked(value: i128, precision: u8, scale: i8) -> Self {
|
||||
/// Create a new Decimal128 from i128, precision and scale without any validation.
|
||||
pub fn new(value: i128, precision: u8, scale: i8) -> Self {
|
||||
// debug assert that the precision and scale are valid
|
||||
debug_assert!(
|
||||
precision > 0 && precision <= DECIMAL128_MAX_PRECISION,
|
||||
"precision should be in [1, {}]",
|
||||
DECIMAL128_MAX_PRECISION
|
||||
);
|
||||
debug_assert!(
|
||||
scale >= 0 && scale <= precision as i8,
|
||||
"scale should be in [0, precision]"
|
||||
);
|
||||
Self {
|
||||
value,
|
||||
precision,
|
||||
@@ -60,6 +70,7 @@ impl Decimal128 {
|
||||
}
|
||||
}
|
||||
|
||||
/// Try new Decimal128 from i128, precision and scale with validation.
|
||||
pub fn try_new(value: i128, precision: u8, scale: i8) -> error::Result<Self> {
|
||||
// make sure the precision and scale are valid.
|
||||
valid_precision_and_scale(precision, scale)?;
|
||||
@@ -70,6 +81,7 @@ impl Decimal128 {
|
||||
})
|
||||
}
|
||||
|
||||
/// Return the underlying value without the precision and scale
|
||||
pub fn val(&self) -> i128 {
|
||||
self.value
|
||||
}
|
||||
@@ -84,10 +96,36 @@ impl Decimal128 {
|
||||
self.scale
|
||||
}
|
||||
|
||||
/// Convert to ScalarValue
|
||||
/// Convert to ScalarValue(value, precision, scale)
|
||||
pub fn to_scalar_value(&self) -> (Option<i128>, u8, i8) {
|
||||
(Some(self.value), self.precision, self.scale)
|
||||
}
|
||||
|
||||
/// Splits self.value (an i128) into (high 64 bits, low 64 bits); the
/// precision and scale information is discarded.
///
/// Return: (high 64 bits, low 64 bits)
|
||||
pub fn split_value(&self) -> (i64, i64) {
|
||||
((self.value >> 64) as i64, self.value as i64)
|
||||
}
|
||||
|
||||
/// Converts from a precision, a scale, and an i128 value that is
/// represented by two i64 values (high 64 bits, low 64 bits).
|
||||
pub fn from_value_precision_scale(hi: i64, lo: i64, precision: u8, scale: i8) -> Self {
|
||||
let value = (hi as i128) << 64 | lo as i128;
|
||||
Self::new(value, precision, scale)
|
||||
}
|
||||
}
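A small sketch (not part of the diff) of the hi/lo round trip that `split_value` and `from_value_precision_scale` are meant to support; the value, precision and scale are the same illustrative numbers used in `test_common_decimal128` below:

```rust
// Hypothetical round-trip check with a small positive value: split the i128 into two
// i64 halves, then rebuild it with the same precision and scale.
#[test]
fn split_and_rebuild_sketch() {
    let original = Decimal128::new(123456789, 9, 3); // 123456.789
    let (hi, lo) = original.split_value();
    let rebuilt = Decimal128::from_value_precision_scale(hi, lo, 9, 3);
    assert_eq!(original.val(), rebuilt.val());
}
```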
/// The default value of Decimal128 is 0, and its precision is 1 and scale is 0.
|
||||
impl Default for Decimal128 {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
value: 0,
|
||||
precision: 1,
|
||||
scale: 0,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl PartialEq for Decimal128 {
|
||||
@@ -270,7 +308,7 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_common_decimal128() {
|
||||
let decimal = Decimal128::new_unchecked(123456789, 9, 3);
|
||||
let decimal = Decimal128::new(123456789, 9, 3);
|
||||
assert_eq!(decimal.to_string(), "123456.789");
|
||||
|
||||
let decimal = Decimal128::try_new(123456789, 9, 0);
|
||||
|
||||
@@ -14,3 +14,5 @@
|
||||
|
||||
pub mod decimal128;
|
||||
pub mod error;
|
||||
|
||||
pub use decimal128::Decimal128;
|
||||
|
||||
@@ -5,5 +5,5 @@ edition.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[dependencies]
|
||||
snafu = { version = "0.7", features = ["backtraces"] }
|
||||
snafu.workspace = true
|
||||
strum.workspace = true
|
||||
|
||||
@@ -11,6 +11,7 @@
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#![feature(error_iter)]
|
||||
|
||||
pub mod ext;
|
||||
|
||||
@@ -7,12 +7,12 @@ license.workspace = true
|
||||
[dependencies]
|
||||
arc-swap = "1.0"
|
||||
chrono-tz = "0.6"
|
||||
common-error = { workspace = true }
|
||||
common-macro = { workspace = true }
|
||||
common-query = { workspace = true }
|
||||
common-time = { workspace = true }
|
||||
common-error.workspace = true
|
||||
common-macro.workspace = true
|
||||
common-query.workspace = true
|
||||
common-time.workspace = true
|
||||
datafusion.workspace = true
|
||||
datatypes = { workspace = true }
|
||||
datatypes.workspace = true
|
||||
libc = "0.2"
|
||||
num = "0.4"
|
||||
num-traits = "0.2"
|
||||
|
||||
@@ -11,6 +11,7 @@
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::Arc;
|
||||
mod greatest;
|
||||
mod to_unixtime;
|
||||
|
||||
@@ -6,19 +6,19 @@ license.workspace = true
|
||||
|
||||
[dependencies]
|
||||
async-trait.workspace = true
|
||||
common-error = { workspace = true }
|
||||
common-runtime = { workspace = true }
|
||||
common-telemetry = { workspace = true }
|
||||
reqwest = { workspace = true }
|
||||
common-error.workspace = true
|
||||
common-runtime.workspace = true
|
||||
common-telemetry.workspace = true
|
||||
reqwest.workspace = true
|
||||
serde.workspace = true
|
||||
serde_json.workspace = true
|
||||
tokio.workspace = true
|
||||
uuid.workspace = true
|
||||
|
||||
[dev-dependencies]
|
||||
common-test-util = { workspace = true }
|
||||
common-test-util.workspace = true
|
||||
hyper = { version = "0.14", features = ["full"] }
|
||||
tempfile.workspace = true
|
||||
|
||||
[build-dependencies]
|
||||
common-version = { workspace = true }
|
||||
common-version.workspace = true
|
||||
|
||||
@@ -5,18 +5,18 @@ edition.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[dependencies]
|
||||
api = { workspace = true }
|
||||
api.workspace = true
|
||||
async-trait.workspace = true
|
||||
common-base = { workspace = true }
|
||||
common-catalog = { workspace = true }
|
||||
common-error = { workspace = true }
|
||||
common-macro = { workspace = true }
|
||||
common-query = { workspace = true }
|
||||
common-telemetry = { workspace = true }
|
||||
common-time = { workspace = true }
|
||||
datatypes = { workspace = true }
|
||||
snafu = { version = "0.7", features = ["backtraces"] }
|
||||
table = { workspace = true }
|
||||
common-base.workspace = true
|
||||
common-catalog.workspace = true
|
||||
common-error.workspace = true
|
||||
common-macro.workspace = true
|
||||
common-query.workspace = true
|
||||
common-telemetry.workspace = true
|
||||
common-time.workspace = true
|
||||
datatypes.workspace = true
|
||||
snafu.workspace = true
|
||||
table.workspace = true
|
||||
|
||||
[dev-dependencies]
|
||||
paste = "1.0"
|
||||
|
||||
@@ -158,6 +158,7 @@ mod tests {
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
}),
|
||||
location: None,
|
||||
}],
|
||||
@@ -199,6 +200,7 @@ mod tests {
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
}),
|
||||
location: Some(Location {
|
||||
location_type: LocationType::First.into(),
|
||||
@@ -213,6 +215,7 @@ mod tests {
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
}),
|
||||
location: Some(Location {
|
||||
location_type: LocationType::After.into(),
|
||||
|
||||
@@ -36,14 +36,16 @@ pub fn to_table_delete_request(
|
||||
values,
|
||||
null_mask,
|
||||
datatype,
|
||||
datatype_extension,
|
||||
..
|
||||
} in request.key_columns
|
||||
{
|
||||
let Some(values) = values else { continue };
|
||||
|
||||
let datatype: ConcreteDataType = ColumnDataTypeWrapper::try_new(datatype)
|
||||
.context(ColumnDataTypeSnafu)?
|
||||
.into();
|
||||
let datatype: ConcreteDataType =
|
||||
ColumnDataTypeWrapper::try_new(datatype, datatype_extension)
|
||||
.context(ColumnDataTypeSnafu)?
|
||||
.into();
|
||||
let vector = add_values_to_builder(datatype, values, row_count, null_mask)?;
|
||||
|
||||
ensure!(
|
||||
|
||||
@@ -119,7 +119,7 @@ mod tests {
|
||||
nullable: bool,
|
||||
) -> error::Result<ColumnSchema> {
|
||||
let datatype_wrapper =
|
||||
ColumnDataTypeWrapper::try_new(datatype).context(ColumnDataTypeSnafu)?;
|
||||
ColumnDataTypeWrapper::try_new(datatype, None).context(ColumnDataTypeSnafu)?;
|
||||
|
||||
Ok(ColumnSchema::new(
|
||||
column_name,
|
||||
@@ -170,7 +170,8 @@ mod tests {
|
||||
.iter()
|
||||
.find(|c| c.name == "host")
|
||||
.unwrap()
|
||||
.data_type
|
||||
.data_type,
|
||||
None
|
||||
)
|
||||
.unwrap()
|
||||
)
|
||||
@@ -184,7 +185,8 @@ mod tests {
|
||||
.iter()
|
||||
.find(|c| c.name == "cpu")
|
||||
.unwrap()
|
||||
.data_type
|
||||
.data_type,
|
||||
None
|
||||
)
|
||||
.unwrap()
|
||||
)
|
||||
@@ -198,7 +200,8 @@ mod tests {
|
||||
.iter()
|
||||
.find(|c| c.name == "memory")
|
||||
.unwrap()
|
||||
.data_type
|
||||
.data_type,
|
||||
None
|
||||
)
|
||||
.unwrap()
|
||||
)
|
||||
@@ -212,7 +215,8 @@ mod tests {
|
||||
.iter()
|
||||
.find(|c| c.name == "time")
|
||||
.unwrap()
|
||||
.data_type
|
||||
.data_type,
|
||||
None
|
||||
)
|
||||
.unwrap()
|
||||
)
|
||||
@@ -226,7 +230,8 @@ mod tests {
|
||||
.iter()
|
||||
.find(|c| c.name == "interval")
|
||||
.unwrap()
|
||||
.data_type
|
||||
.data_type,
|
||||
None
|
||||
)
|
||||
.unwrap()
|
||||
)
|
||||
@@ -240,7 +245,8 @@ mod tests {
|
||||
.iter()
|
||||
.find(|c| c.name == "duration")
|
||||
.unwrap()
|
||||
.data_type
|
||||
.data_type,
|
||||
None
|
||||
)
|
||||
.unwrap()
|
||||
)
|
||||
@@ -254,7 +260,8 @@ mod tests {
|
||||
.iter()
|
||||
.find(|c| c.name == "ts")
|
||||
.unwrap()
|
||||
.data_type
|
||||
.data_type,
|
||||
None
|
||||
)
|
||||
.unwrap()
|
||||
)
|
||||
@@ -284,8 +291,11 @@ mod tests {
|
||||
assert_eq!(
|
||||
ConcreteDataType::string_datatype(),
|
||||
ConcreteDataType::from(
|
||||
ColumnDataTypeWrapper::try_new(host_column.column_def.as_ref().unwrap().data_type)
|
||||
.unwrap()
|
||||
ColumnDataTypeWrapper::try_new(
|
||||
host_column.column_def.as_ref().unwrap().data_type,
|
||||
None
|
||||
)
|
||||
.unwrap()
|
||||
)
|
||||
);
|
||||
|
||||
@@ -294,7 +304,8 @@ mod tests {
|
||||
ConcreteDataType::float64_datatype(),
|
||||
ConcreteDataType::from(
|
||||
ColumnDataTypeWrapper::try_new(
|
||||
memory_column.column_def.as_ref().unwrap().data_type
|
||||
memory_column.column_def.as_ref().unwrap().data_type,
|
||||
None
|
||||
)
|
||||
.unwrap()
|
||||
)
|
||||
@@ -304,8 +315,11 @@ mod tests {
|
||||
assert_eq!(
|
||||
ConcreteDataType::time_datatype(TimeUnit::Millisecond),
|
||||
ConcreteDataType::from(
|
||||
ColumnDataTypeWrapper::try_new(time_column.column_def.as_ref().unwrap().data_type)
|
||||
.unwrap()
|
||||
ColumnDataTypeWrapper::try_new(
|
||||
time_column.column_def.as_ref().unwrap().data_type,
|
||||
None
|
||||
)
|
||||
.unwrap()
|
||||
)
|
||||
);
|
||||
|
||||
@@ -314,7 +328,8 @@ mod tests {
|
||||
ConcreteDataType::interval_datatype(IntervalUnit::MonthDayNano),
|
||||
ConcreteDataType::from(
|
||||
ColumnDataTypeWrapper::try_new(
|
||||
interval_column.column_def.as_ref().unwrap().data_type
|
||||
interval_column.column_def.as_ref().unwrap().data_type,
|
||||
None
|
||||
)
|
||||
.unwrap()
|
||||
)
|
||||
@@ -326,7 +341,8 @@ mod tests {
|
||||
ConcreteDataType::duration_millisecond_datatype(),
|
||||
ConcreteDataType::from(
|
||||
ColumnDataTypeWrapper::try_new(
|
||||
duration_column.column_def.as_ref().unwrap().data_type
|
||||
duration_column.column_def.as_ref().unwrap().data_type,
|
||||
None
|
||||
)
|
||||
.unwrap()
|
||||
)
|
||||
@@ -360,6 +376,7 @@ mod tests {
|
||||
values: Some(host_vals),
|
||||
null_mask: vec![0],
|
||||
datatype: ColumnDataType::String as i32,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let cpu_vals = Values {
|
||||
@@ -372,6 +389,7 @@ mod tests {
|
||||
values: Some(cpu_vals),
|
||||
null_mask: vec![2],
|
||||
datatype: ColumnDataType::Float64 as i32,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let mem_vals = Values {
|
||||
@@ -384,6 +402,7 @@ mod tests {
|
||||
values: Some(mem_vals),
|
||||
null_mask: vec![1],
|
||||
datatype: ColumnDataType::Float64 as i32,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let time_vals = Values {
|
||||
@@ -396,6 +415,7 @@ mod tests {
|
||||
values: Some(time_vals),
|
||||
null_mask: vec![0],
|
||||
datatype: ColumnDataType::TimeMillisecond as i32,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let interval1 = IntervalMonthDayNano {
|
||||
@@ -418,6 +438,7 @@ mod tests {
|
||||
values: Some(interval_vals),
|
||||
null_mask: vec![0],
|
||||
datatype: ColumnDataType::IntervalMonthDayNano as i32,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let duration_vals = Values {
|
||||
@@ -430,6 +451,7 @@ mod tests {
|
||||
values: Some(duration_vals),
|
||||
null_mask: vec![0],
|
||||
datatype: ColumnDataType::DurationMillisecond as i32,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let ts_vals = Values {
|
||||
@@ -442,6 +464,7 @@ mod tests {
|
||||
values: Some(ts_vals),
|
||||
null_mask: vec![0],
|
||||
datatype: ColumnDataType::TimestampMillisecond as i32,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
(
|
||||
|
||||
@@ -121,6 +121,7 @@ pub fn build_create_table_expr(
|
||||
default_constraint: vec![],
|
||||
semantic_type,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
};
|
||||
column_defs.push(column_def);
|
||||
}
|
||||
@@ -161,6 +162,7 @@ pub fn extract_new_columns(
|
||||
default_constraint: vec![],
|
||||
semantic_type: expr.semantic_type,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
});
|
||||
AddColumn {
|
||||
column_def,
|
||||
|
||||
@@ -5,25 +5,25 @@ edition.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[dependencies]
|
||||
api = { workspace = true }
|
||||
api.workspace = true
|
||||
arrow-flight.workspace = true
|
||||
async-trait = "0.1"
|
||||
backtrace = "0.3"
|
||||
common-base = { workspace = true }
|
||||
common-error = { workspace = true }
|
||||
common-macro = { workspace = true }
|
||||
common-recordbatch = { workspace = true }
|
||||
common-runtime = { workspace = true }
|
||||
common-telemetry = { workspace = true }
|
||||
common-time = { workspace = true }
|
||||
common-base.workspace = true
|
||||
common-error.workspace = true
|
||||
common-macro.workspace = true
|
||||
common-recordbatch.workspace = true
|
||||
common-runtime.workspace = true
|
||||
common-telemetry.workspace = true
|
||||
common-time.workspace = true
|
||||
dashmap = "5.4"
|
||||
datafusion.workspace = true
|
||||
datatypes = { workspace = true }
|
||||
datatypes.workspace = true
|
||||
flatbuffers = "23.1"
|
||||
futures = "0.3"
|
||||
lazy_static.workspace = true
|
||||
prost.workspace = true
|
||||
snafu = { version = "0.7", features = ["backtraces"] }
|
||||
snafu.workspace = true
|
||||
tokio.workspace = true
|
||||
tonic.workspace = true
|
||||
tower = "0.4"
|
||||
|
||||
@@ -12,18 +12,19 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use api::helper::convert_i128_to_interval;
|
||||
use api::helper::{convert_i128_to_interval, convert_to_pb_decimal128};
|
||||
use api::v1::column::Values;
|
||||
use common_base::BitVec;
|
||||
use datatypes::types::{DurationType, IntervalType, TimeType, TimestampType, WrapperType};
|
||||
use datatypes::vectors::{
|
||||
BinaryVector, BooleanVector, DateTimeVector, DateVector, DurationMicrosecondVector,
|
||||
DurationMillisecondVector, DurationNanosecondVector, DurationSecondVector, Float32Vector,
|
||||
Float64Vector, Int16Vector, Int32Vector, Int64Vector, Int8Vector, IntervalDayTimeVector,
|
||||
IntervalMonthDayNanoVector, IntervalYearMonthVector, StringVector, TimeMicrosecondVector,
|
||||
TimeMillisecondVector, TimeNanosecondVector, TimeSecondVector, TimestampMicrosecondVector,
|
||||
TimestampMillisecondVector, TimestampNanosecondVector, TimestampSecondVector, UInt16Vector,
|
||||
UInt32Vector, UInt64Vector, UInt8Vector, VectorRef,
|
||||
BinaryVector, BooleanVector, DateTimeVector, DateVector, Decimal128Vector,
|
||||
DurationMicrosecondVector, DurationMillisecondVector, DurationNanosecondVector,
|
||||
DurationSecondVector, Float32Vector, Float64Vector, Int16Vector, Int32Vector, Int64Vector,
|
||||
Int8Vector, IntervalDayTimeVector, IntervalMonthDayNanoVector, IntervalYearMonthVector,
|
||||
StringVector, TimeMicrosecondVector, TimeMillisecondVector, TimeNanosecondVector,
|
||||
TimeSecondVector, TimestampMicrosecondVector, TimestampMillisecondVector,
|
||||
TimestampNanosecondVector, TimestampSecondVector, UInt16Vector, UInt32Vector, UInt64Vector,
|
||||
UInt8Vector, VectorRef,
|
||||
};
|
||||
use snafu::OptionExt;
|
||||
|
||||
@@ -237,6 +238,12 @@ pub fn values(arrays: &[VectorRef]) -> Result<Values> {
|
||||
DurationNanosecondVector,
|
||||
duration_nanosecond_values,
|
||||
|x| { x.into_native() }
|
||||
),
|
||||
(
|
||||
ConcreteDataType::Decimal128(_),
|
||||
Decimal128Vector,
|
||||
decimal128_values,
|
||||
|x| { convert_to_pb_decimal128(x) }
|
||||
)
|
||||
)
|
||||
}
|
||||
@@ -314,6 +321,17 @@ mod tests {
|
||||
assert_eq!(vec![1, 2, 3], values.duration_second_values);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_convert_arrow_array_decimal128() {
|
||||
let array = Decimal128Vector::from(vec![Some(1), Some(2), None, Some(3)]);
|
||||
|
||||
let vals = values(&[Arc::new(array)]).unwrap();
|
||||
(0..3).for_each(|i| {
|
||||
assert_eq!(vals.decimal128_values[i].hi, 0);
|
||||
assert_eq!(vals.decimal128_values[i].lo, i as i64 + 1);
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_convert_arrow_arrays_string() {
|
||||
let array = StringVector::from(vec![
|
||||
|
||||
@@ -16,7 +16,7 @@ use std::collections::HashMap;
|
||||
use std::fmt::Display;
|
||||
|
||||
use api::helper::values_with_capacity;
|
||||
use api::v1::{Column, ColumnDataType, SemanticType};
|
||||
use api::v1::{Column, ColumnDataType, ColumnDataTypeExtension, SemanticType};
|
||||
use common_base::BitVec;
|
||||
use common_time::timestamp::TimeUnit;
|
||||
use snafu::ensure;
|
||||
@@ -50,6 +50,7 @@ impl LinesWriter {
|
||||
column_name,
|
||||
ColumnDataType::TimestampMillisecond,
|
||||
SemanticType::Timestamp,
|
||||
None,
|
||||
);
|
||||
ensure!(
|
||||
column.datatype == ColumnDataType::TimestampMillisecond as i32,
|
||||
@@ -69,7 +70,8 @@ impl LinesWriter {
|
||||
}
|
||||
|
||||
pub fn write_tag(&mut self, column_name: &str, value: &str) -> Result<()> {
|
||||
let (idx, column) = self.mut_column(column_name, ColumnDataType::String, SemanticType::Tag);
|
||||
let (idx, column) =
|
||||
self.mut_column(column_name, ColumnDataType::String, SemanticType::Tag, None);
|
||||
ensure!(
|
||||
column.datatype == ColumnDataType::String as i32,
|
||||
TypeMismatchSnafu {
|
||||
@@ -86,8 +88,12 @@ impl LinesWriter {
|
||||
}
|
||||
|
||||
pub fn write_u64(&mut self, column_name: &str, value: u64) -> Result<()> {
|
||||
let (idx, column) =
|
||||
self.mut_column(column_name, ColumnDataType::Uint64, SemanticType::Field);
|
||||
let (idx, column) = self.mut_column(
|
||||
column_name,
|
||||
ColumnDataType::Uint64,
|
||||
SemanticType::Field,
|
||||
None,
|
||||
);
|
||||
ensure!(
|
||||
column.datatype == ColumnDataType::Uint64 as i32,
|
||||
TypeMismatchSnafu {
|
||||
@@ -104,8 +110,12 @@ impl LinesWriter {
|
||||
}
|
||||
|
||||
pub fn write_i64(&mut self, column_name: &str, value: i64) -> Result<()> {
|
||||
let (idx, column) =
|
||||
self.mut_column(column_name, ColumnDataType::Int64, SemanticType::Field);
|
||||
let (idx, column) = self.mut_column(
|
||||
column_name,
|
||||
ColumnDataType::Int64,
|
||||
SemanticType::Field,
|
||||
None,
|
||||
);
|
||||
ensure!(
|
||||
column.datatype == ColumnDataType::Int64 as i32,
|
||||
TypeMismatchSnafu {
|
||||
@@ -122,8 +132,12 @@ impl LinesWriter {
|
||||
}
|
||||
|
||||
pub fn write_f64(&mut self, column_name: &str, value: f64) -> Result<()> {
|
||||
let (idx, column) =
|
||||
self.mut_column(column_name, ColumnDataType::Float64, SemanticType::Field);
|
||||
let (idx, column) = self.mut_column(
|
||||
column_name,
|
||||
ColumnDataType::Float64,
|
||||
SemanticType::Field,
|
||||
None,
|
||||
);
|
||||
ensure!(
|
||||
column.datatype == ColumnDataType::Float64 as i32,
|
||||
TypeMismatchSnafu {
|
||||
@@ -140,8 +154,12 @@ impl LinesWriter {
|
||||
}
|
||||
|
||||
pub fn write_string(&mut self, column_name: &str, value: &str) -> Result<()> {
|
||||
let (idx, column) =
|
||||
self.mut_column(column_name, ColumnDataType::String, SemanticType::Field);
|
||||
let (idx, column) = self.mut_column(
|
||||
column_name,
|
||||
ColumnDataType::String,
|
||||
SemanticType::Field,
|
||||
None,
|
||||
);
|
||||
ensure!(
|
||||
column.datatype == ColumnDataType::String as i32,
|
||||
TypeMismatchSnafu {
|
||||
@@ -158,8 +176,12 @@ impl LinesWriter {
|
||||
}
|
||||
|
||||
pub fn write_bool(&mut self, column_name: &str, value: bool) -> Result<()> {
|
||||
let (idx, column) =
|
||||
self.mut_column(column_name, ColumnDataType::Boolean, SemanticType::Field);
|
||||
let (idx, column) = self.mut_column(
|
||||
column_name,
|
||||
ColumnDataType::Boolean,
|
||||
SemanticType::Field,
|
||||
None,
|
||||
);
|
||||
ensure!(
|
||||
column.datatype == ColumnDataType::Boolean as i32,
|
||||
TypeMismatchSnafu {
|
||||
@@ -201,6 +223,7 @@ impl LinesWriter {
|
||||
column_name: &str,
|
||||
datatype: ColumnDataType,
|
||||
semantic_type: SemanticType,
|
||||
datatype_extension: Option<ColumnDataTypeExtension>,
|
||||
) -> (usize, &mut Column) {
|
||||
let column_names = &mut self.column_name_index;
|
||||
let column_idx = match column_names.get(column_name) {
|
||||
@@ -218,6 +241,7 @@ impl LinesWriter {
|
||||
values: Some(values_with_capacity(datatype, to_insert)),
|
||||
datatype: datatype as i32,
|
||||
null_mask: Vec::default(),
|
||||
datatype_extension,
|
||||
});
|
||||
let _ = column_names.insert(column_name.to_string(), new_idx);
|
||||
new_idx
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
|
||||
@@ -8,8 +8,6 @@ license.workspace = true
|
||||
proc-macro = true
|
||||
|
||||
[dependencies]
|
||||
backtrace = "0.3"
|
||||
common-telemetry = { workspace = true }
|
||||
proc-macro2 = "1.0.66"
|
||||
quote = "1.0"
|
||||
syn = "1.0"
|
||||
@@ -25,7 +23,7 @@ syn2 = { version = "2.0", package = "syn", features = [
|
||||
|
||||
[dev-dependencies]
|
||||
arc-swap = "1.0"
|
||||
common-query = { workspace = true }
|
||||
datatypes = { workspace = true }
|
||||
common-query.workspace = true
|
||||
datatypes.workspace = true
|
||||
snafu.workspace = true
|
||||
static_assertions = "1.1.0"
|
||||
|
||||
@@ -5,8 +5,8 @@ edition.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[dependencies]
|
||||
common-error = { workspace = true }
|
||||
common-macro = { workspace = true }
|
||||
common-error.workspace = true
|
||||
common-macro.workspace = true
|
||||
snafu.workspace = true
|
||||
tempfile = "3.4"
|
||||
tokio.workspace = true
|
||||
|
||||
@@ -8,23 +8,22 @@ license.workspace = true
|
||||
testing = []
|
||||
|
||||
[dependencies]
|
||||
api = { workspace = true }
|
||||
arrow-flight.workspace = true
|
||||
api.workspace = true
|
||||
async-recursion = "1.0"
|
||||
async-stream.workspace = true
|
||||
async-trait.workspace = true
|
||||
base64 = "0.21"
|
||||
base64.workspace = true
|
||||
bytes = "1.4"
|
||||
common-catalog = { workspace = true }
|
||||
common-error = { workspace = true }
|
||||
common-catalog.workspace = true
|
||||
common-error.workspace = true
|
||||
common-grpc-expr.workspace = true
|
||||
common-macro = { workspace = true }
|
||||
common-procedure = { workspace = true }
|
||||
common-recordbatch = { workspace = true }
|
||||
common-runtime = { workspace = true }
|
||||
common-telemetry = { workspace = true }
|
||||
common-time = { workspace = true }
|
||||
datatypes = { workspace = true }
|
||||
common-macro.workspace = true
|
||||
common-procedure.workspace = true
|
||||
common-recordbatch.workspace = true
|
||||
common-runtime.workspace = true
|
||||
common-telemetry.workspace = true
|
||||
common-time.workspace = true
|
||||
datatypes.workspace = true
|
||||
etcd-client.workspace = true
|
||||
futures.workspace = true
|
||||
humantime-serde.workspace = true
|
||||
@@ -35,13 +34,14 @@ regex.workspace = true
|
||||
serde.workspace = true
|
||||
serde_json.workspace = true
|
||||
snafu.workspace = true
|
||||
store-api = { workspace = true }
|
||||
store-api.workspace = true
|
||||
strum.workspace = true
|
||||
table = { workspace = true }
|
||||
table.workspace = true
|
||||
tokio.workspace = true
|
||||
tonic.workspace = true
|
||||
|
||||
[dev-dependencies]
|
||||
chrono.workspace = true
|
||||
datatypes = { workspace = true }
|
||||
common-procedure = { workspace = true, features = ["testing"] }
|
||||
datatypes.workspace = true
|
||||
hyper = { version = "0.14", features = ["full"] }
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
|
||||
@@ -15,6 +15,7 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use api::v1::meta::Partition;
|
||||
use common_telemetry::tracing_context::W3cTrace;
|
||||
use store_api::storage::TableId;
|
||||
use table::metadata::RawTableInfo;
|
||||
|
||||
@@ -34,6 +35,7 @@ pub mod utils;
|
||||
#[derive(Debug, Default)]
|
||||
pub struct ExecutorContext {
|
||||
pub cluster_id: Option<u64>,
|
||||
pub tracing_context: Option<W3cTrace>,
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
|
||||
@@ -26,6 +26,7 @@ use common_procedure::error::{FromJsonSnafu, Result as ProcedureResult, ToJsonSn
|
||||
use common_procedure::{
|
||||
Context as ProcedureContext, Error as ProcedureError, LockKey, Procedure, Status,
|
||||
};
|
||||
use common_telemetry::tracing_context::TracingContext;
|
||||
use common_telemetry::{debug, info};
|
||||
use futures::future;
|
||||
use serde::{Deserialize, Serialize};
|
||||
@@ -207,7 +208,7 @@ impl AlterTableProcedure {
|
||||
let request = self.create_alter_region_request(region_id)?;
|
||||
let request = RegionRequest {
|
||||
header: Some(RegionRequestHeader {
|
||||
trace_id: common_telemetry::trace_id().unwrap_or_default(),
|
||||
tracing_context: TracingContext::from_current_span().to_w3c(),
|
||||
..Default::default()
|
||||
}),
|
||||
body: Some(region_request::Body::Alter(request)),
|
||||
|
||||
@@ -21,6 +21,7 @@ use async_trait::async_trait;
|
||||
use common_procedure::error::{FromJsonSnafu, Result as ProcedureResult, ToJsonSnafu};
|
||||
use common_procedure::{Context as ProcedureContext, LockKey, Procedure, Status};
|
||||
use common_telemetry::info;
|
||||
use common_telemetry::tracing_context::TracingContext;
|
||||
use futures::future::join_all;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
@@ -131,6 +132,7 @@ impl CreateTableProcedure {
|
||||
default_constraint: c.default_constraint.clone(),
|
||||
semantic_type: semantic_type as i32,
|
||||
comment: String::new(),
|
||||
datatype_extension: c.datatype_extension.clone(),
|
||||
}),
|
||||
column_id: i as u32,
|
||||
}
|
||||
@@ -199,7 +201,7 @@ impl CreateTableProcedure {
|
||||
for request in requests {
|
||||
let request = RegionRequest {
|
||||
header: Some(RegionRequestHeader {
|
||||
trace_id: common_telemetry::trace_id().unwrap_or_default(),
|
||||
tracing_context: TracingContext::from_current_span().to_w3c(),
|
||||
..Default::default()
|
||||
}),
|
||||
body: Some(request),
|
||||
|
||||
@@ -22,6 +22,7 @@ use common_procedure::error::{FromJsonSnafu, ToJsonSnafu};
|
||||
use common_procedure::{
|
||||
Context as ProcedureContext, LockKey, Procedure, Result as ProcedureResult, Status,
|
||||
};
|
||||
use common_telemetry::tracing_context::TracingContext;
|
||||
use common_telemetry::{debug, info};
|
||||
use futures::future::join_all;
|
||||
use serde::{Deserialize, Serialize};
|
||||
@@ -85,6 +86,10 @@ impl DropTableProcedure {
|
||||
))
|
||||
.await?;
|
||||
|
||||
if !exist && self.data.task.drop_if_exists {
|
||||
return Ok(Status::Done);
|
||||
}
|
||||
|
||||
ensure!(
|
||||
exist,
|
||||
error::TableNotFoundSnafu {
|
||||
@@ -157,7 +162,7 @@ impl DropTableProcedure {
|
||||
|
||||
let request = RegionRequest {
|
||||
header: Some(RegionRequestHeader {
|
||||
trace_id: common_telemetry::trace_id().unwrap_or_default(),
|
||||
tracing_context: TracingContext::from_current_span().to_w3c(),
|
||||
..Default::default()
|
||||
}),
|
||||
body: Some(region_request::Body::Drop(PbDropRegionRequest {
|
||||
|
||||
@@ -21,6 +21,7 @@ use common_procedure::{
|
||||
Context as ProcedureContext, LockKey, Procedure, Result as ProcedureResult, Status,
|
||||
};
|
||||
use common_telemetry::debug;
|
||||
use common_telemetry::tracing_context::TracingContext;
|
||||
use futures::future::join_all;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use snafu::{ensure, ResultExt};
|
||||
@@ -154,7 +155,7 @@ impl TruncateTableProcedure {
|
||||
|
||||
let request = RegionRequest {
|
||||
header: Some(RegionRequestHeader {
|
||||
trace_id: common_telemetry::trace_id().unwrap_or_default(),
|
||||
tracing_context: TracingContext::from_current_span().to_w3c(),
|
||||
..Default::default()
|
||||
}),
|
||||
body: Some(region_request::Body::Truncate(PbTruncateRegionRequest {
|
||||
|
||||
@@ -15,7 +15,8 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_procedure::{watcher, ProcedureId, ProcedureManagerRef, ProcedureWithId};
|
||||
use common_telemetry::info;
|
||||
use common_telemetry::tracing_context::{FutureExt, TracingContext};
|
||||
use common_telemetry::{info, tracing};
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
|
||||
use crate::cache_invalidator::CacheInvalidatorRef;
|
||||
@@ -42,9 +43,9 @@ use crate::rpc::ddl::{
|
||||
TruncateTableTask,
|
||||
};
|
||||
use crate::rpc::router::RegionRoute;
|
||||
|
||||
pub type DdlManagerRef = Arc<DdlManager>;
|
||||
|
||||
/// The [DdlManager] provides the ability to execute Ddl.
|
||||
pub struct DdlManager {
|
||||
procedure_manager: ProcedureManagerRef,
|
||||
datanode_manager: DatanodeManagerRef,
|
||||
@@ -54,26 +55,31 @@ pub struct DdlManager {
|
||||
}
|
||||
|
||||
impl DdlManager {
|
||||
pub fn new(
|
||||
/// Returns a new [DdlManager] with all Ddl [BoxedProcedureLoader](common_procedure::procedure::BoxedProcedureLoader)s registered.
|
||||
pub fn try_new(
|
||||
procedure_manager: ProcedureManagerRef,
|
||||
datanode_clients: DatanodeManagerRef,
|
||||
cache_invalidator: CacheInvalidatorRef,
|
||||
table_metadata_manager: TableMetadataManagerRef,
|
||||
table_meta_allocator: TableMetadataAllocatorRef,
|
||||
) -> Self {
|
||||
Self {
|
||||
) -> Result<Self> {
|
||||
let manager = Self {
|
||||
procedure_manager,
|
||||
datanode_manager: datanode_clients,
|
||||
cache_invalidator,
|
||||
table_metadata_manager,
|
||||
table_meta_allocator,
|
||||
}
|
||||
};
|
||||
manager.register_loaders()?;
|
||||
Ok(manager)
|
||||
}
|
||||
|
||||
/// Returns the [TableMetadataManagerRef].
|
||||
pub fn table_metadata_manager(&self) -> &TableMetadataManagerRef {
|
||||
&self.table_metadata_manager
|
||||
}
|
||||
|
||||
/// Returns the [DdlContext]
|
||||
pub fn create_context(&self) -> DdlContext {
|
||||
DdlContext {
|
||||
datanode_manager: self.datanode_manager.clone(),
|
||||
@@ -82,7 +88,7 @@ impl DdlManager {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn try_start(&self) -> Result<()> {
|
||||
fn register_loaders(&self) -> Result<()> {
|
||||
let context = self.create_context();
|
||||
|
||||
self.procedure_manager
|
||||
@@ -140,6 +146,8 @@ impl DdlManager {
|
||||
})
|
||||
}
|
||||
|
||||
#[tracing::instrument(skip_all)]
|
||||
/// Submits and executes an alter table task.
|
||||
pub async fn submit_alter_table_task(
|
||||
&self,
|
||||
cluster_id: u64,
|
||||
@@ -156,6 +164,8 @@ impl DdlManager {
|
||||
self.submit_procedure(procedure_with_id).await
|
||||
}
|
||||
|
||||
#[tracing::instrument(skip_all)]
|
||||
/// Submits and executes a create table task.
|
||||
pub async fn submit_create_table_task(
|
||||
&self,
|
||||
cluster_id: u64,
|
||||
@@ -172,6 +182,8 @@ impl DdlManager {
|
||||
self.submit_procedure(procedure_with_id).await
|
||||
}
|
||||
|
||||
#[tracing::instrument(skip_all)]
|
||||
/// Submits and executes a drop table task.
|
||||
pub async fn submit_drop_table_task(
|
||||
&self,
|
||||
cluster_id: u64,
|
||||
@@ -194,6 +206,8 @@ impl DdlManager {
|
||||
self.submit_procedure(procedure_with_id).await
|
||||
}
|
||||
|
||||
#[tracing::instrument(skip_all)]
|
||||
/// Submits and executes a truncate table task.
|
||||
pub async fn submit_truncate_table_task(
|
||||
&self,
|
||||
cluster_id: u64,
|
||||
@@ -383,21 +397,108 @@ impl DdlTaskExecutor for DdlManager {
|
||||
ctx: &ExecutorContext,
|
||||
request: SubmitDdlTaskRequest,
|
||||
) -> Result<SubmitDdlTaskResponse> {
|
||||
let cluster_id = ctx.cluster_id.unwrap_or_default();
|
||||
info!("Submitting Ddl task: {:?}", request.task);
|
||||
match request.task {
|
||||
CreateTable(create_table_task) => {
|
||||
handle_create_table_task(self, cluster_id, create_table_task).await
|
||||
}
|
||||
DropTable(drop_table_task) => {
|
||||
handle_drop_table_task(self, cluster_id, drop_table_task).await
|
||||
}
|
||||
AlterTable(alter_table_task) => {
|
||||
handle_alter_table_task(self, cluster_id, alter_table_task).await
|
||||
}
|
||||
TruncateTable(truncate_table_task) => {
|
||||
handle_truncate_table_task(self, cluster_id, truncate_table_task).await
|
||||
let span = ctx
|
||||
.tracing_context
|
||||
.as_ref()
|
||||
.map(TracingContext::from_w3c)
|
||||
.unwrap_or(TracingContext::from_current_span())
|
||||
.attach(tracing::info_span!("DdlManager::submit_ddl_task"));
|
||||
async move {
|
||||
let cluster_id = ctx.cluster_id.unwrap_or_default();
|
||||
info!("Submitting Ddl task: {:?}", request.task);
|
||||
match request.task {
|
||||
CreateTable(create_table_task) => {
|
||||
handle_create_table_task(self, cluster_id, create_table_task).await
|
||||
}
|
||||
DropTable(drop_table_task) => {
|
||||
handle_drop_table_task(self, cluster_id, drop_table_task).await
|
||||
}
|
||||
AlterTable(alter_table_task) => {
|
||||
handle_alter_table_task(self, cluster_id, alter_table_task).await
|
||||
}
|
||||
TruncateTable(truncate_table_task) => {
|
||||
handle_truncate_table_task(self, cluster_id, truncate_table_task).await
|
||||
}
|
||||
}
|
||||
}
|
||||
.trace(span)
|
||||
.await
|
||||
}
|
||||
}
|
||||
#[cfg(test)]
mod tests {
use std::sync::Arc;

use api::v1::meta::Partition;
use common_procedure::local::LocalManager;
use table::metadata::{RawTableInfo, TableId};

use super::DdlManager;
use crate::cache_invalidator::DummyCacheInvalidator;
use crate::datanode_manager::{DatanodeManager, DatanodeRef};
use crate::ddl::alter_table::AlterTableProcedure;
use crate::ddl::create_table::CreateTableProcedure;
use crate::ddl::drop_table::DropTableProcedure;
use crate::ddl::truncate_table::TruncateTableProcedure;
use crate::ddl::{TableMetadataAllocator, TableMetadataAllocatorContext};
use crate::error::Result;
use crate::key::TableMetadataManager;
use crate::kv_backend::memory::MemoryKvBackend;
use crate::peer::Peer;
use crate::rpc::router::RegionRoute;
use crate::state_store::KvStateStore;

/// A dummy implemented [DatanodeManager].
pub struct DummyDatanodeManager;

#[async_trait::async_trait]
impl DatanodeManager for DummyDatanodeManager {
async fn datanode(&self, _datanode: &Peer) -> DatanodeRef {
unimplemented!()
}
}

/// A dummy implemented [TableMetadataAllocator].
pub struct DummyTableMetadataAllocator;

#[async_trait::async_trait]
impl TableMetadataAllocator for DummyTableMetadataAllocator {
async fn create(
&self,
_ctx: &TableMetadataAllocatorContext,
_table_info: &mut RawTableInfo,
_partitions: &[Partition],
) -> Result<(TableId, Vec<RegionRoute>)> {
unimplemented!()
}
}

#[test]
fn test_try_new() {
let kv_backend = Arc::new(MemoryKvBackend::new());
let table_metadata_manager = Arc::new(TableMetadataManager::new(kv_backend.clone()));

let state_store = Arc::new(KvStateStore::new(kv_backend));
let procedure_manager = Arc::new(LocalManager::new(Default::default(), state_store));

let _ = DdlManager::try_new(
procedure_manager.clone(),
Arc::new(DummyDatanodeManager),
Arc::new(DummyCacheInvalidator),
table_metadata_manager,
Arc::new(DummyTableMetadataAllocator),
);

let expected_loaders = vec![
CreateTableProcedure::TYPE_NAME,
AlterTableProcedure::TYPE_NAME,
DropTableProcedure::TYPE_NAME,
TruncateTableProcedure::TYPE_NAME,
];

for loader in expected_loaders {
assert!(procedure_manager.contains_loader(loader));
}
}
}
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -33,5 +33,8 @@ pub const DATANODE_LEASE_SECS: u64 = REGION_LEASE_SECS;
/// The lease seconds of metasrv leader.
pub const META_LEASE_SECS: u64 = 3;

// In a lease, there are two opportunities for renewal.
/// In a lease, there are two opportunities for renewal.
pub const META_KEEP_ALIVE_INTERVAL_SECS: u64 = META_LEASE_SECS / 2;

/// The default mailbox round-trip timeout.
pub const MAILBOX_RTT_SECS: u64 = 1;
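As a quick check of the arithmetic: with META_LEASE_SECS = 3 and u64 integer division, META_KEEP_ALIVE_INTERVAL_SECS works out to 3 / 2 = 1 second, so a keep-alive is sent roughly every second and there are at least two renewal attempts within each 3-second lease, which is what the "two opportunities for renewal" comment refers to.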
@@ -37,7 +37,7 @@ pub struct HeartbeatResponseHandlerContext {
/// HandleControl
///
/// Controls process of handling heartbeat response.
#[derive(PartialEq)]
#[derive(Debug, PartialEq)]
pub enum HandleControl {
Continue,
Done,
@@ -30,8 +30,8 @@ pub struct MessageMeta {
pub from: String,
}

#[cfg(test)]
impl MessageMeta {
#[cfg(any(test, feature = "testing"))]
pub fn new_test(id: u64, subject: &str, to: &str, from: &str) -> Self {
MessageMeta {
id,
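Widening the attribute from #[cfg(test)] to #[cfg(any(test, feature = "testing"))] keeps MessageMeta::new_test out of normal builds, but lets downstream crates that enable this crate's "testing" feature compile and call the helper from their own tests as well.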
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -48,6 +48,27 @@ impl Display for RegionIdent {
}
}

/// The result of downgrade leader region.
#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)]
pub struct DowngradeRegionReply {
/// Returns the `last_entry_id` if available.
pub last_entry_id: Option<u64>,
/// Indicates whether the region exists.
pub exists: bool,
/// Return error if any during the operation.
pub error: Option<String>,
}

impl Display for DowngradeRegionReply {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
write!(
f,
"(last_entry_id={:?}, exists={}, error={:?})",
self.last_entry_id, self.exists, self.error
)
}
}

#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)]
pub struct SimpleReply {
pub result: bool,
@@ -87,20 +108,82 @@ impl OpenRegion {
}
}

/// The instruction of downgrading leader region.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DowngradeRegion {
/// The [RegionId].
pub region_id: RegionId,
}

impl Display for DowngradeRegion {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
write!(f, "DowngradeRegion(region_id={})", self.region_id)
}
}

/// Upgrades a follower region to leader region.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct UpgradeRegion {
/// The [RegionId].
pub region_id: RegionId,
/// The `last_entry_id` of old leader region.
pub last_entry_id: Option<u64>,
/// The second of waiting for a wal replay.
///
/// `None` stands for no wait,
/// it's helpful to verify whether the leader region is ready.
pub wait_for_replay_secs: Option<u64>,
}

#[derive(Debug, Clone, Serialize, Deserialize, Display)]
pub enum Instruction {
/// Opens a region.
///
/// - Returns true if a specified region exists.
OpenRegion(OpenRegion),
/// Closes a region.
///
/// - Returns true if a specified region does not exist.
CloseRegion(RegionIdent),
/// Upgrades a region.
UpgradeRegion(UpgradeRegion),
/// Downgrades a region.
DowngradeRegion(DowngradeRegion),
/// Invalidates a specified table cache.
InvalidateTableIdCache(TableId),
/// Invalidates a specified table name index cache.
InvalidateTableNameCache(TableName),
}

/// The reply of [UpgradeRegion].
#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)]
pub struct UpgradeRegionReply {
/// Returns true if `last_entry_id` has been replayed to the latest.
pub ready: bool,
/// Indicates whether the region exists.
pub exists: bool,
/// Returns error if any.
pub error: Option<String>,
}

impl Display for UpgradeRegionReply {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
write!(
f,
"(ready={}, exists={}, error={:?})",
self.ready, self.exists, self.error
)
}
}

#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum InstructionReply {
OpenRegion(SimpleReply),
CloseRegion(SimpleReply),
UpgradeRegion(UpgradeRegionReply),
InvalidateTableCache(SimpleReply),
DowngradeRegion(DowngradeRegionReply),
}

impl Display for InstructionReply {
@@ -108,9 +191,13 @@ impl Display for InstructionReply {
match self {
Self::OpenRegion(reply) => write!(f, "InstructionReply::OpenRegion({})", reply),
Self::CloseRegion(reply) => write!(f, "InstructionReply::CloseRegion({})", reply),
Self::UpgradeRegion(reply) => write!(f, "InstructionReply::UpgradeRegion({})", reply),
Self::InvalidateTableCache(reply) => {
write!(f, "InstructionReply::Invalidate({})", reply)
}
Self::DowngradeRegion(reply) => {
write!(f, "InstructionReply::DowngradeRegion({})", reply)
}
}
}
}
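The new DowngradeRegion/UpgradeRegion instructions and their replies are plain serde and Display types, so a handler can construct and inspect them directly. A small illustrative sketch (the helper function names are hypothetical; the types and fields come from the definitions above):

// Sketch: constructing a downgrade instruction and summarizing replies.
fn downgrade(region_id: RegionId) -> Instruction {
    Instruction::DowngradeRegion(DowngradeRegion { region_id })
}

fn summarize(reply: &InstructionReply) -> String {
    match reply {
        // A downgraded leader reports its last_entry_id, if available.
        InstructionReply::DowngradeRegion(r) => {
            format!("downgraded: exists={}, last_entry_id={:?}", r.exists, r.last_entry_id)
        }
        // An upgrade reply says whether WAL replay caught up within the wait window.
        InstructionReply::UpgradeRegion(r) => {
            format!("upgraded: ready={}, exists={}", r.ready, r.exists)
        }
        other => other.to_string(), // falls back to the Display impl above
    }
}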
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -64,6 +64,7 @@ use std::sync::Arc;

use bytes::Bytes;
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_telemetry::warn;
use datanode_table::{DatanodeTableKey, DatanodeTableManager, DatanodeTableValue};
use lazy_static::lazy_static;
use regex::Regex;
@@ -83,12 +84,12 @@ use crate::ddl::utils::region_storage_path;
use crate::error::{self, Result, SerdeJsonSnafu};
use crate::kv_backend::txn::Txn;
use crate::kv_backend::KvBackendRef;
use crate::rpc::router::{region_distribution, RegionRoute};
use crate::rpc::router::{region_distribution, RegionRoute, RegionStatus};
use crate::DatanodeId;

pub const REMOVED_PREFIX: &str = "__removed";

const NAME_PATTERN: &str = r"[a-zA-Z_:-][a-zA-Z0-9_:\-\.]*";
pub const NAME_PATTERN: &str = r"[a-zA-Z_:-][a-zA-Z0-9_:\-\.]*";

const DATANODE_TABLE_KEY_PREFIX: &str = "__dn_table";
const TABLE_REGION_KEY_PREFIX: &str = "__table_region";
@@ -583,7 +584,7 @@ impl TableMetadataManager {
&self,
table_id: TableId,
region_info: RegionInfo,
current_table_route_value: DeserializedValueWithBytes<TableRouteValue>,
current_table_route_value: &DeserializedValueWithBytes<TableRouteValue>,
new_region_routes: Vec<RegionRoute>,
new_region_options: &HashMap<String, String>,
) -> Result<()> {
@@ -605,7 +606,7 @@ impl TableMetadataManager {

let (update_table_route_txn, on_update_table_route_failure) = self
.table_route_manager()
.build_update_txn(table_id, &current_table_route_value, &new_table_route_value)?;
.build_update_txn(table_id, current_table_route_value, &new_table_route_value)?;

let txn = Txn::merge_all(vec![update_datanode_table_txn, update_table_route_txn]);

@@ -625,6 +626,56 @@ impl TableMetadataManager {

Ok(())
}

/// Updates the leader status of the [RegionRoute].
pub async fn update_leader_region_status<F>(
&self,
table_id: TableId,
current_table_route_value: &DeserializedValueWithBytes<TableRouteValue>,
next_region_route_status: F,
) -> Result<()>
where
F: Fn(&RegionRoute) -> Option<Option<RegionStatus>>,
{
let mut new_region_routes = current_table_route_value.region_routes.clone();

let mut updated = 0;
for route in &mut new_region_routes {
if let Some(status) = next_region_route_status(route) {
if route.set_leader_status(status) {
updated += 1;
}
}
}

if updated == 0 {
warn!("No leader status updated");
return Ok(());
}

// Updates the table_route.
let new_table_route_value = current_table_route_value.update(new_region_routes);

let (update_table_route_txn, on_update_table_route_failure) = self
.table_route_manager()
.build_update_txn(table_id, current_table_route_value, &new_table_route_value)?;

let r = self.kv_backend.txn(update_table_route_txn).await?;

// Checks whether metadata was already updated.
if !r.succeeded {
let remote_table_route = on_update_table_route_failure(&r.responses)?
.context(error::UnexpectedSnafu {
err_msg: "Reads the empty table route during the updating leader region status",
})?
.into_inner();

let op_name = "the updating leader region status";
ensure_values!(remote_table_route, new_table_route_value, op_name);
}

Ok(())
}
}

#[macro_export]
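A note on the closure contract of update_leader_region_status above: the callback returns Option<Option<RegionStatus>>, where the outer None means "leave this route untouched" and Some(inner) means "overwrite the leader status with inner" (Some(RegionStatus::Downgraded) to mark it downgraded, None to clear the marker); a route only counts toward `updated` when set_leader_status returns true. A hedged usage sketch built from the types in this diff (the function name is hypothetical):

// Sketch: clear the Downgraded marker on every leader region of a table.
async fn reset_downgraded(
    manager: &TableMetadataManager,
    table_id: TableId,
    current: &DeserializedValueWithBytes<TableRouteValue>,
) -> Result<()> {
    manager
        .update_leader_region_status(table_id, current, |route| match route.leader_status {
            Some(RegionStatus::Downgraded) => Some(None), // clear the marker
            _ => None,                                     // leave other routes unchanged
        })
        .await
}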
@@ -707,7 +758,7 @@ mod tests {
use crate::key::{to_removed_key, DeserializedValueWithBytes, TableMetadataManager};
use crate::kv_backend::memory::MemoryKvBackend;
use crate::peer::Peer;
use crate::rpc::router::{region_distribution, Region, RegionRoute};
use crate::rpc::router::{region_distribution, Region, RegionRoute, RegionStatus};

#[test]
fn test_deserialized_value_with_bytes() {
@@ -1002,6 +1053,74 @@ mod tests {
.is_err())
}

#[tokio::test]
async fn test_update_table_leader_region_status() {
let mem_kv = Arc::new(MemoryKvBackend::default());
let table_metadata_manager = TableMetadataManager::new(mem_kv);
let datanode = 1;
let region_routes = vec![
RegionRoute {
region: Region {
id: 1.into(),
name: "r1".to_string(),
partition: None,
attrs: BTreeMap::new(),
},
leader_peer: Some(Peer::new(datanode, "a2")),
leader_status: Some(RegionStatus::Downgraded),
follower_peers: vec![],
},
RegionRoute {
region: Region {
id: 2.into(),
name: "r2".to_string(),
partition: None,
attrs: BTreeMap::new(),
},
leader_peer: Some(Peer::new(datanode, "a1")),
leader_status: None,
follower_peers: vec![],
},
];
let table_info: RawTableInfo =
new_test_table_info(region_routes.iter().map(|r| r.region.id.region_number())).into();
let table_id = table_info.ident.table_id;
let current_table_route_value =
DeserializedValueWithBytes::from_inner(TableRouteValue::new(region_routes.clone()));
// creates metadata.
table_metadata_manager
.create_table_metadata(table_info.clone(), region_routes.clone())
.await
.unwrap();

table_metadata_manager
.update_leader_region_status(table_id, &current_table_route_value, |region_route| {
if region_route.leader_status.is_some() {
None
} else {
Some(Some(RegionStatus::Downgraded))
}
})
.await
.unwrap();

let updated_route_value = table_metadata_manager
.table_route_manager()
.get(table_id)
.await
.unwrap()
.unwrap();

assert_eq!(
updated_route_value.region_routes[0].leader_status,
Some(RegionStatus::Downgraded)
);
assert_eq!(
updated_route_value.region_routes[1].leader_status,
Some(RegionStatus::Downgraded)
);
}

async fn assert_datanode_table(
table_metadata_manager: &TableMetadataManager,
table_id: u32,
@@ -1054,7 +1173,7 @@ mod tests {
region_storage_path: region_storage_path.to_string(),
region_options: HashMap::new(),
},
current_table_route_value.clone(),
&current_table_route_value,
new_region_routes.clone(),
&HashMap::new(),
)
@@ -1071,7 +1190,7 @@ mod tests {
region_storage_path: region_storage_path.to_string(),
region_options: HashMap::new(),
},
current_table_route_value.clone(),
&current_table_route_value,
new_region_routes.clone(),
&HashMap::new(),
)
@@ -1093,7 +1212,7 @@ mod tests {
region_storage_path: region_storage_path.to_string(),
region_options: HashMap::new(),
},
current_table_route_value.clone(),
&current_table_route_value,
new_region_routes.clone(),
&HashMap::new(),
)
@@ -1118,7 +1237,7 @@ mod tests {
region_storage_path: region_storage_path.to_string(),
region_options: HashMap::new(),
},
wrong_table_route_value,
&wrong_table_route_value,
new_region_routes,
&HashMap::new(),
)
Some files were not shown because too many files have changed in this diff.