Mirror of https://github.com/GreptimeTeam/greptimedb.git, synced 2026-01-05 21:02:58 +00:00.

Compare commits: v0.4.0-nig ... v0.4.0 (58 commits)
Commits in this range (SHA1 prefixes):

c9c2b3c91f, 7f75190fce, 0a394c73a2, ae95f23e05, 6b39f5923d, ed725d030f, 4fe7e162af, 8a5ef826b9,
07be50403e, 8bdef9a348, d4577e7372, 88f26673f0, 19f300fc5a, cc83764331, 81aa7a4caf, d68dd1f3eb,
9b3470b049, 8cc862ff8a, 81ccb58fb4, ce3c10a86e, 007f7ba03c, dfe68a7e0b, d5e4fcaaff, 17b385a985,
067917845f, a680133acc, 0593c3bde3, 0292445476, ff15bc41d6, 657542c0b8, 0ad3fb6040, b44e39f897,
f50f2a84a9, fe783c7c1f, 00fe7d104e, 201acd152d, 04dbd835a1, e3d333258b, 10ecc30817, 52ac093110,
1f1d72bdb8, 7edafc3407, ccd6de8d6b, ee8d472aae, 9282e59a3b, fbe2f2df46, db6ceda5f0, e352fb4495,
a6116bb866, 515ce825bd, 7fc9604735, a4282415f7, 0bf26642a4, 230a3026ad, 54e506a494, 7ecfaa240f,
c0f080df26, f9351e4fb5
@@ -62,6 +62,16 @@ runs:
        IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
        IMAGE_TAG=${{ inputs.version }}

    - name: Build and push android dev builder image to dockerhub
      shell: bash
      run:
        make dev-builder \
        BASE_IMAGE=android \
        BUILDX_MULTI_PLATFORM_BUILD=true \
        IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
        IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
        IMAGE_TAG=${{ inputs.version }}

    - name: Login to ACR
      uses: docker/login-action@v2
      continue-on-error: true
.github/workflows/release.yml (2 changes, vendored)

@@ -91,7 +91,7 @@ env:
  # The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nigthly-20230313;
  NIGHTLY_RELEASE_PREFIX: nightly
  # Note: The NEXT_RELEASE_VERSION should be modified manually by every formal release.
  NEXT_RELEASE_VERSION: v0.4.0
  NEXT_RELEASE_VERSION: v0.5.0

jobs:
  allocate-runners:

Cargo.lock (337 changes, generated): file diff suppressed because it is too large.
Cargo.toml (38 changes)

@@ -39,6 +39,7 @@ members = [
    "src/object-store",
    "src/operator",
    "src/partition",
    "src/plugins",
    "src/promql",
    "src/query",
    "src/script",
@@ -54,39 +55,43 @@ members = [
resolver = "2"

[workspace.package]
version = "0.4.0-nightly"
version = "0.4.0"
edition = "2021"
license = "Apache-2.0"

[workspace.dependencies]
aquamarine = "0.3"
arrow = { version = "43.0" }
etcd-client = "0.11"
arrow-array = "43.0"
arrow-flight = "43.0"
arrow-schema = { version = "43.0", features = ["serde"] }
async-stream = "0.3"
async-trait = "0.1"
chrono = { version = "0.4", features = ["serde"] }
datafusion = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "c0b0fca548e99d020c76e1a1cd7132aab26000e1" }
datafusion-common = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "c0b0fca548e99d020c76e1a1cd7132aab26000e1" }
datafusion-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "c0b0fca548e99d020c76e1a1cd7132aab26000e1" }
datafusion-optimizer = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "c0b0fca548e99d020c76e1a1cd7132aab26000e1" }
datafusion-physical-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "c0b0fca548e99d020c76e1a1cd7132aab26000e1" }
datafusion-sql = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "c0b0fca548e99d020c76e1a1cd7132aab26000e1" }
datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "c0b0fca548e99d020c76e1a1cd7132aab26000e1" }
datafusion = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "b6f3b28b6fe91924cc8dd3d83726b766f2a706ec" }
datafusion-common = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "b6f3b28b6fe91924cc8dd3d83726b766f2a706ec" }
datafusion-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "b6f3b28b6fe91924cc8dd3d83726b766f2a706ec" }
datafusion-optimizer = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "b6f3b28b6fe91924cc8dd3d83726b766f2a706ec" }
datafusion-physical-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "b6f3b28b6fe91924cc8dd3d83726b766f2a706ec" }
datafusion-sql = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "b6f3b28b6fe91924cc8dd3d83726b766f2a706ec" }
datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "b6f3b28b6fe91924cc8dd3d83726b766f2a706ec" }
derive_builder = "0.12"
etcd-client = "0.11"
futures = "0.3"
futures-util = "0.3"
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "115c1080773be8a819e50b257fece9f839a0c836" }
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "1f1dd532a111e3834cc3019c5605e2993ffb9dc3" }
humantime-serde = "1.1"
itertools = "0.10"
lazy_static = "1.4"
meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "abbd357c1e193cd270ea65ee7652334a150b628f" }
metrics = "0.20"
moka = "0.12"
once_cell = "1.18"
opentelemetry-proto = { version = "0.2", features = ["gen-tonic", "metrics"] }
parquet = "43.0"
paste = "1.0"
prost = "0.11"
raft-engine = { git = "https://github.com/tikv/raft-engine.git", rev = "22dfb426cd994602b57725ef080287d3e53db479" }
rand = "0.8"
regex = "1.8"
reqwest = { version = "0.11", default-features = false, features = [
@@ -108,8 +113,6 @@ tokio-util = { version = "0.7", features = ["io-util", "compat"] }
toml = "0.7"
tonic = { version = "0.9", features = ["tls"] }
uuid = { version = "1", features = ["serde", "v4", "fast-rng"] }
metrics = "0.20"
meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "abbd357c1e193cd270ea65ee7652334a150b628f" }
## workspaces members
api = { path = "src/api" }
auth = { path = "src/auth" }
@@ -122,19 +125,18 @@ common-config = { path = "src/common/config" }
common-datasource = { path = "src/common/datasource" }
common-error = { path = "src/common/error" }
common-function = { path = "src/common/function" }
common-macro = { path = "src/common/macro" }
common-greptimedb-telemetry = { path = "src/common/greptimedb-telemetry" }
common-grpc = { path = "src/common/grpc" }
common-grpc-expr = { path = "src/common/grpc-expr" }
common-macro = { path = "src/common/macro" }
common-mem-prof = { path = "src/common/mem-prof" }
common-meta = { path = "src/common/meta" }
common-pprof = { path = "src/common/pprof" }
common-procedure = { path = "src/common/procedure" }
common-procedure-test = { path = "src/common/procedure-test" }
common-pprof = { path = "src/common/pprof" }
common-query = { path = "src/common/query" }
common-recordbatch = { path = "src/common/recordbatch" }
common-runtime = { path = "src/common/runtime" }
substrait = { path = "src/common/substrait" }
common-telemetry = { path = "src/common/telemetry" }
common-test-util = { path = "src/common/test-util" }
common-time = { path = "src/common/time" }
@@ -148,20 +150,20 @@ meta-client = { path = "src/meta-client" }
meta-srv = { path = "src/meta-srv" }
mito = { path = "src/mito" }
mito2 = { path = "src/mito2" }
operator = { path = "src/operator" }
object-store = { path = "src/object-store" }
operator = { path = "src/operator" }
partition = { path = "src/partition" }
plugins = { path = "src/plugins" }
promql = { path = "src/promql" }
query = { path = "src/query" }
raft-engine = { git = "https://github.com/tikv/raft-engine.git", rev = "22dfb426cd994602b57725ef080287d3e53db479" }
script = { path = "src/script" }
servers = { path = "src/servers" }
session = { path = "src/session" }
sql = { path = "src/sql" }
storage = { path = "src/storage" }
store-api = { path = "src/store-api" }
substrait = { path = "src/common/substrait" }
table = { path = "src/table" }
table-procedure = { path = "src/table-procedure" }

[workspace.dependencies.meter-macros]
git = "https://github.com/GreptimeTeam/greptime-meter.git"
Makefile (31 changes)

@@ -55,11 +55,15 @@ else
BUILDX_MULTI_PLATFORM_BUILD_OPTS := -o type=docker
endif

ifneq ($(strip $(CARGO_BUILD_EXTRA_OPTS)),)
CARGO_BUILD_OPTS += ${CARGO_BUILD_EXTRA_OPTS}
endif

##@ Build

.PHONY: build
build: ## Build debug version greptime.
    cargo build ${CARGO_BUILD_OPTS}
    cargo ${CARGO_EXTENSION} build ${CARGO_BUILD_OPTS}

.POHNY: build-by-dev-builder
build-by-dev-builder: ## Build greptime by dev-builder.
@@ -67,11 +71,34 @@ build-by-dev-builder: ## Build greptime by dev-builder.
        -v ${PWD}:/greptimedb -v ${CARGO_REGISTRY_CACHE}:/root/.cargo/registry \
        -w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:latest \
        make build \
        CARGO_EXTENSION="${CARGO_EXTENSION}" \
        CARGO_PROFILE=${CARGO_PROFILE} \
        FEATURES=${FEATURES} \
        TARGET_DIR=${TARGET_DIR} \
        TARGET=${TARGET} \
        RELEASE=${RELEASE}
        RELEASE=${RELEASE} \
        CARGO_BUILD_EXTRA_OPTS="${CARGO_BUILD_EXTRA_OPTS}"

.PHONY: build-android-bin
build-android-bin: ## Build greptime binary for android.
    docker run --network=host \
        -v ${PWD}:/greptimedb -v ${CARGO_REGISTRY_CACHE}:/root/.cargo/registry \
        -w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-android:latest \
        make build \
        CARGO_EXTENSION="ndk --platform 23 -t aarch64-linux-android" \
        CARGO_PROFILE=release \
        FEATURES="${FEATURES}" \
        TARGET_DIR="${TARGET_DIR}" \
        TARGET="${TARGET}" \
        RELEASE="${RELEASE}" \
        CARGO_BUILD_EXTRA_OPTS="--bin greptime --no-default-features"

.PHONY: strip-android-bin
strip-android-bin: ## Strip greptime binary for android.
    docker run --network=host \
        -v ${PWD}:/greptimedb \
        -w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-android:latest \
        bash -c '$${NDK_ROOT}/toolchains/llvm/prebuilt/linux-x86_64/bin/llvm-strip /greptimedb/target/aarch64-linux-android/release/greptime'

.PHONY: clean
clean: ## Clean the project.
@@ -27,6 +27,14 @@
  <a href="https://greptime.com/slack"><img src="https://img.shields.io/badge/slack-GreptimeDB-0abd59?logo=slack" alt="slack" /></a>
</p>

## Upcoming Event
Come and meet us in **KubeCon + CloudNativeCon North America 2023!**
<p align="center">
  <picture>
    <img alt="KubeCon + CloudNativeCon North Logo" src="./docs/banner/KCCNC_NA_2023_1000x200_Email Banner.png" width="800px">
  </picture>
</p>

## What is GreptimeDB

GreptimeDB is an open-source time-series database with a special focus on
@@ -13,17 +13,19 @@ rpc_runtime_size = 8
require_lease_before_startup = false

[heartbeat]
# Interval for sending heartbeat messages to the Metasrv in milliseconds, 5000 by default.
interval_millis = 5000
# Interval for sending heartbeat messages to the Metasrv in milliseconds, 3000 by default.
interval_millis = 3000

# Metasrv client options.
[meta_client]
# Metasrv address list.
metasrv_addrs = ["127.0.0.1:3002"]
# Heartbeat timeout in milliseconds, 500 by default.
heartbeat_timeout_millis = 500
# Operation timeout in milliseconds, 3000 by default.
timeout_millis = 3000
# Connect server timeout in milliseconds, 5000 by default.
connect_timeout_millis = 5000
connect_timeout_millis = 1000
# `TCP_NODELAY` option for accepted connections, true by default.
tcp_nodelay = true

@@ -45,6 +47,12 @@ type = "File"
# TTL for all tables. Disabled by default.
# global_ttl = "7d"

# Cache configuration for object storage such as 'S3' etc.
# The local file cache directory
# cache_path = "/path/local_cache"
# The local file cache capacity in bytes.
# cache_capacity = "256Mib"

# Compaction options, see `standalone.example.toml`.
[storage.compaction]
max_inflight_tasks = 4

@@ -62,7 +62,7 @@ metasrv_addrs = ["127.0.0.1:3002"]
timeout_millis = 3000
# DDL timeouts options.
ddl_timeout_millis = 10000
connect_timeout_millis = 5000
connect_timeout_millis = 1000
tcp_nodelay = true

# Log options, see `standalone.example.toml`

@@ -115,6 +115,10 @@ data_home = "/tmp/greptimedb/"
type = "File"
# TTL for all tables. Disabled by default.
# global_ttl = "7d"
# Cache configuration for object storage such as 'S3' etc.
# cache_path = "/path/local_cache"
# The local file cache capacity in bytes.
# cache_capacity = "256Mib"

# Compaction options.
[storage.compaction]
docker/dev-builder/android/Dockerfile (new file, 41 lines)

@@ -0,0 +1,41 @@
FROM --platform=linux/amd64 saschpe/android-ndk:34-jdk17.0.8_7-ndk25.2.9519653-cmake3.22.1

ENV LANG en_US.utf8
WORKDIR /greptimedb

# Rename libunwind to libgcc
RUN cp ${NDK_ROOT}/toolchains/llvm/prebuilt/linux-x86_64/lib64/clang/14.0.7/lib/linux/aarch64/libunwind.a ${NDK_ROOT}/toolchains/llvm/prebuilt/linux-x86_64/lib64/clang/14.0.7/lib/linux/aarch64/libgcc.a

# Install dependencies.
RUN apt-get update && apt-get install -y \
    libssl-dev \
    protobuf-compiler \
    curl \
    git \
    build-essential \
    pkg-config \
    python3 \
    python3-dev \
    python3-pip \
    && pip3 install --upgrade pip \
    && pip3 install pyarrow

# Trust workdir
RUN git config --global --add safe.directory /greptimedb

# Install Rust.
SHELL ["/bin/bash", "-c"]
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
ENV PATH /root/.cargo/bin/:$PATH

# Add android toolchains
ARG RUST_TOOLCHAIN
RUN rustup toolchain install ${RUST_TOOLCHAIN}
RUN rustup target add aarch64-linux-android

# Install cargo-ndk
RUN cargo install cargo-ndk
ENV ANDROID_NDK_HOME $NDK_ROOT

# Builder entrypoint.
CMD ["cargo", "ndk", "--platform", "23", "-t", "aarch64-linux-android", "build", "--bin", "greptime", "--profile", "release", "--no-default-features"]

docs/banner/KCCNC_NA_2023_1000x200_Email Banner.png (new binary file, 51 KiB; binary file not shown)
@@ -7,6 +7,7 @@ license.workspace = true

[dependencies]
common-base = { workspace = true }
common-error = { workspace = true }
common-macro = { workspace = true }
common-time = { workspace = true }
datatypes = { workspace = true }
greptime-proto.workspace = true

@@ -16,14 +16,16 @@ use std::any::Any;

use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
use datatypes::prelude::ConcreteDataType;
use snafu::prelude::*;
use snafu::Location;

pub type Result<T> = std::result::Result<T, Error>;

#[derive(Debug, Snafu)]
#[derive(Snafu)]
#[snafu(visibility(pub))]
#[stack_trace_debug]
pub enum Error {
    #[snafu(display("Unknown proto column datatype: {}", datatype))]
    UnknownColumnDataType { datatype: i32, location: Location },
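The same pattern recurs across most error enums in this change set: the plain `#[derive(Debug)]` is dropped in favor of `#[stack_trace_debug]`, and external causes move from a `source` field to a field annotated `#[snafu(source)]` and named `error`. A minimal sketch of the resulting shape, assuming the workspace's `common_macro` crate and `snafu`; the `ReadConfig` variant and its fields are illustrative only, not taken from the change set:

```rust
// Hypothetical error enum following the pattern introduced above.
// `#[stack_trace_debug]` is assumed to generate the `Debug` impl (which is why
// the explicit `#[derive(Debug)]` disappears) plus the stack-trace plumbing.
use common_macro::stack_trace_debug;
use snafu::{Location, Snafu};

#[derive(Snafu)]
#[snafu(visibility(pub))]
#[stack_trace_debug]
pub enum Error {
    #[snafu(display("Failed to read config file {}", path))]
    ReadConfig {
        path: String,
        // External cause: annotated `#[snafu(source)]` and named `error`
        // instead of the previous plain `source: std::io::Error` field.
        #[snafu(source)]
        error: std::io::Error,
        location: Location,
    },
}
```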
@@ -4,8 +4,6 @@ version.workspace = true
edition.workspace = true
license.workspace = true

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[features]
default = []
testing = []
@@ -14,6 +12,7 @@ testing = []
api.workspace = true
async-trait.workspace = true
common-error.workspace = true
common-macro.workspace = true
digest = "0.10"
hex = { version = "0.4" }
secrecy = { version = "0.8", features = ["serde", "alloc"] }

@@ -14,10 +14,12 @@
use common_error::ext::{BoxedError, ErrorExt};
use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
use snafu::{Location, Snafu};

#[derive(Debug, Snafu)]
#[derive(Snafu)]
#[snafu(visibility(pub))]
#[stack_trace_debug]
pub enum Error {
    #[snafu(display("Invalid config value: {}, {}", value, msg))]
    InvalidConfig { value: String, msg: String },
@@ -30,7 +32,8 @@ pub enum Error {

    #[snafu(display("IO error"))]
    Io {
        source: std::io::Error,
        #[snafu(source)]
        error: std::io::Error,
        location: Location,
    },

@@ -16,6 +16,7 @@ async-trait = "0.1"
common-catalog = { workspace = true }
common-error = { workspace = true }
common-grpc = { workspace = true }
common-macro = { workspace = true }
common-meta = { workspace = true }
common-query = { workspace = true }
common-recordbatch = { workspace = true }
@@ -30,7 +31,7 @@ futures-util.workspace = true
lazy_static.workspace = true
meta-client = { workspace = true }
metrics.workspace = true
moka = { version = "0.11", features = ["future"] }
moka = { workspace = true, features = ["future"] }
parking_lot = "0.12"
partition.workspace = true
regex.workspace = true
@@ -17,14 +17,16 @@ use std::fmt::Debug;

use common_error::ext::{BoxedError, ErrorExt};
use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
use datafusion::error::DataFusionError;
use datatypes::prelude::ConcreteDataType;
use snafu::{Location, Snafu};
use table::metadata::TableId;
use tokio::task::JoinError;

#[derive(Debug, Snafu)]
#[derive(Snafu)]
#[snafu(visibility(pub))]
#[stack_trace_debug]
pub enum Error {
    #[snafu(display("Failed to list catalogs"))]
    ListCatalogs {
@@ -92,7 +94,8 @@ pub enum Error {

    #[snafu(display("Failed to deserialize value"))]
    ValueDeserialize {
        source: serde_json::error::Error,
        #[snafu(source)]
        error: serde_json::error::Error,
        location: Location,
    },

@@ -142,7 +145,10 @@
    },

    #[snafu(display("Failed to open table in parallel"))]
    ParallelOpenTable { source: JoinError },
    ParallelOpenTable {
        #[snafu(source)]
        error: JoinError,
    },

    #[snafu(display("Table not found while opening table, table info: {}", table_info))]
    TableNotFound {
@@ -210,10 +216,10 @@
    #[snafu(display("Illegal access to catalog: {} and schema: {}", catalog, schema))]
    QueryAccessDenied { catalog: String, schema: String },

    #[snafu(display("msg: {}", msg))]
    #[snafu(display(""))]
    Datafusion {
        msg: String,
        source: DataFusionError,
        #[snafu(source)]
        error: DataFusionError,
        location: Location,
    },
@@ -18,9 +18,7 @@ use std::sync::{Arc, Weak};

use common_catalog::consts::{DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, NUMBERS_TABLE_ID};
use common_error::ext::BoxedError;
use common_meta::cache_invalidator::{
    CacheInvalidator, Context, KvCacheInvalidatorRef, TableMetadataCacheInvalidator,
};
use common_meta::cache_invalidator::{CacheInvalidator, CacheInvalidatorRef, Context};
use common_meta::datanode_manager::DatanodeManagerRef;
use common_meta::error::Result as MetaResult;
use common_meta::key::catalog_name::CatalogNameKey;
@@ -54,7 +52,7 @@ pub struct KvBackendCatalogManager {
    // TODO(LFC): Maybe use a real implementation for Standalone mode.
    // Now we use `NoopKvCacheInvalidator` for Standalone mode. In Standalone mode, the KV backend
    // is implemented by RaftEngine. Maybe we need a cache for it?
    table_metadata_cache_invalidator: TableMetadataCacheInvalidator,
    cache_invalidator: CacheInvalidatorRef,
    partition_manager: PartitionRuleManagerRef,
    table_metadata_manager: TableMetadataManagerRef,
    datanode_manager: DatanodeManagerRef,
@@ -65,13 +63,13 @@ pub struct KvBackendCatalogManager {
#[async_trait::async_trait]
impl CacheInvalidator for KvBackendCatalogManager {
    async fn invalidate_table_name(&self, ctx: &Context, table_name: TableName) -> MetaResult<()> {
        self.table_metadata_cache_invalidator
        self.cache_invalidator
            .invalidate_table_name(ctx, table_name)
            .await
    }

    async fn invalidate_table_id(&self, ctx: &Context, table_id: TableId) -> MetaResult<()> {
        self.table_metadata_cache_invalidator
        self.cache_invalidator
            .invalidate_table_id(ctx, table_id)
            .await
    }
@@ -80,15 +78,13 @@ impl CacheInvalidator for KvBackendCatalogManager {
impl KvBackendCatalogManager {
    pub fn new(
        backend: KvBackendRef,
        backend_cache_invalidator: KvCacheInvalidatorRef,
        cache_invalidator: CacheInvalidatorRef,
        datanode_manager: DatanodeManagerRef,
    ) -> Arc<Self> {
        Arc::new_cyclic(|me| Self {
            partition_manager: Arc::new(PartitionRuleManager::new(backend.clone())),
            table_metadata_manager: Arc::new(TableMetadataManager::new(backend)),
            table_metadata_cache_invalidator: TableMetadataCacheInvalidator::new(
                backend_cache_invalidator.clone(),
            ),
            cache_invalidator,
            datanode_manager,
            system_catalog: SystemCatalog {
                catalog_manager: me.clone(),
@@ -107,12 +103,6 @@ impl KvBackendCatalogManager {
    pub fn datanode_manager(&self) -> DatanodeManagerRef {
        self.datanode_manager.clone()
    }

    pub async fn invalidate_schema(&self, catalog: &str, schema: &str) {
        self.table_metadata_cache_invalidator
            .invalidate_schema(catalog, schema)
            .await
    }
}

#[async_trait::async_trait]
@@ -229,6 +219,7 @@ impl CatalogManager for KvBackendCatalogManager {
        .get(table_id)
        .await
        .context(TableMetadataManagerSnafu)?
        .map(|v| v.into_inner())
    else {
        return Ok(None);
    };
@@ -16,6 +16,7 @@ common-base = { workspace = true }
common-catalog = { workspace = true }
common-error = { workspace = true }
common-grpc = { workspace = true }
common-macro = { workspace = true }
common-meta = { workspace = true }
common-query = { workspace = true }
common-recordbatch = { workspace = true }
@@ -26,7 +27,7 @@ datatypes = { workspace = true }
derive_builder.workspace = true
enum_dispatch = "0.3"
futures-util.workspace = true
moka = { version = "0.9", features = ["future"] }
moka = { workspace = true, features = ["future"] }
parking_lot = "0.12"
prost.workspace = true
rand.workspace = true
@@ -42,14 +42,14 @@ async fn run() {
        .insert(vec![to_insert_request(weather_records_1())])
        .await
    {
        error!("Error: {e}");
        error!("Error: {e:?}");
    }

    if let Err(e) = stream_inserter
        .insert(vec![to_insert_request(weather_records_2())])
        .await
    {
        error!("Error: {e}");
        error!("Error: {e:?}");
    }

    let result = stream_inserter.finish().await;
@@ -59,7 +59,7 @@ async fn run() {
            info!("Rows written: {rows}");
        }
        Err(e) => {
            error!("Error: {e}");
            error!("Error: {e:?}");
        }
    };
}

@@ -276,7 +276,7 @@ impl Database {
            source: BoxedError::new(ServerSnafu { code, msg }.build()),
        };
        logging::error!(
            "Failed to do Flight get, addr: {}, code: {}, source: {}",
            "Failed to do Flight get, addr: {}, code: {}, source: {:?}",
            client.addr(),
            tonic_code,
            error
@@ -17,11 +17,13 @@ use std::any::Any;
use common_error::ext::{BoxedError, ErrorExt};
use common_error::status_code::StatusCode;
use common_error::{GREPTIME_ERROR_CODE, GREPTIME_ERROR_MSG};
use common_macro::stack_trace_debug;
use snafu::{Location, Snafu};
use tonic::{Code, Status};

#[derive(Debug, Snafu)]
#[derive(Snafu)]
#[snafu(visibility(pub))]
#[stack_trace_debug]
pub enum Error {
    #[snafu(display("Illegal Flight messages, reason: {}", reason))]
    IllegalFlightMessages { reason: String, location: Location },

@@ -26,6 +26,7 @@ common-base = { workspace = true }
common-catalog = { workspace = true }
common-config = { workspace = true }
common-error = { workspace = true }
common-macro = { workspace = true }
common-meta = { workspace = true }
common-procedure = { workspace = true }
common-query = { workspace = true }
@@ -48,6 +49,7 @@ metrics.workspace = true
mito2 = { workspace = true }
nu-ansi-term = "0.46"
partition = { workspace = true }
plugins.workspace = true
prost.workspace = true
query = { workspace = true }
rand.workspace = true
@@ -35,7 +35,7 @@ use query::QueryEngine;
use rustyline::error::ReadlineError;
use rustyline::Editor;
use session::context::QueryContext;
use snafu::{ErrorCompat, ResultExt};
use snafu::ResultExt;
use substrait::{DFLogicalSubstraitConvertor, SubstraitPlan};

use crate::cli::cmd::ReplCommand;
@@ -148,7 +148,7 @@ impl Repl {
            .await
            .map_err(|e| {
                let status_code = e.status_code();
                let root_cause = e.iter_chain().last().unwrap();
                let root_cause = e.output_msg();
                println!("Error: {}({status_code}), {root_cause}", status_code as u32)
            })
            .is_ok()
@@ -257,10 +257,11 @@ async fn create_query_engine(meta_addr: &str) -> Result<DatafusionQueryEngine> {
        cached_meta_backend.clone(),
        datanode_clients,
    );
    let plugins: Arc<Plugins> = Default::default();
    let plugins: Plugins = Default::default();
    let state = Arc::new(QueryEngineState::new(
        catalog_list,
        None,
        None,
        false,
        plugins.clone(),
    ));

@@ -20,7 +20,7 @@ use client::api::v1::meta::TableRouteValue;
use common_meta::ddl::utils::region_storage_path;
use common_meta::error as MetaError;
use common_meta::key::catalog_name::{CatalogNameKey, CatalogNameValue};
use common_meta::key::datanode_table::{DatanodeTableKey, DatanodeTableValue};
use common_meta::key::datanode_table::{DatanodeTableKey, DatanodeTableValue, RegionInfo};
use common_meta::key::schema_name::{SchemaNameKey, SchemaNameValue};
use common_meta::key::table_info::{TableInfoKey, TableInfoValue};
use common_meta::key::table_name::{TableNameKey, TableNameValue};
@@ -405,8 +405,11 @@ impl MigrateTableMetadata {
            DatanodeTableValue::new(
                table_id,
                regions,
                engine.to_string(),
                region_storage_path.clone(),
                RegionInfo {
                    engine: engine.to_string(),
                    region_storage_path: region_storage_path.clone(),
                    region_options: (&value.table_info.meta.options).into(),
                },
            ),
        )
    })
@@ -31,6 +31,10 @@ pub struct Instance {

impl Instance {
    pub async fn start(&mut self) -> Result<()> {
        plugins::start_datanode_plugins(self.datanode.plugins())
            .await
            .context(StartDatanodeSnafu)?;

        self.datanode.start().await.context(StartDatanodeSnafu)
    }

@@ -159,11 +163,15 @@ impl StartCommand {
        Ok(Options::Datanode(Box::new(opts)))
    }

    async fn build(self, opts: DatanodeOptions) -> Result<Instance> {
    async fn build(self, mut opts: DatanodeOptions) -> Result<Instance> {
        let plugins = plugins::setup_datanode_plugins(&mut opts)
            .await
            .context(StartDatanodeSnafu)?;

        logging::info!("Datanode start command: {:#?}", self);
        logging::info!("Datanode options: {:#?}", opts);

        let datanode = DatanodeBuilder::new(opts, None, Default::default())
        let datanode = DatanodeBuilder::new(opts, None, plugins)
            .build()
            .await
            .context(StartDatanodeSnafu)?;
@@ -255,6 +263,7 @@ mod tests {
            connect_timeout_millis,
            tcp_nodelay,
            ddl_timeout_millis,
            ..
        } = options.meta_client.unwrap();

        assert_eq!(vec!["127.0.0.1:3002".to_string()], metasrv_addr);
@@ -16,12 +16,14 @@ use std::any::Any;

use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
use config::ConfigError;
use rustyline::error::ReadlineError;
use snafu::{Location, Snafu};

#[derive(Debug, Snafu)]
#[derive(Snafu)]
#[snafu(visibility(pub))]
#[stack_trace_debug]
pub enum Error {
    #[snafu(display("Failed to create default catalog and schema"))]
    InitMetadata {
@@ -83,12 +85,6 @@ pub enum Error {
    #[snafu(display("Illegal config: {}", msg))]
    IllegalConfig { msg: String, location: Location },

    #[snafu(display("Illegal auth config"))]
    IllegalAuthConfig {
        location: Location,
        source: auth::error::Error,
    },

    #[snafu(display("Unsupported selector type: {}", selector_type))]
    UnsupportedSelectorType {
        selector_type: String,
@@ -101,13 +97,15 @@

    #[snafu(display("Cannot create REPL"))]
    ReplCreation {
        source: ReadlineError,
        #[snafu(source)]
        error: ReadlineError,
        location: Location,
    },

    #[snafu(display("Error reading command"))]
    Readline {
        source: ReadlineError,
        #[snafu(source)]
        error: ReadlineError,
        location: Location,
    },

@@ -157,7 +155,8 @@ pub enum Error {

    #[snafu(display("Failed to load layered config"))]
    LoadLayeredConfig {
        source: ConfigError,
        #[snafu(source)]
        error: ConfigError,
        location: Location,
    },

@@ -170,7 +169,15 @@ pub enum Error {
    #[snafu(display("Failed to connect to Etcd at {etcd_addr}"))]
    ConnectEtcd {
        etcd_addr: String,
        source: etcd_client::Error,
        #[snafu(source)]
        error: etcd_client::Error,
        location: Location,
    },

    #[snafu(display("Failed to serde json"))]
    SerdeJson {
        #[snafu(source)]
        error: serde_json::error::Error,
        location: Location,
    },
}
@@ -195,7 +202,6 @@ impl ErrorExt for Error {
            | Error::LoadLayeredConfig { .. }
            | Error::IllegalConfig { .. }
            | Error::InvalidReplCommand { .. }
            | Error::IllegalAuthConfig { .. }
            | Error::ConnectEtcd { .. } => StatusCode::InvalidArguments,

            Error::ReplCreation { .. } | Error::Readline { .. } => StatusCode::Internal,
@@ -208,6 +214,8 @@
            }
            Error::SubstraitEncodeLogicalPlan { source, .. } => source.status_code(),
            Error::StartCatalogManager { source, .. } => source.status_code(),

            Error::SerdeJson { .. } => StatusCode::Unexpected,
        }
    }
@@ -12,11 +12,9 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Arc;
use std::time::Duration;

use auth::UserProviderRef;
use clap::Parser;
use common_base::Plugins;
use common_telemetry::logging;
use frontend::frontend::FrontendOptions;
use frontend::instance::{FrontendInstance, Instance as FeInstance};
@@ -25,7 +23,7 @@ use servers::tls::{TlsMode, TlsOption};
use servers::Mode;
use snafu::ResultExt;

use crate::error::{self, IllegalAuthConfigSnafu, Result};
use crate::error::{self, Result, StartFrontendSnafu};
use crate::options::{Options, TopLevelOptions};

pub struct Instance {
@@ -34,10 +32,11 @@ pub struct Instance {

impl Instance {
    pub async fn start(&mut self) -> Result<()> {
        self.frontend
            .start()
        plugins::start_frontend_plugins(self.frontend.plugins().clone())
            .await
            .context(error::StartFrontendSnafu)
            .context(StartFrontendSnafu)?;

        self.frontend.start().await.context(StartFrontendSnafu)
    }

    pub async fn stop(&self) -> Result<()> {
@@ -88,6 +87,8 @@ pub struct StartCommand {
    #[clap(long)]
    http_addr: Option<String>,
    #[clap(long)]
    http_timeout: Option<u64>,
    #[clap(long)]
    grpc_addr: Option<String>,
    #[clap(long)]
    mysql_addr: Option<String>,
@@ -141,6 +142,10 @@ impl StartCommand {
            opts.http.addr = addr.clone()
        }

        if let Some(http_timeout) = self.http_timeout {
            opts.http.timeout = Duration::from_secs(http_timeout)
        }

        if let Some(disable_dashboard) = self.disable_dashboard {
            opts.http.disable_dashboard = disable_dashboard;
        }
@@ -177,38 +182,32 @@ impl StartCommand {
            opts.mode = Mode::Distributed;
        }

        opts.user_provider = self.user_provider.clone();

        Ok(Options::Frontend(Box::new(opts)))
    }

    async fn build(self, opts: FrontendOptions) -> Result<Instance> {
    async fn build(self, mut opts: FrontendOptions) -> Result<Instance> {
        let plugins = plugins::setup_frontend_plugins(&mut opts)
            .await
            .context(StartFrontendSnafu)?;

        logging::info!("Frontend start command: {:#?}", self);
        logging::info!("Frontend options: {:#?}", opts);

        let plugins = Arc::new(load_frontend_plugins(&self.user_provider)?);

        let mut instance = FeInstance::try_new_distributed(&opts, plugins.clone())
            .await
            .context(error::StartFrontendSnafu)?;
            .context(StartFrontendSnafu)?;

        instance
            .build_servers(&opts)
            .await
            .context(error::StartFrontendSnafu)?;
            .context(StartFrontendSnafu)?;

        Ok(Instance { frontend: instance })
    }
}

pub fn load_frontend_plugins(user_provider: &Option<String>) -> Result<Plugins> {
    let plugins = Plugins::new();

    if let Some(provider) = user_provider {
        let provider = auth::user_provider_from_option(provider).context(IllegalAuthConfigSnafu)?;
        plugins.insert::<UserProviderRef>(provider);
    }
    Ok(plugins)
}

#[cfg(test)]
mod tests {
    use std::io::Write;
@@ -218,6 +217,7 @@ mod tests {
    use common_base::readable_size::ReadableSize;
    use common_test_util::temp_dir::create_named_temp_file;
    use frontend::service_config::GrpcOptions;
    use servers::http::HttpOptions;

    use super::*;
    use crate::options::ENV_VAR_SEP;
@@ -303,14 +303,17 @@ mod tests {

    #[tokio::test]
    async fn test_try_from_start_command_to_anymap() {
        let command = StartCommand {
        let mut fe_opts = FrontendOptions {
            http: HttpOptions {
                disable_dashboard: false,
                ..Default::default()
            },
            user_provider: Some("static_user_provider:cmd:test=test".to_string()),
            disable_dashboard: Some(false),
            ..Default::default()
        };

        let plugins = load_frontend_plugins(&command.user_provider);
        let plugins = plugins.unwrap();
        let plugins = plugins::setup_frontend_plugins(&mut fe_opts).await.unwrap();

        let provider = plugins.get::<UserProviderRef>().unwrap();
        let result = provider
            .authenticate(
@@ -20,7 +20,7 @@ use meta_srv::bootstrap::MetaSrvInstance;
use meta_srv::metasrv::MetaSrvOptions;
use snafu::ResultExt;

use crate::error::{self, Result};
use crate::error::{self, Result, StartMetaServerSnafu};
use crate::options::{Options, TopLevelOptions};

pub struct Instance {
@@ -29,10 +29,10 @@ pub struct Instance {

impl Instance {
    pub async fn start(&mut self) -> Result<()> {
        self.instance
            .start()
        plugins::start_meta_srv_plugins(self.instance.plugins())
            .await
            .context(error::StartMetaServerSnafu)
            .context(StartMetaServerSnafu)?;
        self.instance.start().await.context(StartMetaServerSnafu)
    }

    pub async fn stop(&self) -> Result<()> {
@@ -158,12 +158,15 @@ impl StartCommand {
        Ok(Options::Metasrv(Box::new(opts)))
    }

    async fn build(self, opts: MetaSrvOptions) -> Result<Instance> {
        logging::info!("MetaSrv start command: {:#?}", self);
    async fn build(self, mut opts: MetaSrvOptions) -> Result<Instance> {
        let plugins = plugins::setup_meta_srv_plugins(&mut opts)
            .await
            .context(StartMetaServerSnafu)?;

        logging::info!("MetaSrv start command: {:#?}", self);
        logging::info!("MetaSrv options: {:#?}", opts);

        let instance = MetaSrvInstance::new(opts)
        let instance = MetaSrvInstance::new(opts, plugins)
            .await
            .context(error::BuildMetaServerSnafu)?;
@@ -21,7 +21,7 @@ use meta_srv::metasrv::MetaSrvOptions;
use serde::{Deserialize, Serialize};
use snafu::ResultExt;

use crate::error::{LoadLayeredConfigSnafu, Result};
use crate::error::{LoadLayeredConfigSnafu, Result, SerdeJsonSnafu};

pub const ENV_VAR_SEP: &str = "__";
pub const ENV_LIST_SEP: &str = ",";
@@ -94,9 +94,16 @@ impl Options {
            .ignore_empty(true)
        };

        // Workaround: Replacement for `Config::try_from(&default_opts)` due to
        // `ConfigSerializer` cannot handle the case of an empty struct contained
        // within an iterative structure.
        // See: https://github.com/mehcode/config-rs/issues/461
        let json_str = serde_json::to_string(&default_opts).context(SerdeJsonSnafu)?;
        let default_config = File::from_str(&json_str, FileFormat::Json);

        // Add default values and environment variables as the sources of the configuration.
        let mut layered_config = Config::builder()
            .add_source(Config::try_from(&default_opts).context(LoadLayeredConfigSnafu)?)
            .add_source(default_config)
            .add_source(env_source);

        // Add config file as the source of the configuration if it is specified.
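The workaround above (serializing the default options to JSON and layering them back in as a `File` source beneath the environment) can be illustrated standalone. This is a hedged sketch using only the `config` and `serde_json` crates; the `AppOptions` struct and the `APP` prefix are illustrative and not taken from the change set:

```rust
// Hypothetical, self-contained illustration of the "defaults as a JSON File
// source" layering trick referenced above (config-rs issue 461 workaround).
use config::{Config, Environment, File, FileFormat};
use serde::{Deserialize, Serialize};

#[derive(Debug, Default, Serialize, Deserialize)]
#[serde(default)]
struct AppOptions {
    mode: String,
    http_addr: String,
}

fn load_options() -> Result<AppOptions, Box<dyn std::error::Error>> {
    let defaults = AppOptions::default();
    // Serialize defaults to JSON and feed them back in as a File source,
    // instead of calling Config::try_from(&defaults).
    let json = serde_json::to_string(&defaults)?;

    let layered = Config::builder()
        .add_source(File::from_str(&json, FileFormat::Json))
        // Environment variables override the defaults, e.g. APP__HTTP_ADDR.
        .add_source(Environment::with_prefix("APP").separator("__"))
        .build()?;

    Ok(layered.try_deserialize::<AppOptions>()?)
}
```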
@@ -18,7 +18,7 @@ use catalog::kvbackend::KvBackendCatalogManager;
use catalog::CatalogManagerRef;
use clap::Parser;
use common_base::Plugins;
use common_config::{kv_store_dir, KvStoreConfig, WalConfig};
use common_config::{metadata_store_dir, KvStoreConfig, WalConfig};
use common_meta::cache_invalidator::DummyKvCacheInvalidator;
use common_meta::kv_backend::KvBackendRef;
use common_procedure::ProcedureManagerRef;
@@ -44,7 +44,6 @@ use crate::error::{
    IllegalConfigSnafu, InitMetadataSnafu, Result, ShutdownDatanodeSnafu, ShutdownFrontendSnafu,
    StartDatanodeSnafu, StartFrontendSnafu,
};
use crate::frontend::load_frontend_plugins;
use crate::options::{MixOptions, Options, TopLevelOptions};

#[derive(Parser)]
@@ -298,8 +297,11 @@ impl StartCommand {
    #[allow(unused_variables)]
    #[allow(clippy::diverging_sub_expression)]
    async fn build(self, opts: MixOptions) -> Result<Instance> {
        let plugins = Arc::new(load_frontend_plugins(&self.user_provider)?);
        let fe_opts = opts.frontend;
        let mut fe_opts = opts.frontend;
        let fe_plugins = plugins::setup_frontend_plugins(&mut fe_opts)
            .await
            .context(StartFrontendSnafu)?;

        let dn_opts = opts.datanode;

        info!("Standalone start command: {:#?}", self);
@@ -308,14 +310,17 @@ impl StartCommand {
            fe_opts, dn_opts
        );

        let kv_dir = kv_store_dir(&opts.data_home);
        let (kv_store, procedure_manager) =
            FeInstance::try_build_standalone_components(kv_dir, opts.kv_store, opts.procedure)
                .await
                .context(StartFrontendSnafu)?;
        let metadata_dir = metadata_store_dir(&opts.data_home);
        let (kv_store, procedure_manager) = FeInstance::try_build_standalone_components(
            metadata_dir,
            opts.kv_store,
            opts.procedure,
        )
        .await
        .context(StartFrontendSnafu)?;

        let datanode =
            DatanodeBuilder::new(dn_opts.clone(), Some(kv_store.clone()), plugins.clone())
            DatanodeBuilder::new(dn_opts.clone(), Some(kv_store.clone()), Default::default())
                .build()
                .await
                .context(StartDatanodeSnafu)?;
@@ -335,7 +340,7 @@ impl StartCommand {

        // TODO: build frontend instance like in distributed mode
        let mut frontend = build_frontend(
            plugins,
            fe_plugins,
            kv_store,
            procedure_manager,
            catalog_manager,
@@ -354,7 +359,7 @@ impl StartCommand {

/// Build frontend instance in standalone mode
async fn build_frontend(
    plugins: Arc<Plugins>,
    plugins: Plugins,
    kv_store: KvBackendRef,
    procedure_manager: ProcedureManagerRef,
    catalog_manager: CatalogManagerRef,
@@ -388,13 +393,13 @@ mod tests {

    #[tokio::test]
    async fn test_try_from_start_command_to_anymap() {
        let command = StartCommand {
        let mut fe_opts = FrontendOptions {
            user_provider: Some("static_user_provider:cmd:test=test".to_string()),
            ..Default::default()
        };

        let plugins = load_frontend_plugins(&command.user_provider);
        let plugins = plugins.unwrap();
        let plugins = plugins::setup_frontend_plugins(&mut fe_opts).await.unwrap();

        let provider = plugins.get::<UserProviderRef>().unwrap();
        let result = provider
            .authenticate(
@@ -593,4 +598,25 @@ mod tests {
            },
        );
    }

    #[test]
    fn test_load_default_standalone_options() {
        let options: StandaloneOptions =
            Options::load_layered_options(None, "GREPTIMEDB_FRONTEND", None).unwrap();
        let default_options = StandaloneOptions::default();
        assert_eq!(options.mode, default_options.mode);
        assert_eq!(options.enable_telemetry, default_options.enable_telemetry);
        assert_eq!(options.http, default_options.http);
        assert_eq!(options.grpc, default_options.grpc);
        assert_eq!(options.mysql, default_options.mysql);
        assert_eq!(options.postgres, default_options.postgres);
        assert_eq!(options.opentsdb, default_options.opentsdb);
        assert_eq!(options.influxdb, default_options.influxdb);
        assert_eq!(options.prom_store, default_options.prom_store);
        assert_eq!(options.wal, default_options.wal);
        assert_eq!(options.kv_store, default_options.kv_store);
        assert_eq!(options.procedure, default_options.procedure);
        assert_eq!(options.logging, default_options.logging);
        assert_eq!(options.region_engine, default_options.region_engine);
    }
}
@@ -9,6 +9,7 @@ anymap = "1.0.0-beta.2"
bitvec = "1.0"
bytes = { version = "1.1", features = ["serde"] }
common-error = { workspace = true }
common-macro = { workspace = true }
paste = "1.0"
serde = { version = "1.0", features = ["derive"] }
snafu.workspace = true

@@ -17,11 +17,13 @@ use std::io::{Read, Write};

use bytes::{Buf, BufMut, BytesMut};
use common_error::ext::ErrorExt;
use common_macro::stack_trace_debug;
use paste::paste;
use snafu::{ensure, Location, ResultExt, Snafu};

#[derive(Debug, Snafu)]
#[derive(Snafu)]
#[snafu(visibility(pub))]
#[stack_trace_debug]
pub enum Error {
    #[snafu(display(
        "Destination buffer overflow, src_len: {}, dst_len: {}",
@@ -39,7 +41,8 @@ pub enum Error {

    #[snafu(display("IO operation reach EOF"))]
    Eof {
        source: std::io::Error,
        #[snafu(source)]
        error: std::io::Error,
        location: Location,
    },
}
@@ -23,6 +23,8 @@ use std::sync::{Arc, Mutex, MutexGuard};

pub use bit_vec::BitVec;

/// [`Plugins`] is a wrapper of Arc contents.
/// Make it Cloneable and we can treat it like an Arc struct.
#[derive(Default, Clone)]
pub struct Plugins {
    inner: Arc<Mutex<anymap::Map<dyn Any + Send + Sync>>>,
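For orientation, a hedged sketch of how this cloneable `Plugins` wrapper is used elsewhere in this change set (`plugins.insert::<UserProviderRef>(...)` and `plugins.get::<UserProviderRef>()` in the frontend CLI); the `GreetingPlugin` type and the exact method signatures here are assumptions inferred from that usage, not part of the diff:

```rust
// Hypothetical usage of the Plugins wrapper shown above; insert/get signatures
// are inferred from the insert::<T>() / get::<T>() calls seen in this change set.
use common_base::Plugins;

#[derive(Clone)]
struct GreetingPlugin {
    message: String,
}

fn wire_up() {
    let plugins = Plugins::new();
    plugins.insert(GreetingPlugin {
        message: "hello".to_string(),
    });

    // Clones share the same Arc-wrapped map, so a plugin registered through
    // one handle is visible through another.
    let shared = plugins.clone();
    if let Some(plugin) = shared.get::<GreetingPlugin>() {
        println!("{}", plugin.message);
    }
}
```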
@@ -6,6 +6,7 @@ license.workspace = true

[dependencies]
common-error = { workspace = true }
common-macro = { workspace = true }
serde.workspace = true
serde_json = "1.0"
snafu = { version = "0.7", features = ["backtraces"] }

@@ -16,10 +16,12 @@ use std::any::Any;

use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
use snafu::{Location, Snafu};

#[derive(Debug, Snafu)]
#[derive(Snafu)]
#[snafu(visibility(pub))]
#[stack_trace_debug]
pub enum Error {
    #[snafu(display("Invalid full table name: {}", table_name))]
    InvalidFullTableName {

@@ -13,9 +13,6 @@
// limitations under the License.

use consts::DEFAULT_CATALOG_NAME;
use snafu::ensure;

use crate::error::Result;

pub mod consts;
pub mod error;
@@ -26,17 +23,6 @@ pub fn format_full_table_name(catalog: &str, schema: &str, table: &str) -> Strin
    format!("{catalog}.{schema}.{table}")
}

pub fn parse_full_table_name(table_name: &str) -> Result<(&str, &str, &str)> {
    let result = table_name.split('.').collect::<Vec<_>>();

    ensure!(
        result.len() == 3,
        error::InvalidFullTableNameSnafu { table_name }
    );

    Ok((result[0], result[1], result[2]))
}

/// Build db name from catalog and schema string
pub fn build_db_string(catalog: &str, schema: &str) -> String {
    if catalog == DEFAULT_CATALOG_NAME {
@@ -17,7 +17,7 @@ use std::time::Duration;
use common_base::readable_size::ReadableSize;
use serde::{Deserialize, Serialize};

#[derive(Debug, Clone, Serialize, Deserialize)]
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
#[serde(default)]
pub struct WalConfig {
    // wal file size in bytes
@@ -45,11 +45,11 @@ impl Default for WalConfig {
    }
}

pub fn kv_store_dir(store_dir: &str) -> String {
    format!("{store_dir}/kv")
pub fn metadata_store_dir(store_dir: &str) -> String {
    format!("{store_dir}/metadata")
}

#[derive(Debug, Clone, Serialize, Deserialize)]
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
#[serde(default)]
pub struct KvStoreConfig {
    // Kv file size in bytes
@@ -18,6 +18,7 @@ async-compression = { version = "0.3", features = [
async-trait.workspace = true
bytes = "1.1"
common-error = { workspace = true }
common-macro = { workspace = true }
common-runtime = { workspace = true }
datafusion.workspace = true
derive_builder.workspace = true

@@ -17,12 +17,14 @@ use std::any::Any;
use arrow_schema::ArrowError;
use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
use datafusion::parquet::errors::ParquetError;
use snafu::{Location, Snafu};
use url::ParseError;

#[derive(Debug, Snafu)]
#[derive(Snafu)]
#[snafu(visibility(pub))]
#[stack_trace_debug]
pub enum Error {
    #[snafu(display("Unsupported compression type: {}", compression_type))]
    UnsupportedCompressionType {
@@ -46,83 +48,96 @@ pub enum Error {
    #[snafu(display("Invalid url: {}", url))]
    InvalidUrl {
        url: String,
        source: ParseError,
        #[snafu(source)]
        error: ParseError,
        location: Location,
    },

    #[snafu(display("Failed to build backend"))]
    BuildBackend {
        source: object_store::Error,
        #[snafu(source)]
        error: object_store::Error,
        location: Location,
    },

    #[snafu(display("Failed to build orc reader"))]
    OrcReader {
        location: Location,
        source: orc_rust::error::Error,
        #[snafu(source)]
        error: orc_rust::error::Error,
    },

    #[snafu(display("Failed to read object from path: {}", path))]
    ReadObject {
        path: String,
        location: Location,
        source: object_store::Error,
        #[snafu(source)]
        error: object_store::Error,
    },

    #[snafu(display("Failed to write object to path: {}", path))]
    WriteObject {
        path: String,
        location: Location,
        source: object_store::Error,
        #[snafu(source)]
        error: object_store::Error,
    },

    #[snafu(display("Failed to write"))]
    AsyncWrite {
        source: std::io::Error,
        #[snafu(source)]
        error: std::io::Error,
        location: Location,
    },

    #[snafu(display("Failed to write record batch"))]
    WriteRecordBatch {
        location: Location,
        source: ArrowError,
        #[snafu(source)]
        error: ArrowError,
    },

    #[snafu(display("Failed to encode record batch"))]
    EncodeRecordBatch {
        location: Location,
        source: ParquetError,
        #[snafu(source)]
        error: ParquetError,
    },

    #[snafu(display("Failed to read record batch"))]
    ReadRecordBatch {
        location: Location,
        source: datafusion::error::DataFusionError,
        #[snafu(source)]
        error: datafusion::error::DataFusionError,
    },

    #[snafu(display("Failed to read parquet"))]
    ReadParquetSnafu {
        location: Location,
        source: datafusion::parquet::errors::ParquetError,
        #[snafu(source)]
        error: datafusion::parquet::errors::ParquetError,
    },

    #[snafu(display("Failed to convert parquet to schema"))]
    ParquetToSchema {
        location: Location,
        source: datafusion::parquet::errors::ParquetError,
        #[snafu(source)]
        error: datafusion::parquet::errors::ParquetError,
    },

    #[snafu(display("Failed to infer schema from file"))]
    InferSchema {
        location: Location,
        source: arrow_schema::ArrowError,
        #[snafu(source)]
        error: arrow_schema::ArrowError,
    },

    #[snafu(display("Failed to list object in path: {}", path))]
    ListObjects {
        path: String,
        location: Location,
        source: object_store::Error,
        #[snafu(source)]
        error: object_store::Error,
    },

    #[snafu(display("Invalid connection: {}", msg))]
@@ -131,7 +146,8 @@ pub enum Error {
    #[snafu(display("Failed to join handle"))]
    JoinHandle {
        location: Location,
        source: tokio::task::JoinError,
        #[snafu(source)]
        error: tokio::task::JoinError,
    },

    #[snafu(display("Failed to parse format {} with value: {}", key, value))]
@@ -143,7 +159,8 @@ pub enum Error {

    #[snafu(display("Failed to merge schema"))]
    MergeSchema {
        source: arrow_schema::ArrowError,
        #[snafu(source)]
        error: arrow_schema::ArrowError,
        location: Location,
    },
@@ -13,11 +13,12 @@
// limitations under the License.

use std::any::Any;
use std::sync::Arc;

use crate::status_code::StatusCode;

/// Extension to [`Error`](std::error::Error) in std.
pub trait ErrorExt: std::error::Error {
pub trait ErrorExt: StackError {
    /// Map this error to [StatusCode].
    fn status_code(&self) -> StatusCode {
        StatusCode::Unknown
@@ -33,6 +34,63 @@ pub trait ErrorExt: std::error::Error {
    /// Returns the error as [Any](std::any::Any) so that it can be
    /// downcast to a specific implementation.
    fn as_any(&self) -> &dyn Any;

    fn output_msg(&self) -> String
    where
        Self: Sized,
    {
        let error = self.last();
        if let Some(external_error) = error.source() {
            let external_root = external_error.sources().last().unwrap();

            if error.to_string().is_empty() {
                format!("{external_root}")
            } else {
                format!("{error}: {external_root}")
            }
        } else {
            format!("{error}")
        }
    }
}

pub trait StackError: std::error::Error {
    fn debug_fmt(&self, layer: usize, buf: &mut Vec<String>);

    fn next(&self) -> Option<&dyn StackError>;

    fn last(&self) -> &dyn StackError
    where
        Self: Sized,
    {
        let Some(mut result) = self.next() else {
            return self;
        };
        while let Some(err) = result.next() {
            result = err;
        }
        result
    }
}

impl<T: ?Sized + StackError> StackError for Arc<T> {
    fn debug_fmt(&self, layer: usize, buf: &mut Vec<String>) {
        self.as_ref().debug_fmt(layer, buf)
    }

    fn next(&self) -> Option<&dyn StackError> {
        self.as_ref().next()
    }
}

impl<T: StackError> StackError for Box<T> {
    fn debug_fmt(&self, layer: usize, buf: &mut Vec<String>) {
        self.as_ref().debug_fmt(layer, buf)
    }

    fn next(&self) -> Option<&dyn StackError> {
        self.as_ref().next()
    }
}

/// An opaque boxed error based on errors that implement [ErrorExt] trait.
@@ -90,6 +148,16 @@ impl crate::snafu::ErrorCompat for BoxedError {
    }
}

impl StackError for BoxedError {
    fn debug_fmt(&self, layer: usize, buf: &mut Vec<String>) {
        self.inner.debug_fmt(layer, buf)
    }

    fn next(&self) -> Option<&dyn StackError> {
        self.inner.next()
    }
}

/// Error type with plain error message
#[derive(Debug)]
pub struct PlainError {
@@ -128,3 +196,13 @@ impl crate::ext::ErrorExt for PlainError {
        self as _
    }
}

impl StackError for PlainError {
    fn debug_fmt(&self, layer: usize, buf: &mut Vec<String>) {
        buf.push(format!("{}: {}", layer, self.msg))
    }

    fn next(&self) -> Option<&dyn StackError> {
        None
    }
}
@@ -50,6 +50,7 @@ mod tests {
|
||||
use snafu::{GenerateImplicitData, Location};
|
||||
|
||||
use super::*;
|
||||
use crate::ext::StackError;
|
||||
|
||||
#[derive(Debug, Snafu)]
|
||||
#[snafu(display("This is a leaf error"))]
|
||||
@@ -65,6 +66,14 @@ mod tests {
|
||||
}
|
||||
}
|
||||
|
||||
impl StackError for Leaf {
|
||||
fn debug_fmt(&self, _: usize, _: &mut Vec<String>) {}
|
||||
|
||||
fn next(&self) -> Option<&dyn StackError> {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Snafu)]
|
||||
#[snafu(display("This is a leaf with location"))]
|
||||
struct LeafWithLocation {
|
||||
@@ -81,6 +90,14 @@ mod tests {
|
||||
}
|
||||
}
|
||||
|
||||
impl StackError for LeafWithLocation {
|
||||
fn debug_fmt(&self, _: usize, _: &mut Vec<String>) {}
|
||||
|
||||
fn next(&self) -> Option<&dyn StackError> {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Snafu)]
|
||||
#[snafu(display("Internal error"))]
|
||||
struct Internal {
|
||||
@@ -99,6 +116,17 @@ mod tests {
|
||||
}
|
||||
}
|
||||
|
||||
impl StackError for Internal {
|
||||
fn debug_fmt(&self, layer: usize, buf: &mut Vec<String>) {
|
||||
buf.push(format!("{}: Internal error, at {}", layer, self.location));
|
||||
self.source.debug_fmt(layer + 1, buf);
|
||||
}
|
||||
|
||||
fn next(&self) -> Option<&dyn StackError> {
|
||||
Some(&self.source)
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_debug_format() {
|
||||
let err = Leaf;
|
||||
|
||||
@@ -11,6 +11,7 @@
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
#![feature(error_iter)]
|
||||
|
||||
pub mod ext;
|
||||
pub mod format;
|
||||
|
||||
@@ -19,7 +19,7 @@ use std::fmt;
|
||||
|
||||
use snafu::Location;
|
||||
|
||||
use crate::ext::ErrorExt;
|
||||
use crate::ext::{ErrorExt, StackError};
|
||||
use crate::status_code::StatusCode;
|
||||
|
||||
/// A mock error mainly for test.
|
||||
@@ -69,3 +69,11 @@ impl ErrorExt for MockError {
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
impl StackError for MockError {
|
||||
fn debug_fmt(&self, _: usize, _: &mut Vec<String>) {}
|
||||
|
||||
fn next(&self) -> Option<&dyn StackError> {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
@@ -12,8 +12,10 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
use std::sync::Arc;
|
||||
mod greatest;
|
||||
mod to_unixtime;
|
||||
|
||||
use greatest::GreatestFunction;
|
||||
use to_unixtime::ToUnixtimeFunction;
|
||||
|
||||
use crate::scalars::function_registry::FunctionRegistry;
|
||||
@@ -23,5 +25,6 @@ pub(crate) struct TimestampFunction;
|
||||
impl TimestampFunction {
|
||||
pub fn register(registry: &FunctionRegistry) {
|
||||
registry.register(Arc::new(ToUnixtimeFunction));
|
||||
registry.register(Arc::new(GreatestFunction));
|
||||
}
|
||||
}
|
||||
|
||||
175
src/common/function/src/scalars/timestamp/greatest.rs
Normal file
@@ -0,0 +1,175 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::fmt::{self};
|
||||
|
||||
use common_query::error::{
|
||||
self, ArrowComputeSnafu, InvalidFuncArgsSnafu, Result, UnsupportedInputDataTypeSnafu,
|
||||
};
|
||||
use common_query::prelude::{Signature, Volatility};
|
||||
use datatypes::arrow::array::AsArray;
|
||||
use datatypes::arrow::compute::cast;
|
||||
use datatypes::arrow::compute::kernels::comparison::gt_dyn;
|
||||
use datatypes::arrow::compute::kernels::zip;
|
||||
use datatypes::arrow::datatypes::{DataType as ArrowDataType, Date32Type};
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::vectors::{Helper, VectorRef};
|
||||
use snafu::{ensure, ResultExt};
|
||||
|
||||
use crate::scalars::function::{Function, FunctionContext};
|
||||
|
||||
#[derive(Clone, Debug, Default)]
|
||||
pub struct GreatestFunction;
|
||||
|
||||
const NAME: &str = "greatest";
|
||||
|
||||
impl Function for GreatestFunction {
|
||||
fn name(&self) -> &str {
|
||||
NAME
|
||||
}
|
||||
|
||||
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
|
||||
Ok(ConcreteDataType::date_datatype())
|
||||
}
|
||||
|
||||
fn signature(&self) -> Signature {
|
||||
Signature::uniform(
|
||||
2,
|
||||
vec![
|
||||
ConcreteDataType::string_datatype(),
|
||||
ConcreteDataType::date_datatype(),
|
||||
],
|
||||
Volatility::Immutable,
|
||||
)
|
||||
}
|
||||
|
||||
fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
|
||||
ensure!(
|
||||
columns.len() == 2,
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"The length of the args is not correct, expect exactly two, have: {}",
|
||||
columns.len()
|
||||
),
|
||||
}
|
||||
);
|
||||
match columns[0].data_type() {
|
||||
ConcreteDataType::String(_) => {
|
||||
let column1 = cast(&columns[0].to_arrow_array(), &ArrowDataType::Date32)
|
||||
.context(ArrowComputeSnafu)?;
|
||||
let column1 = column1.as_primitive::<Date32Type>();
|
||||
let column2 = cast(&columns[1].to_arrow_array(), &ArrowDataType::Date32)
|
||||
.context(ArrowComputeSnafu)?;
|
||||
let column2 = column2.as_primitive::<Date32Type>();
|
||||
let boolean_array = gt_dyn(&column1, &column2).context(ArrowComputeSnafu)?;
|
||||
let result =
|
||||
zip::zip(&boolean_array, &column1, &column2).context(ArrowComputeSnafu)?;
|
||||
Ok(Helper::try_into_vector(&result).context(error::FromArrowArraySnafu)?)
|
||||
}
|
||||
ConcreteDataType::Date(_) => {
|
||||
let column1 = columns[0].to_arrow_array();
|
||||
let column1 = column1.as_primitive::<Date32Type>();
|
||||
let column2 = columns[1].to_arrow_array();
|
||||
let column2 = column2.as_primitive::<Date32Type>();
|
||||
let boolean_array = gt_dyn(&column1, &column2).context(ArrowComputeSnafu)?;
|
||||
let result =
|
||||
zip::zip(&boolean_array, &column1, &column2).context(ArrowComputeSnafu)?;
|
||||
Ok(Helper::try_into_vector(&result).context(error::FromArrowArraySnafu)?)
|
||||
}
|
||||
_ => UnsupportedInputDataTypeSnafu {
|
||||
function: NAME,
|
||||
datatypes: columns.iter().map(|c| c.data_type()).collect::<Vec<_>>(),
|
||||
}
|
||||
.fail(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for GreatestFunction {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "GREATEST")
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::str::FromStr;
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_time::Date;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::types::DateType;
|
||||
use datatypes::value::Value;
|
||||
use datatypes::vectors::{DateVector, StringVector, Vector};
|
||||
|
||||
use super::GreatestFunction;
|
||||
use crate::scalars::function::FunctionContext;
|
||||
use crate::scalars::Function;
|
||||
|
||||
#[test]
|
||||
fn test_greatest_takes_string_vector() {
|
||||
let function = GreatestFunction;
|
||||
assert_eq!(
|
||||
function.return_type(&[]).unwrap(),
|
||||
ConcreteDataType::Date(DateType)
|
||||
);
|
||||
let columns = vec![
|
||||
Arc::new(StringVector::from(vec![
|
||||
"1970-01-01".to_string(),
|
||||
"2012-12-23".to_string(),
|
||||
])) as _,
|
||||
Arc::new(StringVector::from(vec![
|
||||
"2001-02-01".to_string(),
|
||||
"1999-01-01".to_string(),
|
||||
])) as _,
|
||||
];
|
||||
|
||||
let result = function.eval(FunctionContext::default(), &columns).unwrap();
|
||||
let result = result.as_any().downcast_ref::<DateVector>().unwrap();
|
||||
assert_eq!(result.len(), 2);
|
||||
assert_eq!(
|
||||
result.get(0),
|
||||
Value::Date(Date::from_str("2001-02-01").unwrap())
|
||||
);
|
||||
assert_eq!(
|
||||
result.get(1),
|
||||
Value::Date(Date::from_str("2012-12-23").unwrap())
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_greatest_takes_date_vector() {
|
||||
let function = GreatestFunction;
|
||||
assert_eq!(
|
||||
function.return_type(&[]).unwrap(),
|
||||
ConcreteDataType::Date(DateType)
|
||||
);
|
||||
let columns = vec![
|
||||
Arc::new(DateVector::from_slice(vec![-1, 2])) as _,
|
||||
Arc::new(DateVector::from_slice(vec![0, 1])) as _,
|
||||
];
|
||||
|
||||
let result = function.eval(FunctionContext::default(), &columns).unwrap();
|
||||
let result = result.as_any().downcast_ref::<DateVector>().unwrap();
|
||||
assert_eq!(result.len(), 2);
|
||||
assert_eq!(
|
||||
result.get(0),
|
||||
Value::Date(Date::from_str("1970-01-01").unwrap())
|
||||
);
|
||||
assert_eq!(
|
||||
result.get(1),
|
||||
Value::Date(Date::from_str("1970-01-03").unwrap())
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -10,6 +10,7 @@ async-trait.workspace = true
|
||||
common-base = { workspace = true }
|
||||
common-catalog = { workspace = true }
|
||||
common-error = { workspace = true }
|
||||
common-macro = { workspace = true }
|
||||
common-query = { workspace = true }
|
||||
common-telemetry = { workspace = true }
|
||||
common-time = { workspace = true }
|
||||
|
||||
@@ -16,10 +16,12 @@ use std::any::Any;
|
||||
|
||||
use common_error::ext::ErrorExt;
|
||||
use common_error::status_code::StatusCode;
|
||||
use common_macro::stack_trace_debug;
|
||||
use snafu::{Location, Snafu};
|
||||
|
||||
#[derive(Debug, Snafu)]
|
||||
#[derive(Snafu)]
|
||||
#[snafu(visibility(pub))]
|
||||
#[stack_trace_debug]
|
||||
pub enum Error {
|
||||
#[snafu(display("Illegal delete request, reason: {reason}"))]
|
||||
IllegalDeleteRequest { reason: String, location: Location },
|
||||
|
||||
@@ -11,6 +11,7 @@ async-trait = "0.1"
|
||||
backtrace = "0.3"
|
||||
common-base = { workspace = true }
|
||||
common-error = { workspace = true }
|
||||
common-macro = { workspace = true }
|
||||
common-recordbatch = { workspace = true }
|
||||
common-runtime = { workspace = true }
|
||||
common-telemetry = { workspace = true }
|
||||
|
||||
@@ -30,7 +30,7 @@ use crate::error::{CreateChannelSnafu, InvalidConfigFilePathSnafu, InvalidTlsCon
|
||||
|
||||
const RECYCLE_CHANNEL_INTERVAL_SECS: u64 = 60;
|
||||
pub const DEFAULT_GRPC_REQUEST_TIMEOUT_SECS: u64 = 10;
|
||||
pub const DEFAULT_GRPC_CONNECT_TIMEOUT_SECS: u64 = 10;
|
||||
pub const DEFAULT_GRPC_CONNECT_TIMEOUT_SECS: u64 = 1;
|
||||
pub const DEFAULT_MAX_GRPC_RECV_MESSAGE_SIZE: usize = 512 * 1024 * 1024;
|
||||
pub const DEFAULT_MAX_GRPC_SEND_MESSAGE_SIZE: usize = 512 * 1024 * 1024;
|
||||
|
||||
|
||||
@@ -17,19 +17,22 @@ use std::io;
|
||||
|
||||
use common_error::ext::ErrorExt;
|
||||
use common_error::status_code::StatusCode;
|
||||
use common_macro::stack_trace_debug;
|
||||
use snafu::{Location, Snafu};
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
|
||||
#[derive(Debug, Snafu)]
|
||||
#[derive(Snafu)]
|
||||
#[snafu(visibility(pub))]
|
||||
#[stack_trace_debug]
|
||||
pub enum Error {
|
||||
#[snafu(display("Invalid client tls config, {}", msg))]
|
||||
InvalidTlsConfig { msg: String },
|
||||
|
||||
#[snafu(display("Invalid config file path"))]
|
||||
InvalidConfigFilePath {
|
||||
source: io::Error,
|
||||
#[snafu(source)]
|
||||
error: io::Error,
|
||||
location: Location,
|
||||
},
|
||||
|
||||
@@ -48,7 +51,8 @@ pub enum Error {
|
||||
|
||||
#[snafu(display("Failed to create gRPC channel"))]
|
||||
CreateChannel {
|
||||
source: tonic::transport::Error,
|
||||
#[snafu(source)]
|
||||
error: tonic::transport::Error,
|
||||
location: Location,
|
||||
},
|
||||
|
||||
@@ -63,7 +67,8 @@ pub enum Error {
|
||||
|
||||
#[snafu(display("Failed to decode FlightData"))]
|
||||
DecodeFlightData {
|
||||
source: api::DecodeError,
|
||||
#[snafu(source)]
|
||||
error: api::DecodeError,
|
||||
location: Location,
|
||||
},
|
||||
|
||||
|
||||
@@ -13,6 +13,15 @@ common-telemetry = { workspace = true }
|
||||
proc-macro2 = "1.0.66"
|
||||
quote = "1.0"
|
||||
syn = "1.0"
|
||||
syn2 = { version = "2.0", package = "syn", features = [
|
||||
"derive",
|
||||
"parsing",
|
||||
"printing",
|
||||
"clone-impls",
|
||||
"proc-macro",
|
||||
"extra-traits",
|
||||
"full",
|
||||
] }
|
||||
|
||||
[dev-dependencies]
|
||||
arc-swap = "1.0"
|
||||
|
||||
@@ -15,6 +15,7 @@
|
||||
mod aggr_func;
|
||||
mod print_caller;
|
||||
mod range_fn;
|
||||
mod stack_trace_debug;
|
||||
|
||||
use aggr_func::{impl_aggr_func_type_store, impl_as_aggr_func_creator};
|
||||
use print_caller::process_print_caller;
|
||||
@@ -87,3 +88,23 @@ pub fn range_fn(args: TokenStream, input: TokenStream) -> TokenStream {
|
||||
pub fn print_caller(args: TokenStream, input: TokenStream) -> TokenStream {
|
||||
process_print_caller(args, input)
|
||||
}
|
||||
|
||||
/// Attribute macro to derive [std::fmt::Debug] for the annotated `Error` type.
|
||||
///
|
||||
/// The generated `Debug` implementation will print the error in a stack trace style. E.g.:
|
||||
/// ```plaintext
|
||||
/// 0: Foo error, at src/common/catalog/src/error.rs:80:10
|
||||
/// 1: Bar error, at src/common/function/src/error.rs:90:10
|
||||
/// 2: Root cause, invalid table name, at src/common/catalog/src/error.rs:100:10
|
||||
/// ```
|
||||
///
|
||||
/// Notes on using this macro:
|
||||
/// - `#[snafu(display)]` must be present on each enum variant,
///   and should not include `location` and `source`.
/// - Only our internal errors can be named `source`.
///   All external errors should be named `error`, with an `#[snafu(source)]` annotation.
|
||||
/// - `common_error` crate must be accessible.
|
||||
#[proc_macro_attribute]
|
||||
pub fn stack_trace_debug(args: TokenStream, input: TokenStream) -> TokenStream {
|
||||
stack_trace_debug::stack_trace_style_impl(args.into(), input.into()).into()
|
||||
}
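As a concrete sketch of these rules (hypothetical variant names; the `ErrorExt` impl with status codes and `as_any` is omitted), an error enum annotated with the macro typically looks like this:

```rust
use common_error::ext::BoxedError;
use common_macro::stack_trace_debug;
use snafu::{Location, Snafu};

// Note: no `#[derive(Debug)]` -- the macro generates the stack-trace-style Debug impl.
#[derive(Snafu)]
#[snafu(visibility(pub))]
#[stack_trace_debug]
pub enum Error {
    // Internal cause: the field is named `source` and is itself a stack error.
    #[snafu(display("Failed to load configuration"))]
    LoadConfig {
        source: BoxedError,
        location: Location,
    },

    // External cause: the field is named `error` and carries `#[snafu(source)]`.
    #[snafu(display("Failed to open file: {}", path))]
    OpenFile {
        path: String,
        #[snafu(source)]
        error: std::io::Error,
        location: Location,
    },
}
```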
278
src/common/macro/src/stack_trace_debug.rs
Normal file
@@ -0,0 +1,278 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//! implement `::common_error::ext::StackError`
|
||||
|
||||
use proc_macro2::{Span, TokenStream as TokenStream2};
|
||||
use quote::{quote, quote_spanned};
|
||||
use syn2::spanned::Spanned;
|
||||
use syn2::{parenthesized, Attribute, Ident, ItemEnum, Variant};
|
||||
|
||||
pub fn stack_trace_style_impl(args: TokenStream2, input: TokenStream2) -> TokenStream2 {
|
||||
let input_cloned: TokenStream2 = input.clone();
|
||||
|
||||
let error_enum_definition: ItemEnum = syn2::parse2(input_cloned).unwrap();
|
||||
let enum_name = error_enum_definition.ident;
|
||||
|
||||
let mut variants = vec![];
|
||||
|
||||
for error_variant in error_enum_definition.variants {
|
||||
let variant = ErrorVariant::from_enum_variant(error_variant);
|
||||
variants.push(variant);
|
||||
}
|
||||
|
||||
let debug_fmt_fn = build_debug_fmt_impl(enum_name.clone(), variants.clone());
|
||||
let next_fn = build_next_impl(enum_name.clone(), variants);
|
||||
let debug_impl = build_debug_impl(enum_name.clone());
|
||||
|
||||
quote! {
|
||||
#args
|
||||
#input
|
||||
|
||||
impl ::common_error::ext::StackError for #enum_name {
|
||||
#debug_fmt_fn
|
||||
#next_fn
|
||||
}
|
||||
|
||||
#debug_impl
|
||||
}
|
||||
}
|
||||
|
||||
/// Generate `debug_fmt` fn.
|
||||
///
|
||||
/// The generated fn will be like:
|
||||
/// ```rust, ignore
|
||||
/// fn debug_fmt(&self, layer: usize, buf: &mut Vec<String>);
|
||||
/// ```
|
||||
fn build_debug_fmt_impl(enum_name: Ident, variants: Vec<ErrorVariant>) -> TokenStream2 {
|
||||
let match_arms = variants
|
||||
.iter()
|
||||
.map(|v| v.to_debug_match_arm())
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
quote! {
|
||||
fn debug_fmt(&self, layer: usize, buf: &mut Vec<String>) {
|
||||
use #enum_name::*;
|
||||
match self {
|
||||
#(#match_arms)*
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Generate `next` fn.
|
||||
///
|
||||
/// The generated fn will be like:
|
||||
/// ```rust, ignore
|
||||
/// fn next(&self) -> Option<&dyn ::common_error::ext::StackError>;
|
||||
/// ```
|
||||
fn build_next_impl(enum_name: Ident, variants: Vec<ErrorVariant>) -> TokenStream2 {
|
||||
let match_arms = variants
|
||||
.iter()
|
||||
.map(|v| v.to_next_match_arm())
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
quote! {
|
||||
fn next(&self) -> Option<&dyn ::common_error::ext::StackError> {
|
||||
use #enum_name::*;
|
||||
match self {
|
||||
#(#match_arms)*
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Implement [std::fmt::Debug] via `debug_fmt`
|
||||
fn build_debug_impl(enum_name: Ident) -> TokenStream2 {
|
||||
quote! {
|
||||
impl std::fmt::Debug for #enum_name {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
use ::common_error::ext::StackError;
|
||||
let mut buf = vec![];
|
||||
self.debug_fmt(0, &mut buf);
|
||||
write!(f, "{}", buf.join("\n"))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
struct ErrorVariant {
|
||||
name: Ident,
|
||||
fields: Vec<Ident>,
|
||||
has_location: bool,
|
||||
has_source: bool,
|
||||
has_external_cause: bool,
|
||||
display: TokenStream2,
|
||||
span: Span,
|
||||
cfg_attr: Option<Attribute>,
|
||||
}
|
||||
|
||||
impl ErrorVariant {
|
||||
/// Construct self from [Variant]
|
||||
fn from_enum_variant(variant: Variant) -> Self {
|
||||
let span = variant.span();
|
||||
let mut has_location = false;
|
||||
let mut has_source = false;
|
||||
let mut has_external_cause = false;
|
||||
|
||||
for field in &variant.fields {
|
||||
if let Some(ident) = &field.ident {
|
||||
if ident == "location" {
|
||||
has_location = true;
|
||||
} else if ident == "source" {
|
||||
has_source = true;
|
||||
} else if ident == "error" {
|
||||
has_external_cause = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let mut display = None;
|
||||
let mut cfg_attr = None;
|
||||
for attr in variant.attrs {
|
||||
if attr.path().is_ident("snafu") {
|
||||
attr.parse_nested_meta(|meta| {
|
||||
if meta.path.is_ident("display") {
|
||||
let content;
|
||||
parenthesized!(content in meta.input);
|
||||
let display_ts: TokenStream2 = content.parse()?;
|
||||
display = Some(display_ts);
|
||||
Ok(())
|
||||
} else {
|
||||
Err(meta.error("unrecognized repr"))
|
||||
}
|
||||
})
|
||||
.expect("Each error should contains a display attribute");
|
||||
}
|
||||
|
||||
if attr.path().is_ident("cfg") {
|
||||
cfg_attr = Some(attr);
|
||||
}
|
||||
}
|
||||
|
||||
let field_ident = variant
|
||||
.fields
|
||||
.iter()
|
||||
.map(|f| f.ident.clone().unwrap_or_else(|| Ident::new("_", f.span())))
|
||||
.collect();
|
||||
|
||||
Self {
|
||||
name: variant.ident,
|
||||
fields: field_ident,
|
||||
has_location,
|
||||
has_source,
|
||||
has_external_cause,
|
||||
display: display.unwrap(),
|
||||
span,
|
||||
cfg_attr,
|
||||
}
|
||||
}
|
||||
|
||||
/// Convert self into a match arm that will be used in [build_debug_fmt_impl].
|
||||
///
|
||||
/// The generated match arm will be like:
|
||||
/// ```rust, ignore
|
||||
/// ErrorKindWithSource { source, .. } => {
|
||||
/// debug_fmt(source, layer + 1, buf);
|
||||
/// },
|
||||
/// ErrorKindWithoutSource { .. } => {
|
||||
/// buf.push(format!("{layer}: {}, at {}", format!(#display), location)));
|
||||
/// }
|
||||
/// ```
|
||||
///
|
||||
/// The generated code assumes fn `debug_fmt`, var `layer`, var `buf` are in scope.
|
||||
fn to_debug_match_arm(&self) -> TokenStream2 {
|
||||
let name = &self.name;
|
||||
let fields = &self.fields;
|
||||
let display = &self.display;
|
||||
let cfg = if let Some(cfg) = &self.cfg_attr {
|
||||
quote_spanned!(cfg.span() => #cfg)
|
||||
} else {
|
||||
quote! {}
|
||||
};
|
||||
|
||||
match (self.has_location, self.has_source, self.has_external_cause) {
|
||||
(true, true, _) => quote_spanned! {
|
||||
self.span => #cfg #[allow(unused_variables)] #name { #(#fields),*, } => {
|
||||
buf.push(format!("{layer}: {}, at {}", format!(#display), location));
|
||||
source.debug_fmt(layer + 1, buf);
|
||||
},
|
||||
},
|
||||
(true, false, true) => quote_spanned! {
|
||||
self.span => #cfg #[allow(unused_variables)] #name { #(#fields),* } => {
|
||||
buf.push(format!("{layer}: {}, at {}", format!(#display), location));
|
||||
buf.push(format!("{}: {:?}", layer + 1, error));
|
||||
},
|
||||
},
|
||||
(true, false, false) => quote_spanned! {
|
||||
self.span => #cfg #[allow(unused_variables)] #name { #(#fields),* } => {
|
||||
buf.push(format!("{layer}: {}, at {}", format!(#display), location));
|
||||
},
|
||||
},
|
||||
(false, true, _) => quote_spanned! {
|
||||
self.span => #cfg #[allow(unused_variables)] #name { #(#fields),* } => {
|
||||
buf.push(format!("{layer}: {}", format!(#display)));
|
||||
source.debug_fmt(layer + 1, buf);
|
||||
},
|
||||
},
|
||||
(false, false, true) => quote_spanned! {
|
||||
self.span => #cfg #[allow(unused_variables)] #name { #(#fields),* } => {
|
||||
buf.push(format!("{layer}: {}", format!(#display)));
|
||||
buf.push(format!("{}: {:?}", layer + 1, error));
|
||||
},
|
||||
},
|
||||
(false, false, false) => quote_spanned! {
|
||||
self.span => #cfg #[allow(unused_variables)] #name { #(#fields),* } => {
|
||||
buf.push(format!("{layer}: {}", format!(#display)));
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
/// Convert self into a match arm that will be used in [build_next_impl].
|
||||
///
|
||||
/// The generated match arm will be like:
|
||||
/// ```rust, ignore
|
||||
/// ErrorKindWithSource { source, .. } => {
|
||||
/// Some(source)
|
||||
/// },
|
||||
/// ErrorKindWithoutSource { .. } => {
|
||||
/// None
|
||||
/// }
|
||||
/// ```
|
||||
fn to_next_match_arm(&self) -> TokenStream2 {
|
||||
let name = &self.name;
|
||||
let fields = &self.fields;
|
||||
let cfg = if let Some(cfg) = &self.cfg_attr {
|
||||
quote_spanned!(cfg.span() => #cfg)
|
||||
} else {
|
||||
quote! {}
|
||||
};
|
||||
|
||||
if self.has_source {
|
||||
quote_spanned! {
|
||||
self.span => #cfg #[allow(unused_variables)] #name { #(#fields),* } => {
|
||||
Some(source)
|
||||
},
|
||||
}
|
||||
} else {
|
||||
quote_spanned! {
|
||||
self.span => #cfg #[allow(unused_variables)] #name { #(#fields),* } =>{
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -6,6 +6,7 @@ license.workspace = true
|
||||
|
||||
[dependencies]
|
||||
common-error = { workspace = true }
|
||||
common-macro = { workspace = true }
|
||||
snafu.workspace = true
|
||||
tempfile = "3.4"
|
||||
tokio.workspace = true
|
||||
|
||||
@@ -16,14 +16,16 @@ use std::any::Any;
|
||||
|
||||
use common_error::ext::{BoxedError, ErrorExt};
|
||||
use common_error::status_code::StatusCode;
|
||||
use common_macro::stack_trace_debug;
|
||||
use snafu::Snafu;
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
|
||||
#[derive(Debug, Snafu)]
|
||||
#[derive(Snafu)]
|
||||
#[snafu(visibility(pub))]
|
||||
#[stack_trace_debug]
|
||||
pub enum Error {
|
||||
#[snafu(display(""))]
|
||||
#[snafu(display("Internal error"))]
|
||||
Internal { source: BoxedError },
|
||||
|
||||
#[snafu(display("Memory profiling is not supported"))]
|
||||
|
||||
@@ -17,13 +17,18 @@ use std::path::PathBuf;
|
||||
|
||||
use common_error::ext::{BoxedError, ErrorExt};
|
||||
use common_error::status_code::StatusCode;
|
||||
use common_macro::stack_trace_debug;
|
||||
use snafu::{Location, Snafu};
|
||||
|
||||
#[derive(Debug, Snafu)]
|
||||
#[derive(Snafu)]
|
||||
#[snafu(visibility(pub))]
|
||||
#[stack_trace_debug]
|
||||
pub enum Error {
|
||||
#[snafu(display("Failed to read OPT_PROF"))]
|
||||
ReadOptProf { source: tikv_jemalloc_ctl::Error },
|
||||
ReadOptProf {
|
||||
#[snafu(source)]
|
||||
error: tikv_jemalloc_ctl::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Memory profiling is not enabled"))]
|
||||
ProfilingNotEnabled,
|
||||
@@ -34,13 +39,15 @@ pub enum Error {
|
||||
#[snafu(display("Failed to open temp file: {}", path))]
|
||||
OpenTempFile {
|
||||
path: String,
|
||||
source: std::io::Error,
|
||||
#[snafu(source)]
|
||||
error: std::io::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to dump profiling data to temp file: {:?}", path))]
|
||||
DumpProfileData {
|
||||
path: PathBuf,
|
||||
source: tikv_jemalloc_ctl::Error,
|
||||
#[snafu(source)]
|
||||
error: tikv_jemalloc_ctl::Error,
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
@@ -12,9 +12,12 @@ api = { workspace = true }
|
||||
arrow-flight.workspace = true
|
||||
async-stream.workspace = true
|
||||
async-trait.workspace = true
|
||||
base64 = "0.21"
|
||||
bytes = "1.4"
|
||||
common-catalog = { workspace = true }
|
||||
common-error = { workspace = true }
|
||||
common-grpc-expr.workspace = true
|
||||
common-macro = { workspace = true }
|
||||
common-procedure = { workspace = true }
|
||||
common-recordbatch = { workspace = true }
|
||||
common-runtime = { workspace = true }
|
||||
|
||||
@@ -17,7 +17,6 @@ use std::sync::Arc;
|
||||
use table::metadata::TableId;
|
||||
|
||||
use crate::error::Result;
|
||||
use crate::key::schema_name::SchemaNameKey;
|
||||
use crate::key::table_info::TableInfoKey;
|
||||
use crate::key::table_name::TableNameKey;
|
||||
use crate::key::table_route::TableRouteKey;
|
||||
@@ -68,36 +67,25 @@ impl CacheInvalidator for DummyCacheInvalidator {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct TableMetadataCacheInvalidator(KvCacheInvalidatorRef);
|
||||
|
||||
impl TableMetadataCacheInvalidator {
|
||||
pub fn new(kv_cache_invalidator: KvCacheInvalidatorRef) -> Self {
|
||||
Self(kv_cache_invalidator)
|
||||
}
|
||||
|
||||
pub async fn invalidate_schema(&self, catalog: &str, schema: &str) {
|
||||
let key = SchemaNameKey::new(catalog, schema).as_raw_key();
|
||||
self.0.invalidate_key(&key).await;
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl CacheInvalidator for TableMetadataCacheInvalidator {
|
||||
impl<T> CacheInvalidator for T
|
||||
where
|
||||
T: KvCacheInvalidator,
|
||||
{
|
||||
async fn invalidate_table_name(&self, _ctx: &Context, table_name: TableName) -> Result<()> {
|
||||
let key: TableNameKey = (&table_name).into();
|
||||
|
||||
self.0.invalidate_key(&key.as_raw_key()).await;
|
||||
self.invalidate_key(&key.as_raw_key()).await;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn invalidate_table_id(&self, _ctx: &Context, table_id: TableId) -> Result<()> {
|
||||
let key = TableInfoKey::new(table_id);
|
||||
self.0.invalidate_key(&key.as_raw_key()).await;
|
||||
self.invalidate_key(&key.as_raw_key()).await;
|
||||
|
||||
let key = &TableRouteKey { table_id };
|
||||
self.0.invalidate_key(&key.as_raw_key()).await;
|
||||
self.invalidate_key(&key.as_raw_key()).await;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -28,6 +28,7 @@ use crate::rpc::router::RegionRoute;
|
||||
pub mod alter_table;
|
||||
pub mod create_table;
|
||||
pub mod drop_table;
|
||||
pub mod truncate_table;
|
||||
pub mod utils;
|
||||
|
||||
#[derive(Debug, Default)]
|
||||
|
||||
@@ -45,6 +45,7 @@ use crate::error::{
|
||||
use crate::key::table_info::TableInfoValue;
|
||||
use crate::key::table_name::TableNameKey;
|
||||
use crate::key::table_route::TableRouteValue;
|
||||
use crate::key::DeserializedValueWithBytes;
|
||||
use crate::metrics;
|
||||
use crate::rpc::ddl::AlterTableTask;
|
||||
use crate::rpc::router::{find_leader_regions, find_leaders};
|
||||
@@ -63,7 +64,7 @@ impl AlterTableProcedure {
|
||||
pub fn new(
|
||||
cluster_id: u64,
|
||||
task: AlterTableTask,
|
||||
table_info_value: TableInfoValue,
|
||||
table_info_value: DeserializedValueWithBytes<TableInfoValue>,
|
||||
context: DdlContext,
|
||||
) -> Result<Self> {
|
||||
let alter_kind = task
|
||||
@@ -191,7 +192,8 @@ impl AlterTableProcedure {
|
||||
.await?
|
||||
.with_context(|| TableRouteNotFoundSnafu {
|
||||
table_name: table_ref.to_string(),
|
||||
})?;
|
||||
})?
|
||||
.into_inner();
|
||||
|
||||
let leaders = find_leaders(®ion_routes);
|
||||
let mut alter_region_tasks = Vec::with_capacity(leaders.len());
|
||||
@@ -413,7 +415,7 @@ pub struct AlterTableData {
|
||||
state: AlterTableState,
|
||||
task: AlterTableTask,
|
||||
/// Table info value before alteration.
|
||||
table_info_value: TableInfoValue,
|
||||
table_info_value: DeserializedValueWithBytes<TableInfoValue>,
|
||||
cluster_id: u64,
|
||||
/// Next column id of the table if the task adds columns to the table.
|
||||
next_column_id: Option<ColumnId>,
|
||||
@@ -422,7 +424,7 @@ pub struct AlterTableData {
|
||||
impl AlterTableData {
|
||||
pub fn new(
|
||||
task: AlterTableTask,
|
||||
table_info_value: TableInfoValue,
|
||||
table_info_value: DeserializedValueWithBytes<TableInfoValue>,
|
||||
cluster_id: u64,
|
||||
next_column_id: Option<ColumnId>,
|
||||
) -> Self {
|
||||
|
||||
@@ -199,8 +199,8 @@ impl CreateTableProcedure {
|
||||
for request in requests {
|
||||
let request = RegionRequest {
|
||||
header: Some(RegionRequestHeader {
|
||||
trace_id: 0,
|
||||
span_id: 0,
|
||||
trace_id: common_telemetry::trace_id().unwrap_or_default(),
|
||||
..Default::default()
|
||||
}),
|
||||
body: Some(request),
|
||||
};
|
||||
|
||||
@@ -39,6 +39,7 @@ use crate::error::{self, Result};
|
||||
use crate::key::table_info::TableInfoValue;
|
||||
use crate::key::table_name::TableNameKey;
|
||||
use crate::key::table_route::TableRouteValue;
|
||||
use crate::key::DeserializedValueWithBytes;
|
||||
use crate::metrics;
|
||||
use crate::rpc::ddl::DropTableTask;
|
||||
use crate::rpc::router::{find_leader_regions, find_leaders, RegionRoute};
|
||||
@@ -55,8 +56,8 @@ impl DropTableProcedure {
|
||||
pub fn new(
|
||||
cluster_id: u64,
|
||||
task: DropTableTask,
|
||||
table_route_value: TableRouteValue,
|
||||
table_info_value: TableInfoValue,
|
||||
table_route_value: DeserializedValueWithBytes<TableRouteValue>,
|
||||
table_info_value: DeserializedValueWithBytes<TableInfoValue>,
|
||||
context: DdlContext,
|
||||
) -> Self {
|
||||
Self {
|
||||
@@ -156,8 +157,8 @@ impl DropTableProcedure {
|
||||
|
||||
let request = RegionRequest {
|
||||
header: Some(RegionRequestHeader {
|
||||
trace_id: 0,
|
||||
span_id: 0,
|
||||
trace_id: common_telemetry::trace_id().unwrap_or_default(),
|
||||
..Default::default()
|
||||
}),
|
||||
body: Some(region_request::Body::Drop(PbDropRegionRequest {
|
||||
region_id: region_id.as_u64(),
|
||||
@@ -231,16 +232,16 @@ pub struct DropTableData {
|
||||
pub state: DropTableState,
|
||||
pub cluster_id: u64,
|
||||
pub task: DropTableTask,
|
||||
pub table_route_value: TableRouteValue,
|
||||
pub table_info_value: TableInfoValue,
|
||||
pub table_route_value: DeserializedValueWithBytes<TableRouteValue>,
|
||||
pub table_info_value: DeserializedValueWithBytes<TableInfoValue>,
|
||||
}
|
||||
|
||||
impl DropTableData {
|
||||
pub fn new(
|
||||
cluster_id: u64,
|
||||
task: DropTableTask,
|
||||
table_route_value: TableRouteValue,
|
||||
table_info_value: TableInfoValue,
|
||||
table_route_value: DeserializedValueWithBytes<TableRouteValue>,
|
||||
table_info_value: DeserializedValueWithBytes<TableInfoValue>,
|
||||
) -> Self {
|
||||
Self {
|
||||
state: DropTableState::Prepare,
|
||||
|
||||
235
src/common/meta/src/ddl/truncate_table.rs
Normal file
@@ -0,0 +1,235 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use api::v1::region::{
|
||||
region_request, RegionRequest, RegionRequestHeader, TruncateRequest as PbTruncateRegionRequest,
|
||||
};
|
||||
use async_trait::async_trait;
|
||||
use common_procedure::error::{FromJsonSnafu, ToJsonSnafu};
|
||||
use common_procedure::{
|
||||
Context as ProcedureContext, LockKey, Procedure, Result as ProcedureResult, Status,
|
||||
};
|
||||
use common_telemetry::debug;
|
||||
use futures::future::join_all;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use snafu::{ensure, ResultExt};
|
||||
use store_api::storage::RegionId;
|
||||
use strum::AsRefStr;
|
||||
use table::engine::TableReference;
|
||||
use table::metadata::{RawTableInfo, TableId};
|
||||
|
||||
use super::utils::handle_retry_error;
|
||||
use crate::ddl::utils::handle_operate_region_error;
|
||||
use crate::ddl::DdlContext;
|
||||
use crate::error::{Result, TableNotFoundSnafu};
|
||||
use crate::key::table_info::TableInfoValue;
|
||||
use crate::key::table_name::TableNameKey;
|
||||
use crate::key::DeserializedValueWithBytes;
|
||||
use crate::metrics;
|
||||
use crate::rpc::ddl::TruncateTableTask;
|
||||
use crate::rpc::router::{find_leader_regions, find_leaders, RegionRoute};
|
||||
use crate::table_name::TableName;
|
||||
|
||||
pub struct TruncateTableProcedure {
|
||||
context: DdlContext,
|
||||
data: TruncateTableData,
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Procedure for TruncateTableProcedure {
|
||||
fn type_name(&self) -> &str {
|
||||
Self::TYPE_NAME
|
||||
}
|
||||
|
||||
async fn execute(&mut self, _ctx: &ProcedureContext) -> ProcedureResult<Status> {
|
||||
let state = &self.data.state;
|
||||
|
||||
let _timer = common_telemetry::timer!(
|
||||
metrics::METRIC_META_PROCEDURE_TRUNCATE_TABLE,
|
||||
&[("step", state.as_ref().to_string())]
|
||||
);
|
||||
|
||||
match self.data.state {
|
||||
TruncateTableState::Prepare => self.on_prepare().await,
|
||||
TruncateTableState::DatanodeTruncateRegions => {
|
||||
self.on_datanode_truncate_regions().await
|
||||
}
|
||||
}
|
||||
.map_err(handle_retry_error)
|
||||
}
|
||||
|
||||
fn dump(&self) -> ProcedureResult<String> {
|
||||
serde_json::to_string(&self.data).context(ToJsonSnafu)
|
||||
}
|
||||
|
||||
fn lock_key(&self) -> LockKey {
|
||||
let table_ref = &self.data.table_ref();
|
||||
let key = common_catalog::format_full_table_name(
|
||||
table_ref.catalog,
|
||||
table_ref.schema,
|
||||
table_ref.table,
|
||||
);
|
||||
|
||||
LockKey::single(key)
|
||||
}
|
||||
}
|
||||
|
||||
impl TruncateTableProcedure {
|
||||
pub(crate) const TYPE_NAME: &'static str = "metasrv-procedure::TruncateTable";
|
||||
|
||||
pub(crate) fn new(
|
||||
cluster_id: u64,
|
||||
task: TruncateTableTask,
|
||||
table_info_value: DeserializedValueWithBytes<TableInfoValue>,
|
||||
region_routes: Vec<RegionRoute>,
|
||||
context: DdlContext,
|
||||
) -> Self {
|
||||
Self {
|
||||
context,
|
||||
data: TruncateTableData::new(cluster_id, task, table_info_value, region_routes),
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn from_json(json: &str, context: DdlContext) -> ProcedureResult<Self> {
|
||||
let data = serde_json::from_str(json).context(FromJsonSnafu)?;
|
||||
Ok(Self { context, data })
|
||||
}
|
||||
|
||||
// Checks whether the table exists.
|
||||
async fn on_prepare(&mut self) -> Result<Status> {
|
||||
let table_ref = &self.data.table_ref();
|
||||
|
||||
let manager = &self.context.table_metadata_manager;
|
||||
|
||||
let exist = manager
|
||||
.table_name_manager()
|
||||
.exists(TableNameKey::new(
|
||||
table_ref.catalog,
|
||||
table_ref.schema,
|
||||
table_ref.table,
|
||||
))
|
||||
.await?;
|
||||
|
||||
ensure!(
|
||||
exist,
|
||||
TableNotFoundSnafu {
|
||||
table_name: table_ref.to_string()
|
||||
}
|
||||
);
|
||||
|
||||
self.data.state = TruncateTableState::DatanodeTruncateRegions;
|
||||
|
||||
Ok(Status::executing(true))
|
||||
}
|
||||
|
||||
async fn on_datanode_truncate_regions(&mut self) -> Result<Status> {
|
||||
let table_id = self.data.table_id();
|
||||
|
||||
let region_routes = &self.data.region_routes;
|
||||
let leaders = find_leaders(region_routes);
|
||||
let mut truncate_region_tasks = Vec::with_capacity(leaders.len());
|
||||
|
||||
for datanode in leaders {
|
||||
let requester = self.context.datanode_manager.datanode(&datanode).await;
|
||||
let regions = find_leader_regions(region_routes, &datanode);
|
||||
|
||||
for region in regions {
|
||||
let region_id = RegionId::new(table_id, region);
|
||||
debug!(
|
||||
"Truncating table {} region {} on Datanode {:?}",
|
||||
self.data.table_ref(),
|
||||
region_id,
|
||||
datanode
|
||||
);
|
||||
|
||||
let request = RegionRequest {
|
||||
header: Some(RegionRequestHeader {
|
||||
trace_id: common_telemetry::trace_id().unwrap_or_default(),
|
||||
..Default::default()
|
||||
}),
|
||||
body: Some(region_request::Body::Truncate(PbTruncateRegionRequest {
|
||||
region_id: region_id.as_u64(),
|
||||
})),
|
||||
};
|
||||
|
||||
let datanode = datanode.clone();
|
||||
let requester = requester.clone();
|
||||
|
||||
truncate_region_tasks.push(async move {
|
||||
if let Err(err) = requester.handle(request).await {
|
||||
return Err(handle_operate_region_error(datanode)(err));
|
||||
}
|
||||
Ok(())
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
join_all(truncate_region_tasks)
|
||||
.await
|
||||
.into_iter()
|
||||
.collect::<Result<Vec<_>>>()?;
|
||||
|
||||
Ok(Status::Done)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct TruncateTableData {
|
||||
state: TruncateTableState,
|
||||
cluster_id: u64,
|
||||
task: TruncateTableTask,
|
||||
table_info_value: DeserializedValueWithBytes<TableInfoValue>,
|
||||
region_routes: Vec<RegionRoute>,
|
||||
}
|
||||
|
||||
impl TruncateTableData {
|
||||
pub fn new(
|
||||
cluster_id: u64,
|
||||
task: TruncateTableTask,
|
||||
table_info_value: DeserializedValueWithBytes<TableInfoValue>,
|
||||
region_routes: Vec<RegionRoute>,
|
||||
) -> Self {
|
||||
Self {
|
||||
state: TruncateTableState::Prepare,
|
||||
cluster_id,
|
||||
task,
|
||||
table_info_value,
|
||||
region_routes,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn table_ref(&self) -> TableReference {
|
||||
self.task.table_ref()
|
||||
}
|
||||
|
||||
pub fn table_name(&self) -> TableName {
|
||||
self.task.table_name()
|
||||
}
|
||||
|
||||
fn table_info(&self) -> &RawTableInfo {
|
||||
&self.table_info_value.table_info
|
||||
}
|
||||
|
||||
fn table_id(&self) -> TableId {
|
||||
self.table_info().ident.table_id
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, AsRefStr)]
|
||||
enum TruncateTableState {
|
||||
/// Prepares to truncate the table
|
||||
Prepare,
|
||||
/// Truncates regions on Datanode
|
||||
DatanodeTruncateRegions,
|
||||
}
|
||||
@@ -15,7 +15,7 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_procedure::{watcher, ProcedureId, ProcedureManagerRef, ProcedureWithId};
|
||||
use common_telemetry::{error, info};
|
||||
use common_telemetry::info;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
|
||||
use crate::cache_invalidator::CacheInvalidatorRef;
|
||||
@@ -23,18 +23,19 @@ use crate::datanode_manager::DatanodeManagerRef;
|
||||
use crate::ddl::alter_table::AlterTableProcedure;
|
||||
use crate::ddl::create_table::CreateTableProcedure;
|
||||
use crate::ddl::drop_table::DropTableProcedure;
|
||||
use crate::ddl::truncate_table::TruncateTableProcedure;
|
||||
use crate::ddl::{
|
||||
DdlContext, DdlTaskExecutor, ExecutorContext, TableMetadataAllocatorContext,
|
||||
TableMetadataAllocatorRef,
|
||||
};
|
||||
use crate::error::{
|
||||
self, RegisterProcedureLoaderSnafu, Result, SubmitProcedureSnafu, TableNotFoundSnafu,
|
||||
UnsupportedSnafu, WaitProcedureSnafu,
|
||||
WaitProcedureSnafu,
|
||||
};
|
||||
use crate::key::table_info::TableInfoValue;
|
||||
use crate::key::table_name::TableNameKey;
|
||||
use crate::key::table_route::TableRouteValue;
|
||||
use crate::key::TableMetadataManagerRef;
|
||||
use crate::key::{DeserializedValueWithBytes, TableMetadataManagerRef};
|
||||
use crate::rpc::ddl::DdlTask::{AlterTable, CreateTable, DropTable, TruncateTable};
|
||||
use crate::rpc::ddl::{
|
||||
AlterTableTask, CreateTableTask, DropTableTask, SubmitDdlTaskRequest, SubmitDdlTaskResponse,
|
||||
@@ -122,6 +123,20 @@ impl DdlManager {
|
||||
)
|
||||
.context(RegisterProcedureLoaderSnafu {
|
||||
type_name: AlterTableProcedure::TYPE_NAME,
|
||||
})?;
|
||||
|
||||
let context = self.create_context();
|
||||
|
||||
self.procedure_manager
|
||||
.register_loader(
|
||||
TruncateTableProcedure::TYPE_NAME,
|
||||
Box::new(move |json| {
|
||||
let context = context.clone();
|
||||
TruncateTableProcedure::from_json(json, context).map(|p| Box::new(p) as _)
|
||||
}),
|
||||
)
|
||||
.context(RegisterProcedureLoaderSnafu {
|
||||
type_name: TruncateTableProcedure::TYPE_NAME,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -129,7 +144,7 @@ impl DdlManager {
|
||||
&self,
|
||||
cluster_id: u64,
|
||||
alter_table_task: AlterTableTask,
|
||||
table_info_value: TableInfoValue,
|
||||
table_info_value: DeserializedValueWithBytes<TableInfoValue>,
|
||||
) -> Result<ProcedureId> {
|
||||
let context = self.create_context();
|
||||
|
||||
@@ -161,8 +176,8 @@ impl DdlManager {
|
||||
&self,
|
||||
cluster_id: u64,
|
||||
drop_table_task: DropTableTask,
|
||||
table_info_value: TableInfoValue,
|
||||
table_route_value: TableRouteValue,
|
||||
table_info_value: DeserializedValueWithBytes<TableInfoValue>,
|
||||
table_route_value: DeserializedValueWithBytes<TableRouteValue>,
|
||||
) -> Result<ProcedureId> {
|
||||
let context = self.create_context();
|
||||
|
||||
@@ -183,15 +198,21 @@ impl DdlManager {
|
||||
&self,
|
||||
cluster_id: u64,
|
||||
truncate_table_task: TruncateTableTask,
|
||||
table_info_value: DeserializedValueWithBytes<TableInfoValue>,
|
||||
region_routes: Vec<RegionRoute>,
|
||||
) -> Result<ProcedureId> {
|
||||
error!("Truncate table procedure is not supported, cluster_id = {}, truncate_table_task = {:?}, region_routes = {:?}",
|
||||
cluster_id, truncate_table_task, region_routes);
|
||||
let context = self.create_context();
|
||||
let procedure = TruncateTableProcedure::new(
|
||||
cluster_id,
|
||||
truncate_table_task,
|
||||
table_info_value,
|
||||
region_routes,
|
||||
context,
|
||||
);
|
||||
|
||||
UnsupportedSnafu {
|
||||
operation: "TRUNCATE TABLE",
|
||||
}
|
||||
.fail()
|
||||
let procedure_with_id = ProcedureWithId::with_random_id(Box::new(procedure));
|
||||
|
||||
self.submit_procedure(procedure_with_id).await
|
||||
}
|
||||
|
||||
async fn submit_procedure(&self, procedure_with_id: ProcedureWithId) -> Result<ProcedureId> {
|
||||
@@ -216,32 +237,34 @@ async fn handle_truncate_table_task(
|
||||
cluster_id: u64,
|
||||
truncate_table_task: TruncateTableTask,
|
||||
) -> Result<SubmitDdlTaskResponse> {
|
||||
let truncate_table = &truncate_table_task.truncate_table;
|
||||
let table_id = truncate_table
|
||||
.table_id
|
||||
.as_ref()
|
||||
.context(error::UnexpectedSnafu {
|
||||
err_msg: "expected table id ",
|
||||
})?
|
||||
.id;
|
||||
|
||||
let table_id = truncate_table_task.table_id;
|
||||
let table_metadata_manager = &ddl_manager.table_metadata_manager();
|
||||
let table_ref = truncate_table_task.table_ref();
|
||||
|
||||
let table_route_value = ddl_manager
|
||||
.table_metadata_manager()
|
||||
.table_route_manager()
|
||||
.get(table_id)
|
||||
.await?
|
||||
.with_context(|| error::TableRouteNotFoundSnafu {
|
||||
table_name: table_ref.to_string(),
|
||||
})?;
|
||||
let (table_info_value, table_route_value) =
|
||||
table_metadata_manager.get_full_table_info(table_id).await?;
|
||||
|
||||
let table_route = table_route_value.region_routes;
|
||||
let table_info_value = table_info_value.with_context(|| error::TableInfoNotFoundSnafu {
|
||||
table_name: table_ref.to_string(),
|
||||
})?;
|
||||
|
||||
let table_route_value = table_route_value.with_context(|| error::TableRouteNotFoundSnafu {
|
||||
table_name: table_ref.to_string(),
|
||||
})?;
|
||||
|
||||
let table_route = table_route_value.into_inner().region_routes;
|
||||
|
||||
let id = ddl_manager
|
||||
.submit_truncate_table_task(cluster_id, truncate_table_task, table_route)
|
||||
.submit_truncate_table_task(
|
||||
cluster_id,
|
||||
truncate_table_task,
|
||||
table_info_value,
|
||||
table_route,
|
||||
)
|
||||
.await?;
|
||||
|
||||
info!("Table: {table_id} is truncated via procedure_id {id:?}");
|
||||
|
||||
Ok(SubmitDdlTaskResponse {
|
||||
key: id.to_string().into(),
|
||||
..Default::default()
|
||||
|
||||
@@ -29,3 +29,9 @@ pub const REGION_LEASE_SECS: u64 =
|
||||
/// When creating table or region failover, a target node needs to be selected.
|
||||
/// If the node's lease has expired, the `Selector` will not select it.
|
||||
pub const DATANODE_LEASE_SECS: u64 = REGION_LEASE_SECS;
|
||||
|
||||
/// The lease seconds of metasrv leader.
|
||||
pub const META_LEASE_SECS: u64 = 3;
|
||||
|
||||
// In a lease, there are two opportunities for renewal.
|
||||
pub const META_KEEP_ALIVE_INTERVAL_SECS: u64 = META_LEASE_SECS / 2;
|
||||
|
||||
@@ -16,6 +16,7 @@ use std::str::Utf8Error;
|
||||
|
||||
use common_error::ext::{BoxedError, ErrorExt};
|
||||
use common_error::status_code::StatusCode;
|
||||
use common_macro::stack_trace_debug;
|
||||
use serde_json::error::Error as JsonError;
|
||||
use snafu::{Location, Snafu};
|
||||
use store_api::storage::RegionNumber;
|
||||
@@ -23,8 +24,9 @@ use table::metadata::TableId;
|
||||
|
||||
use crate::peer::Peer;
|
||||
|
||||
#[derive(Debug, Snafu)]
|
||||
#[derive(Snafu)]
|
||||
#[snafu(visibility(pub))]
|
||||
#[stack_trace_debug]
|
||||
pub enum Error {
|
||||
#[snafu(display("Failed to get sequence: {}", err_msg))]
|
||||
NextSequence { err_msg: String, location: Location },
|
||||
@@ -83,7 +85,8 @@ pub enum Error {
|
||||
#[snafu(display("Failed to build table meta for table: {}", table_name))]
|
||||
BuildTableMeta {
|
||||
table_name: String,
|
||||
source: table::metadata::TableMetaBuilderError,
|
||||
#[snafu(source)]
|
||||
error: table::metadata::TableMetaBuilderError,
|
||||
location: Location,
|
||||
},
|
||||
|
||||
@@ -102,19 +105,22 @@ pub enum Error {
|
||||
#[snafu(display("Failed to decode protobuf"))]
|
||||
DecodeProto {
|
||||
location: Location,
|
||||
source: prost::DecodeError,
|
||||
#[snafu(source)]
|
||||
error: prost::DecodeError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to encode object into json"))]
|
||||
EncodeJson {
|
||||
location: Location,
|
||||
source: JsonError,
|
||||
#[snafu(source)]
|
||||
error: JsonError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to decode object from json"))]
|
||||
DecodeJson {
|
||||
location: Location,
|
||||
source: JsonError,
|
||||
#[snafu(source)]
|
||||
error: JsonError,
|
||||
},
|
||||
|
||||
#[snafu(display("Payload not exist"))]
|
||||
@@ -125,7 +131,8 @@ pub enum Error {
|
||||
|
||||
#[snafu(display("Failed to serde json"))]
|
||||
SerdeJson {
|
||||
source: serde_json::error::Error,
|
||||
#[snafu(source)]
|
||||
error: serde_json::error::Error,
|
||||
location: Location,
|
||||
},
|
||||
|
||||
@@ -177,7 +184,8 @@ pub enum Error {
|
||||
#[snafu(display("Failed to convert raw key to str"))]
|
||||
ConvertRawKey {
|
||||
location: Location,
|
||||
source: Utf8Error,
|
||||
#[snafu(source)]
|
||||
error: Utf8Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Table nod found, table: {}", table_name))]
|
||||
|
||||
@@ -12,6 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::fmt::{Display, Formatter};
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
@@ -73,13 +74,15 @@ impl Display for OpenRegion {
|
||||
pub struct OpenRegion {
|
||||
pub region_ident: RegionIdent,
|
||||
pub region_storage_path: String,
|
||||
pub options: HashMap<String, String>,
|
||||
}
|
||||
|
||||
impl OpenRegion {
|
||||
pub fn new(region_ident: RegionIdent, path: &str) -> Self {
|
||||
pub fn new(region_ident: RegionIdent, path: &str, options: HashMap<String, String>) -> Self {
|
||||
Self {
|
||||
region_ident,
|
||||
region_storage_path: path.to_string(),
|
||||
options,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -127,12 +130,13 @@ mod tests {
|
||||
engine: "mito2".to_string(),
|
||||
},
|
||||
"test/foo",
|
||||
HashMap::new(),
|
||||
));
|
||||
|
||||
let serialized = serde_json::to_string(&open_region).unwrap();
|
||||
|
||||
assert_eq!(
|
||||
r#"{"OpenRegion":{"region_ident":{"cluster_id":1,"datanode_id":2,"table_id":1024,"region_number":1,"engine":"mito2"},"region_storage_path":"test/foo"}}"#,
|
||||
r#"{"OpenRegion":{"region_ident":{"cluster_id":1,"datanode_id":2,"table_id":1024,"region_number":1,"engine":"mito2"},"region_storage_path":"test/foo","options":{}}}"#,
|
||||
serialized
|
||||
);
|
||||
|
||||
|
||||
@@ -55,13 +55,18 @@ pub mod table_region;
|
||||
#[allow(deprecated)]
|
||||
pub mod table_route;
|
||||
|
||||
use std::collections::BTreeMap;
|
||||
use std::collections::{BTreeMap, HashMap};
|
||||
use std::fmt::Debug;
|
||||
use std::ops::Deref;
|
||||
use std::sync::Arc;
|
||||
|
||||
use bytes::Bytes;
|
||||
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use datanode_table::{DatanodeTableKey, DatanodeTableManager, DatanodeTableValue};
|
||||
use lazy_static::lazy_static;
|
||||
use regex::Regex;
|
||||
use serde::de::DeserializeOwned;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
use store_api::storage::RegionNumber;
|
||||
use table::metadata::{RawTableInfo, TableId};
|
||||
@@ -69,6 +74,7 @@ use table_info::{TableInfoKey, TableInfoManager, TableInfoValue};
|
||||
use table_name::{TableNameKey, TableNameManager, TableNameValue};
|
||||
|
||||
use self::catalog_name::{CatalogManager, CatalogNameKey, CatalogNameValue};
|
||||
use self::datanode_table::RegionInfo;
|
||||
use self::schema_name::{SchemaManager, SchemaNameKey, SchemaNameValue};
|
||||
use self::table_route::{TableRouteManager, TableRouteValue};
|
||||
use crate::ddl::utils::region_storage_path;
|
||||
@@ -80,7 +86,7 @@ use crate::DatanodeId;
|
||||
|
||||
pub const REMOVED_PREFIX: &str = "__removed";
|
||||
|
||||
const NAME_PATTERN: &str = "[a-zA-Z_:-][a-zA-Z0-9_:-]*";
|
||||
const NAME_PATTERN: &str = r"[a-zA-Z_:-][a-zA-Z0-9_:\-\.]*";
|
||||
|
||||
const DATANODE_TABLE_KEY_PREFIX: &str = "__dn_table";
|
||||
const TABLE_INFO_KEY_PREFIX: &str = "__table_info";
|
||||
@@ -154,6 +160,116 @@ macro_rules! ensure_values {
|
||||
};
|
||||
}
|
||||
|
||||
/// A struct containing a deserialized value (`inner`) and the original bytes it was decoded from.
|
||||
///
|
||||
/// - Serialize behaviors:
|
||||
///
|
||||
/// The `inner` field will be ignored.
|
||||
///
|
||||
/// - Deserialize behaviors:
|
||||
///
|
||||
/// The `inner` field will be deserialized from the `bytes` field.
|
||||
pub struct DeserializedValueWithBytes<T: DeserializeOwned + Serialize> {
|
||||
// The original bytes of the inner.
|
||||
bytes: Bytes,
|
||||
// The value was deserialized from the original bytes.
|
||||
inner: T,
|
||||
}
|
||||
|
||||
impl<T: DeserializeOwned + Serialize> Deref for DeserializedValueWithBytes<T> {
|
||||
type Target = T;
|
||||
|
||||
fn deref(&self) -> &Self::Target {
|
||||
&self.inner
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: DeserializeOwned + Serialize + Debug> Debug for DeserializedValueWithBytes<T> {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(
|
||||
f,
|
||||
"DeserializedValueWithBytes(inner: {:?}, bytes: {:?})",
|
||||
self.inner, self.bytes
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: DeserializeOwned + Serialize> Serialize for DeserializedValueWithBytes<T> {
|
||||
/// - Serialize behaviors:
|
||||
///
|
||||
/// The `inner` field will be ignored.
|
||||
fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
|
||||
where
|
||||
S: serde::Serializer,
|
||||
{
|
||||
// Safety: The original bytes are always JSON encoded.
|
||||
// It's more efficient than `serialize_bytes`.
|
||||
serializer.serialize_str(&String::from_utf8_lossy(&self.bytes))
|
||||
}
|
||||
}
|
||||
|
||||
impl<'de, T: DeserializeOwned + Serialize> Deserialize<'de> for DeserializedValueWithBytes<T> {
|
||||
/// - Deserialize behaviors:
|
||||
///
|
||||
/// The `inner` field will be deserialized from the `bytes` field.
|
||||
fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
|
||||
where
|
||||
D: serde::Deserializer<'de>,
|
||||
{
|
||||
let buf = String::deserialize(deserializer)?;
|
||||
let bytes = Bytes::from(buf);
|
||||
|
||||
let value = DeserializedValueWithBytes::from_inner_bytes(bytes)
|
||||
.map_err(|err| serde::de::Error::custom(err.to_string()))?;
|
||||
|
||||
Ok(value)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Serialize + DeserializeOwned + Clone> Clone for DeserializedValueWithBytes<T> {
|
||||
fn clone(&self) -> Self {
|
||||
Self {
|
||||
bytes: self.bytes.clone(),
|
||||
inner: self.inner.clone(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Serialize + DeserializeOwned> DeserializedValueWithBytes<T> {
|
||||
/// Returns a struct containing a deserialized value and the original `bytes`.
/// It accepts the original bytes of the inner value.
|
||||
pub fn from_inner_bytes(bytes: Bytes) -> Result<Self> {
|
||||
let inner = serde_json::from_slice(&bytes).context(error::SerdeJsonSnafu)?;
|
||||
Ok(Self { bytes, inner })
|
||||
}
|
||||
|
||||
/// Returns a struct containing a deserialized value and the original `bytes`.
/// It accepts the original bytes of the inner value.
|
||||
pub fn from_inner_slice(bytes: &[u8]) -> Result<Self> {
|
||||
Self::from_inner_bytes(Bytes::copy_from_slice(bytes))
|
||||
}
|
||||
|
||||
pub fn into_inner(self) -> T {
|
||||
self.inner
|
||||
}
|
||||
|
||||
/// Returns original `bytes`
|
||||
pub fn into_bytes(&self) -> Vec<u8> {
|
||||
self.bytes.to_vec()
|
||||
}
|
||||
|
||||
#[cfg(feature = "testing")]
|
||||
/// Note: only used for test purposes.
|
||||
pub fn from_inner(inner: T) -> Self {
|
||||
let bytes = serde_json::to_vec(&inner).unwrap();
|
||||
|
||||
Self {
|
||||
bytes: Bytes::from(bytes),
|
||||
inner,
|
||||
}
|
||||
}
|
||||
}
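To make the serialize/deserialize contract above concrete, here is a minimal sketch with a toy payload type standing in for the real metadata values such as `TableInfoValue` (it assumes `common_meta`, `serde` and `serde_json` are available as dependencies): serializing re-emits the original bytes instead of re-encoding `inner`, and deserializing rebuilds `inner` from those bytes.

```rust
use common_meta::key::DeserializedValueWithBytes;
use serde::{Deserialize, Serialize};

// A toy payload; real callers use types like TableInfoValue or TableRouteValue.
#[derive(Debug, Serialize, Deserialize)]
struct Payload {
    name: String,
    version: u64,
}

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // The raw bytes as they were read from the kv backend.
    let raw = serde_json::to_vec(&Payload {
        name: "t".to_string(),
        version: 1,
    })?;

    // Keep both the decoded value and its original encoding.
    let value: DeserializedValueWithBytes<Payload> =
        DeserializedValueWithBytes::from_inner_slice(&raw)?;

    // `Deref` exposes the decoded value...
    assert_eq!(value.version, 1);

    // ...while Serialize passes the original bytes through (as a JSON string),
    // so procedures that persist this struct never re-encode `inner`.
    let persisted = serde_json::to_string(&value)?;
    let restored: DeserializedValueWithBytes<Payload> = serde_json::from_str(&persisted)?;
    assert_eq!(restored.into_bytes(), raw);

    Ok(())
}
```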
|
||||
|
||||
impl TableMetadataManager {
|
||||
pub fn new(kv_backend: KvBackendRef) -> Self {
|
||||
TableMetadataManager {
|
||||
@@ -211,7 +327,10 @@ impl TableMetadataManager {
|
||||
pub async fn get_full_table_info(
|
||||
&self,
|
||||
table_id: TableId,
|
||||
) -> Result<(Option<TableInfoValue>, Option<TableRouteValue>)> {
|
||||
) -> Result<(
|
||||
Option<DeserializedValueWithBytes<TableInfoValue>>,
|
||||
Option<DeserializedValueWithBytes<TableRouteValue>>,
|
||||
)> {
|
||||
let (get_table_route_txn, table_route_decoder) =
|
||||
self.table_route_manager.build_get_txn(table_id);
|
||||
|
||||
@@ -256,6 +375,7 @@ impl TableMetadataManager {
|
||||
.table_name_manager()
|
||||
.build_create_txn(&table_name, table_id)?;
|
||||
|
||||
let region_options = (&table_info.meta.options).into();
|
||||
// Creates table info.
|
||||
let table_info_value = TableInfoValue::new(table_info);
|
||||
let (create_table_info_txn, on_create_table_info_failure) = self
|
||||
@@ -268,6 +388,7 @@ impl TableMetadataManager {
|
||||
table_id,
|
||||
&engine,
|
||||
®ion_storage_path,
|
||||
region_options,
|
||||
distribution,
|
||||
)?;
|
||||
|
||||
@@ -288,15 +409,17 @@ impl TableMetadataManager {
|
||||
|
||||
// Checks whether metadata was already created.
|
||||
if !r.succeeded {
|
||||
let remote_table_info =
|
||||
on_create_table_info_failure(&r.responses)?.context(error::UnexpectedSnafu {
|
||||
let remote_table_info = on_create_table_info_failure(&r.responses)?
|
||||
.context(error::UnexpectedSnafu {
|
||||
err_msg: "Reads the empty table info during the create table metadata",
|
||||
})?;
|
||||
})?
|
||||
.into_inner();
|
||||
|
||||
let remote_table_route =
|
||||
on_create_table_route_failure(&r.responses)?.context(error::UnexpectedSnafu {
|
||||
let remote_table_route = on_create_table_route_failure(&r.responses)?
|
||||
.context(error::UnexpectedSnafu {
|
||||
err_msg: "Reads the empty table route during the create table metadata",
|
||||
})?;
|
||||
})?
|
||||
.into_inner();
|
||||
|
||||
let op_name = "the creating table metadata";
|
||||
ensure_values!(remote_table_info, table_info_value, op_name);
|
||||
@@ -310,8 +433,8 @@ impl TableMetadataManager {
|
||||
/// The caller MUST ensure it has the exclusive access to `TableNameKey`.
|
||||
pub async fn delete_table_metadata(
|
||||
&self,
|
||||
table_info_value: &TableInfoValue,
|
||||
table_route_value: &TableRouteValue,
|
||||
table_info_value: &DeserializedValueWithBytes<TableInfoValue>,
|
||||
table_route_value: &DeserializedValueWithBytes<TableRouteValue>,
|
||||
) -> Result<()> {
|
||||
let table_info = &table_info_value.table_info;
|
||||
let table_id = table_info.ident.table_id;
|
||||
@@ -361,7 +484,7 @@ impl TableMetadataManager {
|
||||
/// and the new `TableNameKey` MUST be empty.
|
||||
pub async fn rename_table(
|
||||
&self,
|
||||
current_table_info_value: TableInfoValue,
|
||||
current_table_info_value: DeserializedValueWithBytes<TableInfoValue>,
|
||||
new_table_name: String,
|
||||
) -> Result<()> {
|
||||
let current_table_info = ¤t_table_info_value.table_info;
|
||||
@@ -386,9 +509,11 @@ impl TableMetadataManager {
|
||||
table_id,
|
||||
)?;
|
||||
|
||||
let new_table_info_value = current_table_info_value.with_update(move |table_info| {
|
||||
table_info.name = new_table_name;
|
||||
});
|
||||
let new_table_info_value = current_table_info_value
|
||||
.inner
|
||||
.with_update(move |table_info| {
|
||||
table_info.name = new_table_name;
|
||||
});
|
||||
|
||||
// Updates table info.
|
||||
let (update_table_info_txn, on_update_table_info_failure) = self
|
||||
@@ -401,10 +526,11 @@ impl TableMetadataManager {
|
||||
|
||||
// Checks whether metadata was already updated.
|
||||
if !r.succeeded {
|
||||
let remote_table_info =
|
||||
on_update_table_info_failure(&r.responses)?.context(error::UnexpectedSnafu {
|
||||
let remote_table_info = on_update_table_info_failure(&r.responses)?
|
||||
.context(error::UnexpectedSnafu {
|
||||
err_msg: "Reads the empty table info during the rename table metadata",
|
||||
})?;
|
||||
})?
|
||||
.into_inner();
|
||||
|
||||
let op_name = "the renaming table metadata";
|
||||
ensure_values!(remote_table_info, new_table_info_value, op_name);
|
||||
@@ -416,7 +542,7 @@ impl TableMetadataManager {
|
||||
/// Updates table info and returns an error if different metadata exists.
|
||||
pub async fn update_table_info(
|
||||
&self,
|
||||
current_table_info_value: TableInfoValue,
|
||||
current_table_info_value: DeserializedValueWithBytes<TableInfoValue>,
|
||||
new_table_info: RawTableInfo,
|
||||
) -> Result<()> {
|
||||
let table_id = current_table_info_value.table_info.ident.table_id;
|
||||
@@ -432,10 +558,11 @@ impl TableMetadataManager {
|
||||
|
||||
// Checks whether metadata was already updated.
|
||||
if !r.succeeded {
|
||||
let remote_table_info =
|
||||
on_update_table_info_failure(&r.responses)?.context(error::UnexpectedSnafu {
|
||||
let remote_table_info = on_update_table_info_failure(&r.responses)?
|
||||
.context(error::UnexpectedSnafu {
|
||||
err_msg: "Reads the empty table info during the updating table info",
|
||||
})?;
|
||||
})?
|
||||
.into_inner();
|
||||
|
||||
let op_name = "the updating table info";
|
||||
ensure_values!(remote_table_info, new_table_info_value, op_name);
|
||||
@@ -446,10 +573,10 @@ impl TableMetadataManager {
|
||||
pub async fn update_table_route(
|
||||
&self,
|
||||
table_id: TableId,
|
||||
engine: &str,
|
||||
region_storage_path: &str,
|
||||
current_table_route_value: TableRouteValue,
|
||||
region_info: RegionInfo,
|
||||
current_table_route_value: DeserializedValueWithBytes<TableRouteValue>,
|
||||
new_region_routes: Vec<RegionRoute>,
|
||||
new_region_options: &HashMap<String, String>,
|
||||
) -> Result<()> {
|
||||
// Updates the datanode table key value pairs.
|
||||
let current_region_distribution =
|
||||
@@ -458,10 +585,10 @@ impl TableMetadataManager {
|
||||
|
||||
let update_datanode_table_txn = self.datanode_table_manager().build_update_txn(
|
||||
table_id,
|
||||
engine,
|
||||
region_storage_path,
|
||||
region_info,
|
||||
current_region_distribution,
|
||||
new_region_distribution,
|
||||
new_region_options,
|
||||
)?;
|
||||
|
||||
// Updates the table_route.
|
||||
@@ -477,10 +604,11 @@ impl TableMetadataManager {
|
||||
|
||||
// Checks whether metadata was already updated.
|
||||
if !r.succeeded {
|
||||
let remote_table_route =
|
||||
on_update_table_route_failure(&r.responses)?.context(error::UnexpectedSnafu {
|
||||
let remote_table_route = on_update_table_route_failure(&r.responses)?
|
||||
.context(error::UnexpectedSnafu {
|
||||
err_msg: "Reads the empty table route during the updating table route",
|
||||
})?;
|
||||
})?
|
||||
.into_inner();
|
||||
|
||||
let op_name = "the updating table route";
|
||||
ensure_values!(remote_table_route, new_table_route_value, op_name);
|
||||
@@ -553,9 +681,10 @@ impl_optional_meta_value! {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::collections::BTreeMap;
|
||||
use std::collections::{BTreeMap, HashMap};
|
||||
use std::sync::Arc;
|
||||
|
||||
use bytes::Bytes;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::schema::{ColumnSchema, SchemaBuilder};
|
||||
use futures::TryStreamExt;
|
||||
@@ -563,14 +692,43 @@ mod tests {
|
||||
|
||||
use super::datanode_table::DatanodeTableKey;
|
||||
use crate::ddl::utils::region_storage_path;
|
||||
use crate::key::datanode_table::RegionInfo;
|
||||
use crate::key::table_info::TableInfoValue;
|
||||
use crate::key::table_name::TableNameKey;
|
||||
use crate::key::table_route::TableRouteValue;
|
||||
use crate::key::{to_removed_key, TableMetadataManager};
|
||||
use crate::key::{to_removed_key, DeserializedValueWithBytes, TableMetadataManager};
|
||||
use crate::kv_backend::memory::MemoryKvBackend;
|
||||
use crate::peer::Peer;
|
||||
use crate::rpc::router::{region_distribution, Region, RegionRoute};
|
||||
|
||||
#[test]
|
||||
fn test_deserialized_value_with_bytes() {
|
||||
let region_route = new_test_region_route();
|
||||
let region_routes = vec![region_route.clone()];
|
||||
|
||||
let expected_region_routes =
|
||||
TableRouteValue::new(vec![region_route.clone(), region_route.clone()]);
|
||||
let expected = serde_json::to_vec(&expected_region_routes).unwrap();
|
||||
|
||||
// Serialize behaviors:
|
||||
// The inner field will be ignored.
|
||||
let value = DeserializedValueWithBytes {
|
||||
// ignored
|
||||
inner: TableRouteValue::new(region_routes.clone()),
|
||||
bytes: Bytes::from(expected.clone()),
|
||||
};
|
||||
|
||||
let encoded = serde_json::to_vec(&value).unwrap();
|
||||
|
||||
// Deserialize behaviors:
|
||||
// The inner field will be deserialized from the bytes field.
|
||||
let decoded: DeserializedValueWithBytes<TableRouteValue> =
|
||||
serde_json::from_slice(&encoded).unwrap();
|
||||
|
||||
assert_eq!(decoded.inner, expected_region_routes);
|
||||
assert_eq!(decoded.bytes, expected);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_to_removed_key() {
|
||||
let key = "test_key";
|
||||
@@ -660,8 +818,14 @@ mod tests {
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(remote_table_info.unwrap().table_info, table_info);
|
||||
assert_eq!(remote_table_route.unwrap().region_routes, region_routes);
|
||||
assert_eq!(
|
||||
remote_table_info.unwrap().into_inner().table_info,
|
||||
table_info
|
||||
);
|
||||
assert_eq!(
|
||||
remote_table_route.unwrap().into_inner().region_routes,
|
||||
region_routes
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
@@ -674,7 +838,8 @@ mod tests {
|
||||
new_test_table_info(region_routes.iter().map(|r| r.region.id.region_number())).into();
|
||||
let table_id = table_info.ident.table_id;
|
||||
let datanode_id = 2;
|
||||
let table_route_value = TableRouteValue::new(region_routes.clone());
|
||||
let table_route_value =
|
||||
DeserializedValueWithBytes::from_inner(TableRouteValue::new(region_routes.clone()));
|
||||
|
||||
// creates metadata.
|
||||
table_metadata_manager
|
||||
@@ -682,7 +847,8 @@ mod tests {
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let table_info_value = TableInfoValue::new(table_info.clone());
|
||||
let table_info_value =
|
||||
DeserializedValueWithBytes::from_inner(TableInfoValue::new(table_info.clone()));
|
||||
|
||||
// deletes metadata.
|
||||
table_metadata_manager
|
||||
@@ -723,7 +889,8 @@ mod tests {
|
||||
.get_removed(table_id)
|
||||
.await
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
.unwrap()
|
||||
.into_inner();
|
||||
assert_eq!(removed_table_info.table_info, table_info);
|
||||
|
||||
let removed_table_route = table_metadata_manager
|
||||
@@ -731,7 +898,8 @@ mod tests {
|
||||
.get_removed(table_id)
|
||||
.await
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
.unwrap()
|
||||
.into_inner();
|
||||
assert_eq!(removed_table_route.region_routes, region_routes);
|
||||
}
|
||||
|
||||
@@ -750,7 +918,9 @@ mod tests {
|
||||
.await
|
||||
.unwrap();
|
||||
let new_table_name = "another_name".to_string();
|
||||
let table_info_value = TableInfoValue::new(table_info.clone());
|
||||
let table_info_value =
|
||||
DeserializedValueWithBytes::from_inner(TableInfoValue::new(table_info.clone()));
|
||||
|
||||
table_metadata_manager
|
||||
.rename_table(table_info_value.clone(), new_table_name.clone())
|
||||
.await
|
||||
@@ -762,7 +932,8 @@ mod tests {
|
||||
.unwrap();
|
||||
let mut modified_table_info = table_info.clone();
|
||||
modified_table_info.name = "hi".to_string();
|
||||
let modified_table_info_value = table_info_value.update(modified_table_info);
|
||||
let modified_table_info_value =
|
||||
DeserializedValueWithBytes::from_inner(table_info_value.update(modified_table_info));
|
||||
// if the table_info_value is wrong, it should return an error.
|
||||
// The ABA problem.
|
||||
assert!(table_metadata_manager
|
||||
@@ -816,7 +987,8 @@ mod tests {
|
||||
.unwrap();
|
||||
let mut new_table_info = table_info.clone();
|
||||
new_table_info.name = "hi".to_string();
|
||||
let current_table_info_value = TableInfoValue::new(table_info.clone());
|
||||
let current_table_info_value =
|
||||
DeserializedValueWithBytes::from_inner(TableInfoValue::new(table_info.clone()));
|
||||
// should be ok.
|
||||
table_metadata_manager
|
||||
.update_table_info(current_table_info_value.clone(), new_table_info.clone())
|
||||
@@ -834,12 +1006,15 @@ mod tests {
|
||||
.get(table_id)
|
||||
.await
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
.unwrap()
|
||||
.into_inner();
|
||||
assert_eq!(updated_table_info.table_info, new_table_info);
|
||||
|
||||
let mut wrong_table_info = table_info.clone();
|
||||
wrong_table_info.name = "wrong".to_string();
|
||||
let wrong_table_info_value = current_table_info_value.update(wrong_table_info);
|
||||
let wrong_table_info_value = DeserializedValueWithBytes::from_inner(
|
||||
current_table_info_value.update(wrong_table_info),
|
||||
);
|
||||
// if the current_table_info_value is wrong, it should return an error.
|
||||
// The ABA problem.
|
||||
assert!(table_metadata_manager
|
||||
@@ -878,7 +1053,8 @@ mod tests {
|
||||
let engine = table_info.meta.engine.as_str();
|
||||
let region_storage_path =
|
||||
region_storage_path(&table_info.catalog_name, &table_info.schema_name);
|
||||
let current_table_route_value = TableRouteValue::new(region_routes.clone());
|
||||
let current_table_route_value =
|
||||
DeserializedValueWithBytes::from_inner(TableRouteValue::new(region_routes.clone()));
|
||||
// creates metadata.
|
||||
table_metadata_manager
|
||||
.create_table_metadata(table_info.clone(), region_routes.clone())
|
||||
@@ -894,10 +1070,14 @@ mod tests {
|
||||
table_metadata_manager
|
||||
.update_table_route(
|
||||
table_id,
|
||||
engine,
|
||||
®ion_storage_path,
|
||||
RegionInfo {
|
||||
engine: engine.to_string(),
|
||||
region_storage_path: region_storage_path.to_string(),
|
||||
region_options: HashMap::new(),
|
||||
},
|
||||
current_table_route_value.clone(),
|
||||
new_region_routes.clone(),
|
||||
&HashMap::new(),
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
@@ -907,24 +1087,36 @@ mod tests {
|
||||
table_metadata_manager
|
||||
.update_table_route(
|
||||
table_id,
|
||||
engine,
|
||||
®ion_storage_path,
|
||||
RegionInfo {
|
||||
engine: engine.to_string(),
|
||||
region_storage_path: region_storage_path.to_string(),
|
||||
region_options: HashMap::new(),
|
||||
},
|
||||
current_table_route_value.clone(),
|
||||
new_region_routes.clone(),
|
||||
&HashMap::new(),
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let current_table_route_value = current_table_route_value.update(new_region_routes.clone());
|
||||
let current_table_route_value = DeserializedValueWithBytes::from_inner(
|
||||
current_table_route_value
|
||||
.inner
|
||||
.update(new_region_routes.clone()),
|
||||
);
|
||||
let new_region_routes = vec![new_region_route(2, 4), new_region_route(5, 5)];
|
||||
// it should be ok.
|
||||
table_metadata_manager
|
||||
.update_table_route(
|
||||
table_id,
|
||||
engine,
|
||||
®ion_storage_path,
|
||||
RegionInfo {
|
||||
engine: engine.to_string(),
|
||||
region_storage_path: region_storage_path.to_string(),
|
||||
region_options: HashMap::new(),
|
||||
},
|
||||
current_table_route_value.clone(),
|
||||
new_region_routes.clone(),
|
||||
&HashMap::new(),
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
@@ -932,19 +1124,24 @@ mod tests {
|
||||
|
||||
// if the current_table_route_value is wrong, it should return an error.
|
||||
// The ABA problem.
|
||||
let wrong_table_route_value = current_table_route_value.update(vec![
|
||||
new_region_route(1, 1),
|
||||
new_region_route(2, 2),
|
||||
new_region_route(3, 3),
|
||||
new_region_route(4, 4),
|
||||
]);
|
||||
let wrong_table_route_value =
|
||||
DeserializedValueWithBytes::from_inner(current_table_route_value.update(vec![
|
||||
new_region_route(1, 1),
|
||||
new_region_route(2, 2),
|
||||
new_region_route(3, 3),
|
||||
new_region_route(4, 4),
|
||||
]));
|
||||
assert!(table_metadata_manager
|
||||
.update_table_route(
|
||||
table_id,
|
||||
engine,
|
||||
®ion_storage_path,
|
||||
RegionInfo {
|
||||
engine: engine.to_string(),
|
||||
region_storage_path: region_storage_path.to_string(),
|
||||
region_options: HashMap::new(),
|
||||
},
|
||||
wrong_table_route_value,
|
||||
new_region_routes
|
||||
new_region_routes,
|
||||
&HashMap::new(),
|
||||
)
|
||||
.await
|
||||
.is_err());
|
||||
|
||||
@@ -12,6 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
|
||||
use futures::stream::BoxStream;
|
||||
@@ -32,6 +33,21 @@ use crate::rpc::store::RangeRequest;
|
||||
use crate::rpc::KeyValue;
|
||||
use crate::DatanodeId;
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
/// RegionInfo
/// For backward compatibility, DON'T modify the field names.
pub struct RegionInfo {
    #[serde(default)]
    // The table engine; it SHOULD be immutable after creation.
    pub engine: String,
    // The region storage path; it SHOULD be immutable after creation.
    #[serde(default)]
    pub region_storage_path: String,
    // The region options.
    #[serde(default)]
    pub region_options: HashMap<String, String>,
}

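// Illustrative sketch (not part of this change): every field carries `#[serde(default)]`, so a
// value written before `region_options` (or the other fields) existed still deserializes, with
// the missing fields falling back to their defaults. The JSON literal and engine name are
// hypothetical.
fn decode_old_region_info_sketch() {
    let old_json = br#"{"engine":"mito","region_storage_path":"data/greptime/public"}"#;
    let info: RegionInfo = serde_json::from_slice(old_json).unwrap();
    assert_eq!(info.engine, "mito");
    assert!(info.region_options.is_empty());
}
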
pub struct DatanodeTableKey {
|
||||
datanode_id: DatanodeId,
|
||||
table_id: TableId,
|
||||
@@ -85,25 +101,17 @@ impl TableMetaKey for DatanodeTableKey {
|
||||
pub struct DatanodeTableValue {
|
||||
pub table_id: TableId,
|
||||
pub regions: Vec<RegionNumber>,
|
||||
#[serde(default)]
|
||||
pub engine: String,
|
||||
#[serde(default)]
|
||||
pub region_storage_path: String,
|
||||
#[serde(flatten)]
|
||||
pub region_info: RegionInfo,
|
||||
version: u64,
|
||||
}
|
||||
|
||||
impl DatanodeTableValue {
|
||||
pub fn new(
|
||||
table_id: TableId,
|
||||
regions: Vec<RegionNumber>,
|
||||
engine: String,
|
||||
region_storage_path: String,
|
||||
) -> Self {
|
||||
pub fn new(table_id: TableId, regions: Vec<RegionNumber>, region_info: RegionInfo) -> Self {
|
||||
Self {
|
||||
table_id,
|
||||
regions,
|
||||
engine,
|
||||
region_storage_path,
|
||||
region_info,
|
||||
version: 0,
|
||||
}
|
||||
}
|
||||
@@ -156,6 +164,7 @@ impl DatanodeTableManager {
|
||||
table_id: TableId,
|
||||
engine: &str,
|
||||
region_storage_path: &str,
|
||||
region_options: HashMap<String, String>,
|
||||
distribution: RegionDistribution,
|
||||
) -> Result<Txn> {
|
||||
let txns = distribution
|
||||
@@ -165,8 +174,11 @@ impl DatanodeTableManager {
|
||||
let val = DatanodeTableValue::new(
|
||||
table_id,
|
||||
regions,
|
||||
engine.to_string(),
|
||||
region_storage_path.to_string(),
|
||||
RegionInfo {
|
||||
engine: engine.to_string(),
|
||||
region_storage_path: region_storage_path.to_string(),
|
||||
region_options: region_options.clone(),
|
||||
},
|
||||
);
|
||||
|
||||
Ok(TxnOp::Put(key.as_raw_key(), val.try_as_raw_value()?))
|
||||
@@ -182,10 +194,10 @@ impl DatanodeTableManager {
|
||||
pub(crate) fn build_update_txn(
|
||||
&self,
|
||||
table_id: TableId,
|
||||
engine: &str,
|
||||
region_storage_path: &str,
|
||||
region_info: RegionInfo,
|
||||
current_region_distribution: RegionDistribution,
|
||||
new_region_distribution: RegionDistribution,
|
||||
new_region_options: &HashMap<String, String>,
|
||||
) -> Result<Txn> {
|
||||
let mut opts = Vec::new();
|
||||
|
||||
@@ -197,33 +209,20 @@ impl DatanodeTableManager {
|
||||
opts.push(TxnOp::Delete(raw_key))
|
||||
}
|
||||
}
|
||||
|
||||
let need_update_options = region_info.region_options != *new_region_options;
|
||||
for (datanode, regions) in new_region_distribution.into_iter() {
|
||||
if let Some(current_region) = current_region_distribution.get(&datanode) {
|
||||
// Updates if need.
|
||||
if *current_region != regions {
|
||||
let key = DatanodeTableKey::new(datanode, table_id);
|
||||
let raw_key = key.as_raw_key();
|
||||
let val = DatanodeTableValue::new(
|
||||
table_id,
|
||||
regions,
|
||||
engine.to_string(),
|
||||
region_storage_path.to_string(),
|
||||
)
|
||||
.try_as_raw_value()?;
|
||||
opts.push(TxnOp::Put(raw_key, val));
|
||||
}
|
||||
} else {
|
||||
// New datanodes
|
||||
let need_update =
|
||||
if let Some(current_region) = current_region_distribution.get(&datanode) {
|
||||
// Updates if need.
|
||||
*current_region != regions || need_update_options
|
||||
} else {
|
||||
true
|
||||
};
|
||||
if need_update {
|
||||
let key = DatanodeTableKey::new(datanode, table_id);
|
||||
let raw_key = key.as_raw_key();
|
||||
let val = DatanodeTableValue::new(
|
||||
table_id,
|
||||
regions,
|
||||
engine.to_string(),
|
||||
region_storage_path.to_string(),
|
||||
)
|
||||
.try_as_raw_value()?;
|
||||
let val = DatanodeTableValue::new(table_id, regions, region_info.clone())
|
||||
.try_as_raw_value()?;
|
||||
opts.push(TxnOp::Put(raw_key, val));
|
||||
}
|
||||
}
|
||||
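// Illustrative sketch (not part of this change): the decision the rewritten loop above makes for
// each datanode in the new distribution. An entry is rewritten when its region set changed, when
// the region options changed, or when the datanode is newly assigned. The helper name is
// hypothetical.
fn needs_rewrite_sketch(
    current_regions: Option<&Vec<RegionNumber>>,
    new_regions: &Vec<RegionNumber>,
    options_changed: bool,
) -> bool {
    match current_regions {
        // An existing entry is rewritten only if something it stores actually changed.
        Some(current) => current != new_regions || options_changed,
        // A datanode that had no entry before always gets one.
        None => true,
    }
}
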
@@ -270,11 +269,10 @@ mod tests {
|
||||
let value = DatanodeTableValue {
|
||||
table_id: 42,
|
||||
regions: vec![1, 2, 3],
|
||||
engine: Default::default(),
|
||||
region_storage_path: Default::default(),
|
||||
region_info: RegionInfo::default(),
|
||||
version: 1,
|
||||
};
|
||||
let literal = br#"{"table_id":42,"regions":[1,2,3],"engine":"","region_storage_path":"","version":1}"#;
|
||||
let literal = br#"{"table_id":42,"regions":[1,2,3],"engine":"","region_storage_path":"","region_options":{},"version":1}"#;
|
||||
|
||||
let raw_value = value.try_as_raw_value().unwrap();
|
||||
assert_eq!(raw_value, literal);
|
||||
|
||||
@@ -16,7 +16,7 @@ use serde::{Deserialize, Serialize};
|
||||
use table::engine::TableReference;
|
||||
use table::metadata::{RawTableInfo, TableId};
|
||||
|
||||
use super::TABLE_INFO_KEY_PREFIX;
|
||||
use super::{DeserializedValueWithBytes, TABLE_INFO_KEY_PREFIX};
|
||||
use crate::error::Result;
|
||||
use crate::key::{to_removed_key, TableMetaKey};
|
||||
use crate::kv_backend::txn::{Compare, CompareOp, Txn, TxnOp, TxnOpResponse};
|
||||
@@ -103,7 +103,7 @@ impl TableInfoManager {
|
||||
table_id: TableId,
|
||||
) -> (
|
||||
Txn,
|
||||
impl FnOnce(&Vec<TxnOpResponse>) -> Result<Option<TableInfoValue>>,
|
||||
impl FnOnce(&Vec<TxnOpResponse>) -> Result<Option<DeserializedValueWithBytes<TableInfoValue>>>,
|
||||
) {
|
||||
let key = TableInfoKey::new(table_id);
|
||||
let raw_key = key.as_raw_key();
|
||||
@@ -119,7 +119,7 @@ impl TableInfoManager {
|
||||
table_info_value: &TableInfoValue,
|
||||
) -> Result<(
|
||||
Txn,
|
||||
impl FnOnce(&Vec<TxnOpResponse>) -> Result<Option<TableInfoValue>>,
|
||||
impl FnOnce(&Vec<TxnOpResponse>) -> Result<Option<DeserializedValueWithBytes<TableInfoValue>>>,
|
||||
)> {
|
||||
let key = TableInfoKey::new(table_id);
|
||||
let raw_key = key.as_raw_key();
|
||||
@@ -143,15 +143,15 @@ impl TableInfoManager {
|
||||
pub(crate) fn build_update_txn(
|
||||
&self,
|
||||
table_id: TableId,
|
||||
current_table_info_value: &TableInfoValue,
|
||||
current_table_info_value: &DeserializedValueWithBytes<TableInfoValue>,
|
||||
new_table_info_value: &TableInfoValue,
|
||||
) -> Result<(
|
||||
Txn,
|
||||
impl FnOnce(&Vec<TxnOpResponse>) -> Result<Option<TableInfoValue>>,
|
||||
impl FnOnce(&Vec<TxnOpResponse>) -> Result<Option<DeserializedValueWithBytes<TableInfoValue>>>,
|
||||
)> {
|
||||
let key = TableInfoKey::new(table_id);
|
||||
let raw_key = key.as_raw_key();
|
||||
let raw_value = current_table_info_value.try_as_raw_value()?;
|
||||
let raw_value = current_table_info_value.into_bytes();
|
||||
|
||||
let txn = Txn::new()
|
||||
.when(vec![Compare::with_value(
|
||||
@@ -172,11 +172,11 @@ impl TableInfoManager {
|
||||
pub(crate) fn build_delete_txn(
|
||||
&self,
|
||||
table_id: TableId,
|
||||
table_info_value: &TableInfoValue,
|
||||
table_info_value: &DeserializedValueWithBytes<TableInfoValue>,
|
||||
) -> Result<Txn> {
|
||||
let key = TableInfoKey::new(table_id);
|
||||
let raw_key = key.as_raw_key();
|
||||
let raw_value = table_info_value.try_as_raw_value()?;
|
||||
let raw_value = table_info_value.into_bytes();
|
||||
let removed_key = to_removed_key(&String::from_utf8_lossy(&raw_key));
|
||||
|
||||
let txn = Txn::new().and_then(vec![
|
||||
@@ -189,7 +189,8 @@ impl TableInfoManager {
|
||||
|
||||
fn build_decode_fn(
|
||||
raw_key: Vec<u8>,
|
||||
) -> impl FnOnce(&Vec<TxnOpResponse>) -> Result<Option<TableInfoValue>> {
|
||||
) -> impl FnOnce(&Vec<TxnOpResponse>) -> Result<Option<DeserializedValueWithBytes<TableInfoValue>>>
|
||||
{
|
||||
move |kvs: &Vec<TxnOpResponse>| {
|
||||
kvs.iter()
|
||||
.filter_map(|resp| {
|
||||
@@ -201,29 +202,35 @@ impl TableInfoManager {
|
||||
})
|
||||
.flat_map(|r| &r.kvs)
|
||||
.find(|kv| kv.key == raw_key)
|
||||
.map(|kv| TableInfoValue::try_from_raw_value(&kv.value))
|
||||
.map(|kv| DeserializedValueWithBytes::from_inner_slice(&kv.value))
|
||||
.transpose()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub async fn get_removed(&self, table_id: TableId) -> Result<Option<TableInfoValue>> {
|
||||
pub async fn get_removed(
|
||||
&self,
|
||||
table_id: TableId,
|
||||
) -> Result<Option<DeserializedValueWithBytes<TableInfoValue>>> {
|
||||
let key = TableInfoKey::new(table_id).to_string();
|
||||
let removed_key = to_removed_key(&key).into_bytes();
|
||||
self.kv_backend
|
||||
.get(&removed_key)
|
||||
.await?
|
||||
.map(|x| TableInfoValue::try_from_raw_value(&x.value))
|
||||
.map(|x| DeserializedValueWithBytes::from_inner_slice(&x.value))
|
||||
.transpose()
|
||||
}
|
||||
|
||||
pub async fn get(&self, table_id: TableId) -> Result<Option<TableInfoValue>> {
|
||||
pub async fn get(
|
||||
&self,
|
||||
table_id: TableId,
|
||||
) -> Result<Option<DeserializedValueWithBytes<TableInfoValue>>> {
|
||||
let key = TableInfoKey::new(table_id);
|
||||
let raw_key = key.as_raw_key();
|
||||
self.kv_backend
|
||||
.get(&raw_key)
|
||||
.await?
|
||||
.map(|x| TableInfoValue::try_from_raw_value(&x.value))
|
||||
.map(|x| DeserializedValueWithBytes::from_inner_slice(&x.value))
|
||||
.transpose()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -268,6 +268,8 @@ mod tests {
|
||||
test_ok("my_table");
|
||||
test_ok("cpu:metrics");
|
||||
test_ok(":cpu:metrics");
|
||||
test_ok("sys.cpu.system");
|
||||
test_ok("foo-bar");
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
||||
@@ -17,6 +17,7 @@ use std::fmt::Display;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use table::metadata::TableId;
|
||||
|
||||
use super::DeserializedValueWithBytes;
|
||||
use crate::error::Result;
|
||||
use crate::key::{to_removed_key, RegionDistribution, TableMetaKey, TABLE_ROUTE_PREFIX};
|
||||
use crate::kv_backend::txn::{Compare, CompareOp, Txn, TxnOp, TxnOpResponse};
|
||||
@@ -81,7 +82,7 @@ impl TableRouteManager {
|
||||
table_id: TableId,
|
||||
) -> (
|
||||
Txn,
|
||||
impl FnOnce(&Vec<TxnOpResponse>) -> Result<Option<TableRouteValue>>,
|
||||
impl FnOnce(&Vec<TxnOpResponse>) -> Result<Option<DeserializedValueWithBytes<TableRouteValue>>>,
|
||||
) {
|
||||
let key = TableRouteKey::new(table_id);
|
||||
let raw_key = key.as_raw_key();
|
||||
@@ -97,7 +98,7 @@ impl TableRouteManager {
|
||||
table_route_value: &TableRouteValue,
|
||||
) -> Result<(
|
||||
Txn,
|
||||
impl FnOnce(&Vec<TxnOpResponse>) -> Result<Option<TableRouteValue>>,
|
||||
impl FnOnce(&Vec<TxnOpResponse>) -> Result<Option<DeserializedValueWithBytes<TableRouteValue>>>,
|
||||
)> {
|
||||
let key = TableRouteKey::new(table_id);
|
||||
let raw_key = key.as_raw_key();
|
||||
@@ -121,15 +122,15 @@ impl TableRouteManager {
|
||||
pub(crate) fn build_update_txn(
|
||||
&self,
|
||||
table_id: TableId,
|
||||
current_table_route_value: &TableRouteValue,
|
||||
current_table_route_value: &DeserializedValueWithBytes<TableRouteValue>,
|
||||
new_table_route_value: &TableRouteValue,
|
||||
) -> Result<(
|
||||
Txn,
|
||||
impl FnOnce(&Vec<TxnOpResponse>) -> Result<Option<TableRouteValue>>,
|
||||
impl FnOnce(&Vec<TxnOpResponse>) -> Result<Option<DeserializedValueWithBytes<TableRouteValue>>>,
|
||||
)> {
|
||||
let key = TableRouteKey::new(table_id);
|
||||
let raw_key = key.as_raw_key();
|
||||
let raw_value = current_table_route_value.try_as_raw_value()?;
|
||||
let raw_value = current_table_route_value.into_bytes();
|
||||
let new_raw_value: Vec<u8> = new_table_route_value.try_as_raw_value()?;
|
||||
|
||||
let txn = Txn::new()
|
||||
@@ -148,11 +149,11 @@ impl TableRouteManager {
|
||||
pub(crate) fn build_delete_txn(
|
||||
&self,
|
||||
table_id: TableId,
|
||||
table_route_value: &TableRouteValue,
|
||||
table_route_value: &DeserializedValueWithBytes<TableRouteValue>,
|
||||
) -> Result<Txn> {
|
||||
let key = TableRouteKey::new(table_id);
|
||||
let raw_key = key.as_raw_key();
|
||||
let raw_value = table_route_value.try_as_raw_value()?;
|
||||
let raw_value = table_route_value.into_bytes();
|
||||
let removed_key = to_removed_key(&String::from_utf8_lossy(&raw_key));
|
||||
|
||||
let txn = Txn::new().and_then(vec![
|
||||
@@ -165,7 +166,8 @@ impl TableRouteManager {
|
||||
|
||||
fn build_decode_fn(
|
||||
raw_key: Vec<u8>,
|
||||
) -> impl FnOnce(&Vec<TxnOpResponse>) -> Result<Option<TableRouteValue>> {
|
||||
) -> impl FnOnce(&Vec<TxnOpResponse>) -> Result<Option<DeserializedValueWithBytes<TableRouteValue>>>
|
||||
{
|
||||
move |response: &Vec<TxnOpResponse>| {
|
||||
response
|
||||
.iter()
|
||||
@@ -178,28 +180,34 @@ impl TableRouteManager {
|
||||
})
|
||||
.flat_map(|r| &r.kvs)
|
||||
.find(|kv| kv.key == raw_key)
|
||||
.map(|kv| TableRouteValue::try_from_raw_value(&kv.value))
|
||||
.map(|kv| DeserializedValueWithBytes::from_inner_slice(&kv.value))
|
||||
.transpose()
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn get(&self, table_id: TableId) -> Result<Option<TableRouteValue>> {
|
||||
pub async fn get(
|
||||
&self,
|
||||
table_id: TableId,
|
||||
) -> Result<Option<DeserializedValueWithBytes<TableRouteValue>>> {
|
||||
let key = TableRouteKey::new(table_id);
|
||||
self.kv_backend
|
||||
.get(&key.as_raw_key())
|
||||
.await?
|
||||
.map(|kv| TableRouteValue::try_from_raw_value(&kv.value))
|
||||
.map(|kv| DeserializedValueWithBytes::from_inner_slice(&kv.value))
|
||||
.transpose()
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub async fn get_removed(&self, table_id: TableId) -> Result<Option<TableRouteValue>> {
|
||||
pub async fn get_removed(
|
||||
&self,
|
||||
table_id: TableId,
|
||||
) -> Result<Option<DeserializedValueWithBytes<TableRouteValue>>> {
|
||||
let key = TableRouteKey::new(table_id).to_string();
|
||||
let removed_key = to_removed_key(&key).into_bytes();
|
||||
self.kv_backend
|
||||
.get(&removed_key)
|
||||
.await?
|
||||
.map(|x| TableRouteValue::try_from_raw_value(&x.value))
|
||||
.map(|x| DeserializedValueWithBytes::from_inner_slice(&x.value))
|
||||
.transpose()
|
||||
}
|
||||
|
||||
@@ -209,7 +217,7 @@ impl TableRouteManager {
|
||||
) -> Result<Option<RegionDistribution>> {
|
||||
self.get(table_id)
|
||||
.await?
|
||||
.map(|table_route| region_distribution(&table_route.region_routes))
|
||||
.map(|table_route| region_distribution(&table_route.into_inner().region_routes))
|
||||
.transpose()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -19,3 +19,4 @@ pub(crate) const METRIC_META_CREATE_SCHEMA: &str = "meta.create_schema";
|
||||
pub(crate) const METRIC_META_PROCEDURE_CREATE_TABLE: &str = "meta.procedure.create_table";
|
||||
pub(crate) const METRIC_META_PROCEDURE_DROP_TABLE: &str = "meta.procedure.drop_table";
|
||||
pub(crate) const METRIC_META_PROCEDURE_ALTER_TABLE: &str = "meta.procedure.alter_table";
|
||||
pub(crate) const METRIC_META_PROCEDURE_TRUNCATE_TABLE: &str = "meta.procedure.truncate_table";
|
||||
|
||||
@@ -21,6 +21,8 @@ use api::v1::meta::{
|
||||
SubmitDdlTaskResponse as PbSubmitDdlTaskResponse, TruncateTableTask as PbTruncateTableTask,
|
||||
};
|
||||
use api::v1::{AlterExpr, CreateTableExpr, DropTableExpr, TruncateTableExpr};
|
||||
use base64::engine::general_purpose;
|
||||
use base64::Engine as _;
|
||||
use prost::Message;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
@@ -65,8 +67,18 @@ impl DdlTask {
|
||||
DdlTask::AlterTable(AlterTableTask { alter_table })
|
||||
}
|
||||
|
||||
pub fn new_truncate_table(truncate_table: TruncateTableExpr) -> Self {
|
||||
DdlTask::TruncateTable(TruncateTableTask { truncate_table })
|
||||
pub fn new_truncate_table(
|
||||
catalog: String,
|
||||
schema: String,
|
||||
table: String,
|
||||
table_id: TableId,
|
||||
) -> Self {
|
||||
DdlTask::TruncateTable(TruncateTableTask {
|
||||
catalog,
|
||||
schema,
|
||||
table,
|
||||
table_id,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -112,7 +124,12 @@ impl TryFrom<SubmitDdlTaskRequest> for PbSubmitDdlTaskRequest {
|
||||
alter_table: Some(task.alter_table),
|
||||
}),
|
||||
DdlTask::TruncateTable(task) => Task::TruncateTableTask(PbTruncateTableTask {
|
||||
truncate_table: Some(task.truncate_table),
|
||||
truncate_table: Some(TruncateTableExpr {
|
||||
catalog_name: task.catalog,
|
||||
schema_name: task.schema,
|
||||
table_name: task.table,
|
||||
table_id: Some(api::v1::TableId { id: task.table_id }),
|
||||
}),
|
||||
}),
|
||||
};
|
||||
|
||||
@@ -272,7 +289,8 @@ impl Serialize for CreateTableTask {
|
||||
table_info,
|
||||
};
|
||||
let buf = pb.encode_to_vec();
|
||||
serializer.serialize_bytes(&buf)
|
||||
let encoded = general_purpose::STANDARD_NO_PAD.encode(buf);
|
||||
serializer.serialize_str(&encoded)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -281,7 +299,10 @@ impl<'de> Deserialize<'de> for CreateTableTask {
|
||||
where
|
||||
D: serde::Deserializer<'de>,
|
||||
{
|
||||
let buf = Vec::<u8>::deserialize(deserializer)?;
|
||||
let encoded = String::deserialize(deserializer)?;
|
||||
let buf = general_purpose::STANDARD_NO_PAD
|
||||
.decode(encoded)
|
||||
.map_err(|err| serde::de::Error::custom(err.to_string()))?;
|
||||
let expr: PbCreateTableTask = PbCreateTableTask::decode(&*buf)
|
||||
.map_err(|err| serde::de::Error::custom(err.to_string()))?;
|
||||
|
||||
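// Illustrative sketch (not part of this change): the wire format the Serialize/Deserialize impls
// above agree on — protobuf bytes wrapped in an unpadded base64 string so the task can pass
// through JSON without a raw-bytes representation. The helper name is hypothetical.
fn base64_round_trip_sketch(pb_bytes: Vec<u8>) {
    use base64::engine::general_purpose;
    use base64::Engine as _;

    // Serialize side: protobuf bytes -> base64 text, emitted as a JSON string.
    let encoded = general_purpose::STANDARD_NO_PAD.encode(&pb_bytes);

    // Deserialize side: base64 text -> the original protobuf bytes, then prost decoding.
    let decoded = general_purpose::STANDARD_NO_PAD.decode(encoded).unwrap();
    assert_eq!(decoded, pb_bytes);
}
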
@@ -338,7 +359,8 @@ impl Serialize for AlterTableTask {
|
||||
alter_table: Some(self.alter_table.clone()),
|
||||
};
|
||||
let buf = pb.encode_to_vec();
|
||||
serializer.serialize_bytes(&buf)
|
||||
let encoded = general_purpose::STANDARD_NO_PAD.encode(buf);
|
||||
serializer.serialize_str(&encoded)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -347,7 +369,10 @@ impl<'de> Deserialize<'de> for AlterTableTask {
|
||||
where
|
||||
D: serde::Deserializer<'de>,
|
||||
{
|
||||
let buf = Vec::<u8>::deserialize(deserializer)?;
|
||||
let encoded = String::deserialize(deserializer)?;
|
||||
let buf = general_purpose::STANDARD_NO_PAD
|
||||
.decode(encoded)
|
||||
.map_err(|err| serde::de::Error::custom(err.to_string()))?;
|
||||
let expr: PbAlterTableTask = PbAlterTableTask::decode(&*buf)
|
||||
.map_err(|err| serde::de::Error::custom(err.to_string()))?;
|
||||
|
||||
@@ -358,27 +383,28 @@ impl<'de> Deserialize<'de> for AlterTableTask {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq)]
|
||||
#[derive(Debug, PartialEq, Serialize, Deserialize)]
|
||||
pub struct TruncateTableTask {
|
||||
pub truncate_table: TruncateTableExpr,
|
||||
pub catalog: String,
|
||||
pub schema: String,
|
||||
pub table: String,
|
||||
pub table_id: TableId,
|
||||
}
|
||||
|
||||
impl TruncateTableTask {
|
||||
pub fn table_ref(&self) -> TableReference {
|
||||
TableReference {
|
||||
catalog: &self.truncate_table.catalog_name,
|
||||
schema: &self.truncate_table.schema_name,
|
||||
table: &self.truncate_table.table_name,
|
||||
catalog: &self.catalog,
|
||||
schema: &self.schema,
|
||||
table: &self.table,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn table_name(&self) -> TableName {
|
||||
let table = &self.truncate_table;
|
||||
|
||||
TableName {
|
||||
catalog_name: table.catalog_name.to_string(),
|
||||
schema_name: table.schema_name.to_string(),
|
||||
table_name: table.table_name.to_string(),
|
||||
catalog_name: self.catalog.to_string(),
|
||||
schema_name: self.schema.to_string(),
|
||||
table_name: self.table.to_string(),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -388,39 +414,20 @@ impl TryFrom<PbTruncateTableTask> for TruncateTableTask {
|
||||
|
||||
fn try_from(pb: PbTruncateTableTask) -> Result<Self> {
|
||||
let truncate_table = pb.truncate_table.context(error::InvalidProtoMsgSnafu {
|
||||
err_msg: "expected truncate_table",
|
||||
err_msg: "expected drop table",
|
||||
})?;
|
||||
|
||||
Ok(TruncateTableTask { truncate_table })
|
||||
}
|
||||
}
|
||||
|
||||
impl Serialize for TruncateTableTask {
|
||||
fn serialize<S>(&self, serializer: S) -> result::Result<S::Ok, S::Error>
|
||||
where
|
||||
S: serde::Serializer,
|
||||
{
|
||||
let pb = PbTruncateTableTask {
|
||||
truncate_table: Some(self.truncate_table.clone()),
|
||||
};
|
||||
let buf = pb.encode_to_vec();
|
||||
serializer.serialize_bytes(&buf)
|
||||
}
|
||||
}
|
||||
|
||||
impl<'de> Deserialize<'de> for TruncateTableTask {
|
||||
fn deserialize<D>(deserializer: D) -> result::Result<Self, D::Error>
|
||||
where
|
||||
D: serde::Deserializer<'de>,
|
||||
{
|
||||
let buf = Vec::<u8>::deserialize(deserializer)?;
|
||||
let task: PbTruncateTableTask = PbTruncateTableTask::decode(&*buf)
|
||||
.map_err(|err| serde::de::Error::custom(err.to_string()))?;
|
||||
|
||||
let task = TruncateTableTask::try_from(task)
|
||||
.map_err(|err| serde::de::Error::custom(err.to_string()))?;
|
||||
|
||||
Ok(task)
|
||||
Ok(Self {
|
||||
catalog: truncate_table.catalog_name,
|
||||
schema: truncate_table.schema_name,
|
||||
table: truncate_table.table_name,
|
||||
table_id: truncate_table
|
||||
.table_id
|
||||
.context(error::InvalidProtoMsgSnafu {
|
||||
err_msg: "expected table_id",
|
||||
})?
|
||||
.id,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -428,12 +435,12 @@ impl<'de> Deserialize<'de> for TruncateTableTask {
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
use api::v1::CreateTableExpr;
|
||||
use api::v1::{AlterExpr, CreateTableExpr};
|
||||
use datatypes::schema::SchemaBuilder;
|
||||
use table::metadata::RawTableInfo;
|
||||
use table::test_util::table_info::test_table_info;
|
||||
|
||||
use super::CreateTableTask;
|
||||
use super::{AlterTableTask, CreateTableTask};
|
||||
|
||||
#[test]
|
||||
fn test_basic_ser_de_create_table_task() {
|
||||
@@ -450,4 +457,16 @@ mod tests {
|
||||
let de = serde_json::from_slice(&output).unwrap();
|
||||
assert_eq!(task, de);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_basic_ser_de_alter_table_task() {
|
||||
let task = AlterTableTask {
|
||||
alter_table: AlterExpr::default(),
|
||||
};
|
||||
|
||||
let output = serde_json::to_vec(&task).unwrap();
|
||||
|
||||
let de = serde_json::from_slice(&output).unwrap();
|
||||
assert_eq!(task, de);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -9,6 +9,7 @@ async-stream.workspace = true
|
||||
async-trait.workspace = true
|
||||
backon = "0.4"
|
||||
common-error = { workspace = true }
|
||||
common-macro = { workspace = true }
|
||||
common-runtime = { workspace = true }
|
||||
common-telemetry = { workspace = true }
|
||||
futures.workspace = true
|
||||
|
||||
@@ -18,13 +18,15 @@ use std::sync::Arc;
|
||||
|
||||
use common_error::ext::{BoxedError, ErrorExt};
|
||||
use common_error::status_code::StatusCode;
|
||||
use common_macro::stack_trace_debug;
|
||||
use snafu::{Location, Snafu};
|
||||
|
||||
use crate::procedure::ProcedureId;
|
||||
|
||||
/// Procedure error.
|
||||
#[derive(Debug, Snafu)]
|
||||
#[derive(Snafu)]
|
||||
#[snafu(visibility(pub))]
|
||||
#[stack_trace_debug]
|
||||
pub enum Error {
|
||||
#[snafu(display("Failed to execute procedure due to external error"))]
|
||||
External { source: BoxedError },
|
||||
@@ -34,7 +36,8 @@ pub enum Error {
|
||||
|
||||
#[snafu(display("Failed to serialize to json"))]
|
||||
ToJson {
|
||||
source: serde_json::Error,
|
||||
#[snafu(source)]
|
||||
error: serde_json::Error,
|
||||
location: Location,
|
||||
},
|
||||
|
||||
@@ -54,7 +57,8 @@ pub enum Error {
|
||||
#[snafu(display("Failed to delete {}", key))]
|
||||
DeleteState {
|
||||
key: String,
|
||||
source: object_store::Error,
|
||||
#[snafu(source)]
|
||||
error: object_store::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to delete keys: '{keys}'"))]
|
||||
@@ -73,7 +77,8 @@ pub enum Error {
|
||||
|
||||
#[snafu(display("Failed to deserialize from json"))]
|
||||
FromJson {
|
||||
source: serde_json::Error,
|
||||
#[snafu(source)]
|
||||
error: serde_json::Error,
|
||||
location: Location,
|
||||
},
|
||||
|
||||
@@ -85,7 +90,8 @@ pub enum Error {
|
||||
|
||||
#[snafu(display("Failed to wait watcher"))]
|
||||
WaitWatcher {
|
||||
source: tokio::sync::watch::error::RecvError,
|
||||
#[snafu(source)]
|
||||
error: tokio::sync::watch::error::RecvError,
|
||||
location: Location,
|
||||
},
|
||||
|
||||
@@ -102,7 +108,10 @@ pub enum Error {
|
||||
},
|
||||
|
||||
    #[snafu(display("Corrupted data, error: "))]
    CorruptedData { source: FromUtf8Error },
    CorruptedData {
        #[snafu(source)]
        error: FromUtf8Error,
    },

    #[snafu(display("Failed to start the remove_outdated_meta method, error"))]
    StartRemoveOutdatedMetaTask {

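// Illustrative sketch (not part of this change): the error-variant pattern these hunks converge
// on, assuming the same imports as the enum above (`snafu::{Location, Snafu}` and
// `common_macro::stack_trace_debug`). External causes move from `source: X` to
// `#[snafu(source)] error: X`, and `#[derive(Debug)]` gives way to `#[stack_trace_debug]` so
// Debug output carries the whole location chain. The enum itself is hypothetical.
#[derive(Snafu)]
#[snafu(visibility(pub))]
#[stack_trace_debug]
pub enum SketchError {
    #[snafu(display("Failed to serialize to json"))]
    ToJson {
        #[snafu(source)]
        error: serde_json::Error,
        location: Location,
    },
}
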
@@ -229,7 +229,7 @@ impl ManagerContext {
|
||||
let procedure = loader(&message.data)
|
||||
.map_err(|e| {
|
||||
logging::error!(
|
||||
"Failed to load procedure data, key: {}, source: {}",
|
||||
"Failed to load procedure data, key: {}, source: {:?}",
|
||||
procedure_id,
|
||||
e
|
||||
);
|
||||
|
||||
@@ -453,14 +453,13 @@ mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use common_error::ext::PlainError;
|
||||
use common_error::ext::{ErrorExt, PlainError};
|
||||
use common_error::mock::MockError;
|
||||
use common_error::status_code::StatusCode;
|
||||
use common_test_util::temp_dir::create_temp_dir;
|
||||
use futures_util::future::BoxFuture;
|
||||
use futures_util::FutureExt;
|
||||
use object_store::ObjectStore;
|
||||
use snafu::ErrorCompat;
|
||||
|
||||
use super::*;
|
||||
use crate::local::test_util;
|
||||
@@ -943,14 +942,7 @@ mod tests {
|
||||
|
||||
// Run the runner and execute the procedure.
|
||||
runner.run().await;
|
||||
let err = meta
|
||||
.state()
|
||||
.error()
|
||||
.unwrap()
|
||||
.iter_chain()
|
||||
.last()
|
||||
.unwrap()
|
||||
.to_string();
|
||||
let err = meta.state().error().unwrap().output_msg();
|
||||
assert!(err.contains("subprocedure failed"), "{err}");
|
||||
}
|
||||
}
|
||||
|
||||
@@ -224,7 +224,7 @@ impl ProcedureStore {
|
||||
serde_json::from_slice(value)
|
||||
.map_err(|e| {
|
||||
// `e` doesn't impl ErrorExt, so we print it as a normal error.
logging::error!("Failed to parse value, key: {:?}, source: {}", key, e);
|
||||
logging::error!("Failed to parse value, key: {:?}, source: {:?}", key, e);
|
||||
e
|
||||
})
|
||||
.ok()
|
||||
|
||||
@@ -8,6 +8,7 @@ license.workspace = true
|
||||
api = { workspace = true }
|
||||
async-trait.workspace = true
|
||||
common-error = { workspace = true }
|
||||
common-macro = { workspace = true }
|
||||
common-recordbatch = { workspace = true }
|
||||
common-time = { workspace = true }
|
||||
datafusion-common.workspace = true
|
||||
|
||||
@@ -17,6 +17,7 @@ use std::any::Any;
|
||||
use arrow::error::ArrowError;
|
||||
use common_error::ext::{BoxedError, ErrorExt};
|
||||
use common_error::status_code::StatusCode;
|
||||
use common_macro::stack_trace_debug;
|
||||
use common_recordbatch::error::Error as RecordbatchError;
|
||||
use datafusion_common::DataFusionError;
|
||||
use datatypes::arrow;
|
||||
@@ -26,8 +27,9 @@ use datatypes::prelude::ConcreteDataType;
|
||||
use snafu::{Location, Snafu};
|
||||
use statrs::StatsError;
|
||||
|
||||
#[derive(Debug, Snafu)]
|
||||
#[derive(Snafu)]
|
||||
#[snafu(visibility(pub))]
|
||||
#[stack_trace_debug]
|
||||
pub enum Error {
|
||||
#[snafu(display("Failed to execute Python UDF: {}", msg))]
|
||||
PyUdf {
|
||||
@@ -44,7 +46,8 @@ pub enum Error {
|
||||
|
||||
#[snafu(display("Failed to execute function"))]
|
||||
ExecuteFunction {
|
||||
source: DataFusionError,
|
||||
#[snafu(source)]
|
||||
error: DataFusionError,
|
||||
location: Location,
|
||||
},
|
||||
|
||||
@@ -57,7 +60,8 @@ pub enum Error {
|
||||
|
||||
#[snafu(display("Failed to generate function"))]
|
||||
GenerateFunction {
|
||||
source: StatsError,
|
||||
#[snafu(source)]
|
||||
error: StatsError,
|
||||
location: Location,
|
||||
},
|
||||
|
||||
@@ -109,7 +113,8 @@ pub enum Error {
|
||||
|
||||
#[snafu(display("General DataFusion error"))]
|
||||
GeneralDataFusion {
|
||||
source: DataFusionError,
|
||||
#[snafu(source)]
|
||||
error: DataFusionError,
|
||||
location: Location,
|
||||
},
|
||||
|
||||
@@ -133,14 +138,16 @@ pub enum Error {
|
||||
|
||||
#[snafu(display("Failed to cast array to {:?}", typ))]
|
||||
TypeCast {
|
||||
source: ArrowError,
|
||||
#[snafu(source)]
|
||||
error: ArrowError,
|
||||
typ: arrow::datatypes::DataType,
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to perform compute operation on arrow arrays"))]
|
||||
ArrowCompute {
|
||||
source: ArrowError,
|
||||
#[snafu(source)]
|
||||
error: ArrowError,
|
||||
location: Location,
|
||||
},
|
||||
|
||||
|
||||
@@ -6,6 +6,7 @@ license.workspace = true
|
||||
|
||||
[dependencies]
|
||||
common-error = { workspace = true }
|
||||
common-macro = { workspace = true }
|
||||
datafusion-common.workspace = true
|
||||
datafusion.workspace = true
|
||||
datatypes = { workspace = true }
|
||||
|
||||
@@ -17,17 +17,20 @@ use std::any::Any;
|
||||
|
||||
use common_error::ext::{BoxedError, ErrorExt};
|
||||
use common_error::status_code::StatusCode;
|
||||
use common_macro::stack_trace_debug;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use snafu::{Location, Snafu};
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
|
||||
#[derive(Debug, Snafu)]
|
||||
#[derive(Snafu)]
|
||||
#[snafu(visibility(pub))]
|
||||
#[stack_trace_debug]
|
||||
pub enum Error {
|
||||
#[snafu(display("Fail to create datafusion record batch"))]
|
||||
NewDfRecordBatch {
|
||||
source: datatypes::arrow::error::ArrowError,
|
||||
#[snafu(source)]
|
||||
error: datatypes::arrow::error::ArrowError,
|
||||
location: Location,
|
||||
},
|
||||
|
||||
@@ -52,21 +55,24 @@ pub enum Error {
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to poll stream"))]
|
||||
#[snafu(display(""))]
|
||||
PollStream {
|
||||
source: datafusion::error::DataFusionError,
|
||||
#[snafu(source)]
|
||||
error: datafusion::error::DataFusionError,
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Fail to format record batch"))]
|
||||
Format {
|
||||
source: datatypes::arrow::error::ArrowError,
|
||||
#[snafu(source)]
|
||||
error: datatypes::arrow::error::ArrowError,
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to init Recordbatch stream"))]
|
||||
InitRecordbatchStream {
|
||||
source: datafusion_common::DataFusionError,
|
||||
#[snafu(source)]
|
||||
error: datafusion_common::DataFusionError,
|
||||
location: Location,
|
||||
},
|
||||
|
||||
@@ -76,7 +82,8 @@ pub enum Error {
|
||||
projection,
|
||||
))]
|
||||
ProjectArrowRecordBatch {
|
||||
source: datatypes::arrow::error::ArrowError,
|
||||
#[snafu(source)]
|
||||
error: datatypes::arrow::error::ArrowError,
|
||||
location: Location,
|
||||
schema: datatypes::schema::SchemaRef,
|
||||
projection: Vec<usize>,
|
||||
|
||||
@@ -7,6 +7,7 @@ license.workspace = true
|
||||
[dependencies]
|
||||
async-trait.workspace = true
|
||||
common-error = { workspace = true }
|
||||
common-macro = { workspace = true }
|
||||
common-telemetry = { workspace = true }
|
||||
metrics.workspace = true
|
||||
once_cell.workspace = true
|
||||
|
||||
@@ -15,17 +15,20 @@
|
||||
use std::any::Any;
|
||||
|
||||
use common_error::ext::ErrorExt;
|
||||
use common_macro::stack_trace_debug;
|
||||
use snafu::{Location, Snafu};
|
||||
use tokio::task::JoinError;
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
|
||||
#[derive(Debug, Snafu)]
|
||||
#[derive(Snafu)]
|
||||
#[snafu(visibility(pub(crate)))]
|
||||
#[stack_trace_debug]
|
||||
pub enum Error {
|
||||
#[snafu(display("Failed to build runtime"))]
|
||||
BuildRuntime {
|
||||
source: std::io::Error,
|
||||
#[snafu(source)]
|
||||
error: std::io::Error,
|
||||
location: Location,
|
||||
},
|
||||
|
||||
@@ -35,7 +38,8 @@ pub enum Error {
|
||||
#[snafu(display("Failed to wait for repeated task {} to stop", name))]
|
||||
WaitGcTaskStop {
|
||||
name: String,
|
||||
source: JoinError,
|
||||
#[snafu(source)]
|
||||
error: JoinError,
|
||||
location: Location,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -11,6 +11,7 @@ bytes = "1.1"
|
||||
catalog = { workspace = true }
|
||||
common-catalog = { workspace = true }
|
||||
common-error = { workspace = true }
|
||||
common-macro = { workspace = true }
|
||||
common-telemetry = { workspace = true }
|
||||
datafusion-common.workspace = true
|
||||
datafusion-expr.workspace = true
|
||||
|
||||
@@ -16,13 +16,15 @@ use std::any::Any;
|
||||
|
||||
use common_error::ext::{BoxedError, ErrorExt};
|
||||
use common_error::status_code::StatusCode;
|
||||
use common_macro::stack_trace_debug;
|
||||
use datafusion::error::DataFusionError;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use prost::{DecodeError, EncodeError};
|
||||
use snafu::{Location, Snafu};
|
||||
|
||||
#[derive(Debug, Snafu)]
|
||||
#[derive(Snafu)]
|
||||
#[snafu(visibility(pub))]
|
||||
#[stack_trace_debug]
|
||||
pub enum Error {
|
||||
#[snafu(display("Unsupported physical plan: {}", name))]
|
||||
UnsupportedPlan { name: String, location: Location },
|
||||
@@ -41,13 +43,15 @@ pub enum Error {
|
||||
|
||||
#[snafu(display("Failed to decode substrait relation"))]
|
||||
DecodeRel {
|
||||
source: DecodeError,
|
||||
#[snafu(source)]
|
||||
error: DecodeError,
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to encode substrait relation"))]
|
||||
EncodeRel {
|
||||
source: EncodeError,
|
||||
#[snafu(source)]
|
||||
error: EncodeError,
|
||||
location: Location,
|
||||
},
|
||||
|
||||
@@ -69,7 +73,8 @@ pub enum Error {
|
||||
|
||||
#[snafu(display("Internal error from DataFusion"))]
|
||||
DFInternal {
|
||||
source: DataFusionError,
|
||||
#[snafu(source)]
|
||||
error: DataFusionError,
|
||||
location: Location,
|
||||
},
|
||||
|
||||
@@ -110,13 +115,15 @@ pub enum Error {
|
||||
|
||||
#[snafu(display("Failed to encode DataFusion plan"))]
|
||||
EncodeDfPlan {
|
||||
source: datafusion::error::DataFusionError,
|
||||
#[snafu(source)]
|
||||
error: datafusion::error::DataFusionError,
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to decode DataFusion plan"))]
|
||||
DecodeDfPlan {
|
||||
source: datafusion::error::DataFusionError,
|
||||
#[snafu(source)]
|
||||
error: datafusion::error::DataFusionError,
|
||||
location: Location,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -38,94 +38,40 @@ macro_rules! error {
|
||||
|
||||
// error!(e; target: "my_target", "a {} event", "log")
|
||||
($e:expr; target: $target:expr, $($arg:tt)+) => ({
|
||||
use $crate::common_error::ext::ErrorExt;
|
||||
use std::error::Error;
|
||||
match ($e.source(), $e.location_opt()) {
|
||||
(Some(source), Some(location)) => {
|
||||
$crate::log!(
|
||||
target: $target,
|
||||
$crate::logging::Level::ERROR,
|
||||
err.msg = %$e,
|
||||
err.code = %$e.status_code(),
|
||||
err.source = source,
|
||||
err.location = %location,
|
||||
$($arg)+
|
||||
)
|
||||
},
|
||||
(Some(source), None) => {
|
||||
$crate::log!(
|
||||
target: $target,
|
||||
$crate::logging::Level::ERROR,
|
||||
err.msg = %$e,
|
||||
err.code = %$e.status_code(),
|
||||
err.source = source,
|
||||
$($arg)+
|
||||
)
|
||||
},
|
||||
(None, Some(location)) => {
|
||||
$crate::log!(
|
||||
target: $target,
|
||||
$crate::logging::Level::ERROR,
|
||||
err.msg = %$e,
|
||||
err.code = %$e.status_code(),
|
||||
err.location = %location,
|
||||
$($arg)+
|
||||
)
|
||||
},
|
||||
(None, None) => {
|
||||
$crate::log!(
|
||||
target: $target,
|
||||
$crate::logging::Level::ERROR,
|
||||
err.msg = %$e,
|
||||
err.code = %$e.status_code(),
|
||||
$($arg)+
|
||||
)
|
||||
}
|
||||
}
|
||||
$crate::log!(
|
||||
target: $target,
|
||||
$crate::logging::Level::ERROR,
|
||||
err = ?$e,
|
||||
$($arg)+
|
||||
)
|
||||
});
|
||||
|
||||
// error!(%e; target: "my_target", "a {} event", "log")
|
||||
(%$e:expr; target: $target:expr, $($arg:tt)+) => ({
|
||||
$crate::log!(
|
||||
target: $target,
|
||||
$crate::logging::Level::ERROR,
|
||||
err = %$e,
|
||||
$($arg)+
|
||||
)
|
||||
});
|
||||
|
||||
// error!(e; "a {} event", "log")
|
||||
($e:expr; $($arg:tt)+) => ({
|
||||
use std::error::Error;
|
||||
use $crate::common_error::ext::ErrorExt;
|
||||
match ($e.source(), $e.location_opt()) {
|
||||
(Some(source), Some(location)) => {
|
||||
$crate::log!(
|
||||
$crate::logging::Level::ERROR,
|
||||
err.msg = %$e,
|
||||
err.code = %$e.status_code(),
|
||||
err.source = source,
|
||||
err.location = %location,
|
||||
$($arg)+
|
||||
)
|
||||
},
|
||||
(Some(source), None) => {
|
||||
$crate::log!(
|
||||
$crate::logging::Level::ERROR,
|
||||
err.msg = %$e,
|
||||
err.code = %$e.status_code(),
|
||||
err.source = source,
|
||||
$($arg)+
|
||||
)
|
||||
},
|
||||
(None, Some(location)) => {
|
||||
$crate::log!(
|
||||
$crate::logging::Level::ERROR,
|
||||
err.msg = %$e,
|
||||
err.code = %$e.status_code(),
|
||||
err.location = %location,
|
||||
$($arg)+
|
||||
)
|
||||
},
|
||||
(None, None) => {
|
||||
$crate::log!(
|
||||
$crate::logging::Level::ERROR,
|
||||
err.msg = %$e,
|
||||
err.code = %$e.status_code(),
|
||||
$($arg)+
|
||||
)
|
||||
}
|
||||
}
|
||||
$crate::log!(
|
||||
$crate::logging::Level::ERROR,
|
||||
err = ?$e,
|
||||
$($arg)+
|
||||
)
|
||||
});
|
||||
|
||||
// error!(%e; "a {} event", "log")
|
||||
(%$e:expr; $($arg:tt)+) => ({
|
||||
$crate::log!(
|
||||
$crate::logging::Level::ERROR,
|
||||
err = %$e,
|
||||
$($arg)+
|
||||
)
|
||||
});
|
||||
|
||||
// error!("a {} event", "log")
|
||||
@@ -144,46 +90,20 @@ macro_rules! warn {
|
||||
|
||||
// warn!(e; "a {} event", "log")
|
||||
($e:expr; $($arg:tt)+) => ({
|
||||
use std::error::Error;
|
||||
use $crate::common_error::ext::ErrorExt;
|
||||
match ($e.source(), $e.location_opt()) {
|
||||
(Some(source), Some(location)) => {
|
||||
$crate::log!(
|
||||
$crate::logging::Level::WARN,
|
||||
err.msg = %$e,
|
||||
err.code = %$e.status_code(),
|
||||
err.source = source,
|
||||
err.location = %location,
|
||||
$($arg)+
|
||||
)
|
||||
},
|
||||
(Some(source), None) => {
|
||||
$crate::log!(
|
||||
$crate::logging::Level::WARN,
|
||||
err.msg = %$e,
|
||||
err.code = %$e.status_code(),
|
||||
err.source = source,
|
||||
$($arg)+
|
||||
)
|
||||
},
|
||||
(None, Some(location)) => {
|
||||
$crate::log!(
|
||||
$crate::logging::Level::WARN,
|
||||
err.msg = %$e,
|
||||
err.code = %$e.status_code(),
|
||||
err.location = %location,
|
||||
$($arg)+
|
||||
)
|
||||
},
|
||||
(None, None) => {
|
||||
$crate::log!(
|
||||
$crate::logging::Level::WARN,
|
||||
err.msg = %$e,
|
||||
err.code = %$e.status_code(),
|
||||
$($arg)+
|
||||
)
|
||||
}
|
||||
}
|
||||
$crate::log!(
|
||||
$crate::logging::Level::WARN,
|
||||
err = ?$e,
|
||||
$($arg)+
|
||||
)
|
||||
});
|
||||
|
||||
// warn!(%e; "a {} event", "log")
|
||||
(%$e:expr; $($arg:tt)+) => ({
|
||||
$crate::log!(
|
||||
$crate::logging::Level::WARN,
|
||||
err = %$e,
|
||||
$($arg)+
|
||||
)
|
||||
});
|
||||
|
||||
// warn!("a {} event", "log")
|
||||
@@ -305,8 +225,10 @@ mod tests {
|
||||
error!(target: "my_target", "hello {}", "world");
|
||||
// Supports both owned and reference type.
|
||||
error!(err; target: "my_target", "hello {}", "world");
|
||||
error!(%err; target: "my_target", "hello {}", "world");
|
||||
error!(err_ref; target: "my_target", "hello {}", "world");
|
||||
error!(err_ref2; "hello {}", "world");
|
||||
error!(%err_ref2; "hello {}", "world");
|
||||
error!("hello {}", "world");
|
||||
|
||||
let root_err = MockError::with_source(err);
|
||||
|
||||
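// Illustrative sketch (not part of this change): the two call forms the simplified macros keep,
// assuming the `error!`/`warn!` macros above are in scope and `do_work()` is a hypothetical
// fallible operation. `err; ...` logs the error with its Debug representation (which, via
// `#[stack_trace_debug]`, carries the location chain), while `%err; ...` logs only the Display
// message.
fn log_failure_sketch() {
    if let Err(err) = do_work() {
        // Debug form: structured, stack-trace-style output.
        error!(err; "failed to handle request");
        // Display form: just the human-readable message.
        warn!(%err; "failed to handle request");
    }
}
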
@@ -14,6 +14,7 @@
|
||||
|
||||
// Metric utilities, inspired by Databend.
|
||||
use std::fmt;
|
||||
use std::sync::{Arc, Once, RwLock};
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
@@ -63,6 +64,7 @@ pub fn try_handle() -> Option<PrometheusHandle> {
|
||||
pub struct Timer {
|
||||
start: Instant,
|
||||
histogram: Histogram,
|
||||
observed: bool,
|
||||
}
|
||||
|
||||
impl From<Histogram> for Timer {
|
||||
@@ -71,12 +73,22 @@ impl From<Histogram> for Timer {
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Debug for Timer {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.debug_struct("Timer")
|
||||
.field("start", &self.start)
|
||||
.field("observed", &self.observed)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl Timer {
|
||||
/// Creates a timer from given histogram.
|
||||
pub fn from_histogram(histogram: Histogram) -> Self {
|
||||
Self {
|
||||
start: Instant::now(),
|
||||
histogram,
|
||||
observed: false,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -85,6 +97,7 @@ impl Timer {
|
||||
Self {
|
||||
start: Instant::now(),
|
||||
histogram: register_histogram!(name),
|
||||
observed: false,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -93,6 +106,7 @@ impl Timer {
|
||||
Self {
|
||||
start: Instant::now(),
|
||||
histogram: register_histogram!(name, labels),
|
||||
observed: false,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -100,11 +114,18 @@ impl Timer {
|
||||
pub fn elapsed(&self) -> Duration {
|
||||
self.start.elapsed()
|
||||
}
|
||||
|
||||
/// Discards the timer result.
|
||||
pub fn discard(mut self) {
|
||||
self.observed = true;
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for Timer {
|
||||
fn drop(&mut self) {
|
||||
self.histogram.record(self.elapsed())
|
||||
if !self.observed {
|
||||
self.histogram.record(self.elapsed())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
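The `observed` flag added above lets a caller drop a `Timer` without reporting it: `discard()` flips the flag, and the `Drop` impl only records when the flag is still unset. Below is a minimal standalone sketch of that record-on-drop-unless-discarded pattern; it is not the crate's actual API, and `record` here just prints instead of feeding a `metrics::Histogram`, so it runs without a metrics recorder installed.

```rust
use std::time::{Duration, Instant};

struct Timer {
    start: Instant,
    observed: bool,
}

impl Timer {
    fn new() -> Self {
        Timer {
            start: Instant::now(),
            observed: false,
        }
    }

    fn elapsed(&self) -> Duration {
        self.start.elapsed()
    }

    /// Marking the timer as observed means `Drop` records nothing.
    fn discard(mut self) {
        self.observed = true;
    }

    /// Stand-in for `Histogram::record`.
    fn record(&self, elapsed: Duration) {
        println!("recorded {elapsed:?}");
    }
}

impl Drop for Timer {
    fn drop(&mut self) {
        if !self.observed {
            self.record(self.elapsed());
        }
    }
}

fn main() {
    let timer = Timer::new();
    // ... timed work ...
    drop(timer); // records the elapsed time

    let timer = Timer::new();
    timer.discard(); // dropped inside discard() without recording anything
}
```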
@@ -9,6 +9,7 @@ arrow.workspace = true
chrono-tz = "0.8"
chrono.workspace = true
common-error = { workspace = true }
common-macro = { workspace = true }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
snafu = { version = "0.7", features = ["backtraces"] }

@@ -20,11 +20,12 @@ use serde::{Deserialize, Serialize};

use crate::error::{Error, InvalidDateStrSnafu, Result};
use crate::util::{format_utc_datetime, local_datetime_to_utc};
use crate::Date;

const DATETIME_FORMAT: &str = "%F %T";
const DATETIME_FORMAT_WITH_TZ: &str = "%F %T%z";

/// [DateTime] represents the **seconds elapsed since "1970-01-01 00:00:00 UTC" (UNIX Epoch)**.
/// [DateTime] represents the **milliseconds elapsed since "1970-01-01 00:00:00 UTC" (UNIX Epoch)**.
#[derive(
    Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default, Serialize, Deserialize,
)]
@@ -32,7 +33,7 @@ pub struct DateTime(i64);

impl Display for DateTime {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        if let Some(abs_time) = NaiveDateTime::from_timestamp_opt(self.0, 0) {
        if let Some(abs_time) = NaiveDateTime::from_timestamp_millis(self.0) {
            write!(
                f,
                "{}",
@@ -52,7 +53,7 @@ impl From<DateTime> for serde_json::Value {

impl From<NaiveDateTime> for DateTime {
    fn from(value: NaiveDateTime) -> Self {
        DateTime::from(value.timestamp())
        DateTime::from(value.timestamp_millis())
    }
}

@@ -61,20 +62,20 @@ impl FromStr for DateTime {

    fn from_str(s: &str) -> Result<Self> {
        let s = s.trim();
        let timestamp = if let Ok(d) = NaiveDateTime::parse_from_str(s, DATETIME_FORMAT) {
        let timestamp_millis = if let Ok(d) = NaiveDateTime::parse_from_str(s, DATETIME_FORMAT) {
            match local_datetime_to_utc(&d) {
                LocalResult::None => {
                    return InvalidDateStrSnafu { raw: s }.fail();
                }
                LocalResult::Single(d) | LocalResult::Ambiguous(d, _) => d.timestamp(),
                LocalResult::Single(d) | LocalResult::Ambiguous(d, _) => d.timestamp_millis(),
            }
        } else if let Ok(v) = chrono::DateTime::parse_from_str(s, DATETIME_FORMAT_WITH_TZ) {
            v.timestamp()
            v.timestamp_millis()
        } else {
            return InvalidDateStrSnafu { raw: s }.fail();
        };

        Ok(Self(timestamp))
        Ok(Self(timestamp_millis))
    }
}

@@ -84,17 +85,32 @@ impl From<i64> for DateTime {
    }
}

impl From<Date> for DateTime {
    fn from(value: Date) -> Self {
        // It's safe, i32 * 86400000 won't be overflow
        Self(value.to_secs() * 1000)
    }
}

impl DateTime {
    pub fn new(val: i64) -> Self {
        Self(val)
    /// Create a new [DateTime] from milliseconds elapsed since "1970-01-01 00:00:00 UTC" (UNIX Epoch).
    pub fn new(millis: i64) -> Self {
        Self(millis)
    }

    /// Get the milliseconds elapsed since "1970-01-01 00:00:00 UTC" (UNIX Epoch).
    pub fn val(&self) -> i64 {
        self.0
    }

    /// Convert to [NaiveDateTime].
    pub fn to_chrono_datetime(&self) -> Option<NaiveDateTime> {
        NaiveDateTime::from_timestamp_opt(self.0, 0)
        NaiveDateTime::from_timestamp_millis(self.0)
    }

    /// Convert to [common_time::date].
    pub fn to_date(&self) -> Option<Date> {
        self.to_chrono_datetime().map(|d| Date::from(d.date()))
    }
}

@@ -106,8 +122,8 @@ mod tests {
    pub fn test_new_date_time() {
        std::env::set_var("TZ", "Asia/Shanghai");
        assert_eq!("1970-01-01 08:00:00+0800", DateTime::new(0).to_string());
        assert_eq!("1970-01-01 08:00:01+0800", DateTime::new(1).to_string());
        assert_eq!("1970-01-01 07:59:59+0800", DateTime::new(-1).to_string());
        assert_eq!("1970-01-01 08:00:01+0800", DateTime::new(1000).to_string());
        assert_eq!("1970-01-01 07:59:59+0800", DateTime::new(-1000).to_string());
    }

    #[test]
@@ -130,7 +146,7 @@ mod tests {
    fn test_parse_local_date_time() {
        std::env::set_var("TZ", "Asia/Shanghai");
        assert_eq!(
            -28800,
            -28800000,
            DateTime::from_str("1970-01-01 00:00:00").unwrap().val()
        );
        assert_eq!(0, DateTime::from_str("1970-01-01 08:00:00").unwrap().val());
@@ -141,6 +157,24 @@ mod tests {
        let ts = DateTime::from_str("1970-01-01 08:00:00+0000")
            .unwrap()
            .val();
        assert_eq!(28800, ts);
        assert_eq!(28800000, ts);
    }

    #[test]
    fn test_from_max_date() {
        let date = Date::new(i32::MAX);
        let datetime = DateTime::from(date);
        assert_eq!(datetime.val(), 185542587100800000);
    }

    #[test]
    fn test_conversion_between_datetime_and_chrono_datetime() {
        let cases = [1, 10, 100, 1000, 100000];
        for case in cases {
            let dt = DateTime::new(case);
            let ndt = dt.to_chrono_datetime().unwrap();
            let dt2 = DateTime::from(ndt);
            assert_eq!(dt, dt2);
        }
    }
}

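The hunks above move `DateTime` from second to millisecond precision, so the constructor, parser, `Display` impl, and tests all now work in milliseconds. A small sketch of why the unit matters, using chrono's `NaiveDateTime` directly (the workspace already depends on chrono) rather than `common_time::DateTime`, so it runs standalone:

```rust
use chrono::NaiveDateTime;

fn main() {
    let raw: i64 = 1_000;

    // Old interpretation: `raw` is seconds since the epoch.
    let as_seconds = NaiveDateTime::from_timestamp_opt(raw, 0).unwrap();
    // New interpretation: `raw` is milliseconds since the epoch.
    let as_millis = NaiveDateTime::from_timestamp_millis(raw).unwrap();

    assert_eq!(as_seconds.to_string(), "1970-01-01 00:16:40");
    assert_eq!(as_millis.to_string(), "1970-01-01 00:00:01");
    println!("{as_seconds} vs {as_millis}");
}
```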
@@ -18,13 +18,19 @@ use std::num::{ParseIntError, TryFromIntError};
use chrono::ParseError;
use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
use snafu::{Location, Snafu};

#[derive(Debug, Snafu)]
#[derive(Snafu)]
#[snafu(visibility(pub))]
#[stack_trace_debug]
pub enum Error {
    #[snafu(display("Failed to parse string to date, raw: {}", raw))]
    ParseDateStr { raw: String, source: ParseError },
    ParseDateStr {
        raw: String,
        #[snafu(source)]
        error: ParseError,
    },

    #[snafu(display("Invalid date string, raw: {}", raw))]
    InvalidDateStr { raw: String, location: Location },
@@ -37,7 +43,8 @@ pub enum Error {

    #[snafu(display("Current timestamp overflow"))]
    TimestampOverflow {
        source: TryFromIntError,
        #[snafu(source)]
        error: TryFromIntError,
        location: Location,
    },

@@ -54,7 +61,8 @@ pub enum Error {
    #[snafu(display("Invalid offset string {raw}: "))]
    ParseOffsetStr {
        raw: String,
        source: ParseIntError,
        #[snafu(source)]
        error: ParseIntError,
        location: Location,
    },

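The recurring change in these error hunks is renaming the wrapped error field from `source` to `error` and tagging it with `#[snafu(source)]`, which keeps the std error source chain intact while the new `#[stack_trace_debug]` attribute takes over the `Debug` output. A reduced sketch of the snafu side of that pattern (it keeps a plain `derive(Debug)` instead of the project's internal `#[stack_trace_debug]` macro, and the error type is invented for illustration):

```rust
use snafu::{ResultExt, Snafu};

#[derive(Debug, Snafu)]
enum Error {
    #[snafu(display("Failed to parse number from {raw}"))]
    ParseNum {
        raw: String,
        // A field not literally named `source` can still act as the error
        // source once it is tagged with #[snafu(source)].
        #[snafu(source)]
        error: std::num::ParseIntError,
    },
}

fn parse(raw: &str) -> Result<i32, Error> {
    raw.parse::<i32>().context(ParseNumSnafu { raw })
}

fn main() {
    let err = parse("not a number").unwrap_err();
    println!("{err}");
    // The tagged field still participates in the std::error::Error chain.
    println!("caused by: {:?}", std::error::Error::source(&err));
}
```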
@@ -23,6 +23,7 @@ common-function = { workspace = true }
common-greptimedb-telemetry = { workspace = true }
common-grpc = { workspace = true }
common-grpc-expr = { workspace = true }
common-macro = { workspace = true }
common-meta = { workspace = true }
common-procedure = { workspace = true }
common-query = { workspace = true }

@@ -37,7 +37,7 @@ use storage::config::{
};
use storage::scheduler::SchedulerConfig;

pub const DEFAULT_OBJECT_STORE_CACHE_SIZE: ReadableSize = ReadableSize(1024);
pub const DEFAULT_OBJECT_STORE_CACHE_SIZE: ReadableSize = ReadableSize::mb(256);

/// Default data home in file storage
const DEFAULT_DATA_HOME: &str = "/tmp/greptimedb";
@@ -86,10 +86,19 @@ impl Default for StorageConfig {
    }
}

#[derive(Debug, Clone, Serialize, Default, Deserialize)]
#[derive(Debug, Clone, Serialize, Default, Deserialize, Eq, PartialEq)]
#[serde(default)]
pub struct FileConfig {}

#[derive(Debug, Clone, Serialize, Deserialize, Default)]
#[serde(default)]
pub struct ObjectStorageCacheConfig {
    /// The local file cache directory
    pub cache_path: Option<String>,
    /// The cache capacity in bytes
    pub cache_capacity: Option<ReadableSize>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(default)]
pub struct S3Config {
@@ -101,8 +110,8 @@ pub struct S3Config {
    pub secret_access_key: SecretString,
    pub endpoint: Option<String>,
    pub region: Option<String>,
    pub cache_path: Option<String>,
    pub cache_capacity: Option<ReadableSize>,
    #[serde(flatten)]
    pub cache: ObjectStorageCacheConfig,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
@@ -115,8 +124,8 @@ pub struct OssConfig {
    #[serde(skip_serializing)]
    pub access_key_secret: SecretString,
    pub endpoint: String,
    pub cache_path: Option<String>,
    pub cache_capacity: Option<ReadableSize>,
    #[serde(flatten)]
    pub cache: ObjectStorageCacheConfig,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
@@ -130,8 +139,8 @@ pub struct AzblobConfig {
    pub account_key: SecretString,
    pub endpoint: String,
    pub sas_token: Option<String>,
    pub cache_path: Option<String>,
    pub cache_capacity: Option<ReadableSize>,
    #[serde(flatten)]
    pub cache: ObjectStorageCacheConfig,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
@@ -143,8 +152,8 @@ pub struct GcsConfig {
    #[serde(skip_serializing)]
    pub credential_path: SecretString,
    pub endpoint: String,
    pub cache_path: Option<String>,
    pub cache_capacity: Option<ReadableSize>,
    #[serde(flatten)]
    pub cache: ObjectStorageCacheConfig,
}

impl Default for S3Config {
@@ -156,8 +165,7 @@ impl Default for S3Config {
            secret_access_key: SecretString::from(String::default()),
            endpoint: Option::default(),
            region: Option::default(),
            cache_path: Option::default(),
            cache_capacity: Option::default(),
            cache: ObjectStorageCacheConfig::default(),
        }
    }
}
@@ -170,8 +178,7 @@ impl Default for OssConfig {
            access_key_id: SecretString::from(String::default()),
            access_key_secret: SecretString::from(String::default()),
            endpoint: String::default(),
            cache_path: Option::default(),
            cache_capacity: Option::default(),
            cache: ObjectStorageCacheConfig::default(),
        }
    }
}
@@ -184,9 +191,8 @@ impl Default for AzblobConfig {
            account_name: SecretString::from(String::default()),
            account_key: SecretString::from(String::default()),
            endpoint: String::default(),
            cache_path: Option::default(),
            cache_capacity: Option::default(),
            sas_token: Option::default(),
            cache: ObjectStorageCacheConfig::default(),
        }
    }
}
@@ -199,8 +205,7 @@ impl Default for GcsConfig {
            scope: String::default(),
            credential_path: SecretString::from(String::default()),
            endpoint: String::default(),
            cache_path: Option::default(),
            cache_capacity: Option::default(),
            cache: ObjectStorageCacheConfig::default(),
        }
    }
}
@@ -378,7 +383,7 @@ impl DatanodeOptions {
    }
}

#[derive(Debug, Serialize, Deserialize, Clone)]
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
pub enum RegionEngineConfig {
    #[serde(rename = "mito")]
    Mito(MitoConfig),

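The cache settings above are hoisted into a shared `ObjectStorageCacheConfig` and re-embedded with `#[serde(flatten)]`, so `cache_path` and `cache_capacity` still sit at the top level of each backend's configuration and existing files keep deserializing. A simplified sketch of that behaviour, with plain `String`/`u64` standing in for `SecretString`/`ReadableSize` and serde_json standing in for the TOML config format:

```rust
use serde::Deserialize;

#[derive(Debug, Default, Deserialize)]
#[serde(default)]
struct ObjectStorageCacheConfig {
    cache_path: Option<String>,
    cache_capacity: Option<u64>,
}

#[derive(Debug, Deserialize)]
struct S3Config {
    bucket: String,
    // Flattening keeps the cache fields at the same level as `bucket`.
    #[serde(flatten)]
    cache: ObjectStorageCacheConfig,
}

fn main() {
    // The flat layout used before the refactor still deserializes unchanged.
    let cfg: S3Config = serde_json::from_str(
        r#"{"bucket": "my-bucket", "cache_path": "/tmp/cache", "cache_capacity": 268435456}"#,
    )
    .unwrap();

    assert_eq!(cfg.cache.cache_path.as_deref(), Some("/tmp/cache"));
    assert_eq!(cfg.cache.cache_capacity, Some(268435456));
    println!("{cfg:?}");
}
```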
@@ -14,13 +14,11 @@

//! Datanode implementation.

use std::collections::HashMap;
use std::path::Path;
use std::sync::Arc;

use catalog::kvbackend::MetaKvBackend;
use catalog::memory::MemoryCatalogManager;
use common_base::readable_size::ReadableSize;
use common_base::Plugins;
use common_error::ext::BoxedError;
use common_greptimedb_telemetry::GreptimeDBTelemetryTask;
@@ -63,8 +61,6 @@ use crate::region_server::RegionServer;
use crate::server::Services;
use crate::store;

pub const DEFAULT_OBJECT_STORE_CACHE_SIZE: ReadableSize = ReadableSize(1024);

const OPEN_REGION_PARALLELISM: usize = 16;

/// Datanode service.
@@ -76,6 +72,7 @@ pub struct Datanode {
    region_server: RegionServer,
    greptimedb_telemetry_task: Arc<GreptimeDBTelemetryTask>,
    leases_notifier: Option<Arc<Notify>>,
    plugins: Plugins,
}

impl Datanode {
@@ -141,11 +138,15 @@ impl Datanode {
    pub fn region_server(&self) -> RegionServer {
        self.region_server.clone()
    }

    pub fn plugins(&self) -> Plugins {
        self.plugins.clone()
    }
}

pub struct DatanodeBuilder {
    opts: DatanodeOptions,
    plugins: Arc<Plugins>,
    plugins: Plugins,
    meta_client: Option<MetaClient>,
    kv_backend: Option<KvBackendRef>,
}
@@ -153,11 +154,7 @@ pub struct DatanodeBuilder {
impl DatanodeBuilder {
    /// `kv_backend` is optional. If absent, the builder will try to build one
    /// by using the given `opts`
    pub fn new(
        opts: DatanodeOptions,
        kv_backend: Option<KvBackendRef>,
        plugins: Arc<Plugins>,
    ) -> Self {
    pub fn new(opts: DatanodeOptions, kv_backend: Option<KvBackendRef>, plugins: Plugins) -> Self {
        Self {
            opts,
            plugins,
@@ -266,6 +263,7 @@ impl DatanodeBuilder {
            greptimedb_telemetry_task,
            region_event_receiver,
            leases_notifier,
            plugins: self.plugins.clone(),
        })
    }

@@ -286,8 +284,9 @@ impl DatanodeBuilder {
            for region_number in table_value.regions {
                regions.push((
                    RegionId::new(table_value.table_id, region_number),
                    table_value.engine.clone(),
                    table_value.region_storage_path.clone(),
                    table_value.region_info.engine.clone(),
                    table_value.region_info.region_storage_path.clone(),
                    table_value.region_info.region_options.clone(),
                ));
            }
        }
@@ -296,7 +295,7 @@ impl DatanodeBuilder {
        let semaphore = Arc::new(tokio::sync::Semaphore::new(OPEN_REGION_PARALLELISM));
        let mut tasks = vec![];

        for (region_id, engine, store_path) in regions {
        for (region_id, engine, store_path, options) in regions {
            let region_dir = region_dir(&store_path, region_id);
            let semaphore_moved = semaphore.clone();
            tasks.push(async move {
@@ -307,7 +306,7 @@ impl DatanodeBuilder {
                    RegionRequest::Open(RegionOpenRequest {
                        engine: engine.clone(),
                        region_dir,
                        options: HashMap::new(),
                        options,
                    }),
                )
                .await?;
@@ -330,7 +329,7 @@ impl DatanodeBuilder {

    async fn new_region_server(
        opts: &DatanodeOptions,
        plugins: Arc<Plugins>,
        plugins: Plugins,
        log_store: Arc<RaftEngineLogStore>,
        event_listener: RegionServerEventListenerRef,
    ) -> Result<RegionServer> {
@@ -338,6 +337,7 @@ impl DatanodeBuilder {
            // query engine in datanode only executes plan with resolved table source.
            MemoryCatalogManager::with_default_setup(),
            None,
            None,
            false,
            plugins,
        );
@@ -362,6 +362,8 @@ impl DatanodeBuilder {
        Ok(region_server)
    }

    // internal utils

    /// Build [RaftEngineLogStore]
    async fn build_log_store(opts: &DatanodeOptions) -> Result<Arc<RaftEngineLogStore>> {
        let data_home = normalize_dir(&opts.storage.data_home);
@@ -409,3 +411,80 @@ impl DatanodeBuilder {
        Ok(engines)
    }
}

#[cfg(test)]
mod tests {
    use std::assert_matches::assert_matches;
    use std::collections::{BTreeMap, HashMap};
    use std::sync::Arc;

    use common_base::Plugins;
    use common_meta::key::datanode_table::DatanodeTableManager;
    use common_meta::kv_backend::memory::MemoryKvBackend;
    use common_meta::kv_backend::KvBackendRef;
    use store_api::region_request::RegionRequest;
    use store_api::storage::RegionId;

    use crate::config::DatanodeOptions;
    use crate::datanode::DatanodeBuilder;
    use crate::tests::{mock_region_server, MockRegionEngine};

    async fn setup_table_datanode(kv: &KvBackendRef) {
        let mgr = DatanodeTableManager::new(kv.clone());
        let txn = mgr
            .build_create_txn(
                1028,
                "mock",
                "foo/bar/weny",
                HashMap::from([("foo".to_string(), "bar".to_string())]),
                BTreeMap::from([(0, vec![0, 1, 2])]),
            )
            .unwrap();

        let r = kv.txn(txn).await.unwrap();
        assert!(r.succeeded);
    }

    #[tokio::test]
    async fn test_initialize_region_server() {
        let mut mock_region_server = mock_region_server();
        let (mock_region, mut mock_region_handler) = MockRegionEngine::new();

        mock_region_server.register_engine(mock_region.clone());

        let builder = DatanodeBuilder::new(
            DatanodeOptions {
                node_id: Some(0),
                ..Default::default()
            },
            None,
            Plugins::default(),
        );

        let kv = Arc::new(MemoryKvBackend::default()) as _;
        setup_table_datanode(&kv).await;

        builder
            .initialize_region_server(&mock_region_server, kv.clone(), false)
            .await
            .unwrap();

        for i in 0..3 {
            let (region_id, req) = mock_region_handler.recv().await.unwrap();
            assert_eq!(region_id, RegionId::new(1028, i));
            if let RegionRequest::Open(req) = req {
                assert_eq!(
                    req.options,
                    HashMap::from([("foo".to_string(), "bar".to_string())])
                )
            } else {
                unreachable!()
            }
        }

        assert_matches!(
            mock_region_handler.try_recv(),
            Err(tokio::sync::mpsc::error::TryRecvError::Empty)
        );
    }
}

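`initialize_region_server` above opens all of a node's regions concurrently but caps the fan-out with a semaphore sized by `OPEN_REGION_PARALLELISM`. A standalone sketch of that bounded-concurrency pattern, assuming the tokio (with the `full` feature) and futures crates; `open_region` is a stand-in for the real `RegionServer` call:

```rust
use std::sync::Arc;
use std::time::Duration;

use tokio::sync::Semaphore;

const OPEN_REGION_PARALLELISM: usize = 16;

/// Stand-in for opening one region on the region server.
async fn open_region(region_id: u64) -> Result<(), String> {
    tokio::time::sleep(Duration::from_millis(10)).await;
    println!("opened region {region_id}");
    Ok(())
}

#[tokio::main]
async fn main() -> Result<(), String> {
    let semaphore = Arc::new(Semaphore::new(OPEN_REGION_PARALLELISM));
    let mut tasks = vec![];

    for region_id in 0..64u64 {
        let semaphore_moved = semaphore.clone();
        tasks.push(async move {
            // Each future waits for a permit, so at most 16 opens run at once.
            let _permit = semaphore_moved.acquire().await.expect("semaphore closed");
            open_region(region_id).await
        });
    }

    // Drive every open; any failure aborts the whole initialization.
    futures::future::try_join_all(tasks).await?;
    Ok(())
}
```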
@@ -16,6 +16,7 @@ use std::any::Any;

use common_error::ext::{BoxedError, ErrorExt};
use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
use common_procedure::ProcedureId;
use serde_json::error::Error as JsonError;
use servers::define_into_tonic_status;
@@ -24,8 +25,9 @@ use store_api::storage::RegionId;
use table::error::Error as TableError;

/// Business error of datanode.
#[derive(Debug, Snafu)]
#[derive(Snafu)]
#[snafu(visibility(pub))]
#[stack_trace_debug]
pub enum Error {
    #[snafu(display("Failed to handle heartbeat response"))]
    HandleHeartbeatResponse {
@@ -170,14 +172,23 @@ pub enum Error {
    #[snafu(display("Failed to parse address {}", addr))]
    ParseAddr {
        addr: String,
        source: std::net::AddrParseError,
        #[snafu(source)]
        error: std::net::AddrParseError,
    },

    #[snafu(display("Failed to create directory {}", dir))]
    CreateDir { dir: String, source: std::io::Error },
    CreateDir {
        dir: String,
        #[snafu(source)]
        error: std::io::Error,
    },

    #[snafu(display("Failed to remove directory {}", dir))]
    RemoveDir { dir: String, source: std::io::Error },
    RemoveDir {
        dir: String,
        #[snafu(source)]
        error: std::io::Error,
    },

    #[snafu(display("Failed to open log store"))]
    OpenLogStore {
@@ -187,7 +198,8 @@ pub enum Error {

    #[snafu(display("Failed to init backend"))]
    InitBackend {
        source: object_store::Error,
        #[snafu(source)]
        error: object_store::Error,
        location: Location,
    },

@@ -340,7 +352,8 @@ pub enum Error {
    #[snafu(display("Failed to encode object into json"))]
    EncodeJson {
        location: Location,
        source: JsonError,
        #[snafu(source)]
        error: JsonError,
    },

    #[snafu(display("Payload not exist"))]
@@ -351,7 +364,8 @@ pub enum Error {

    #[snafu(display("Failed to join task"))]
    JoinTask {
        source: common_runtime::JoinError,
        #[snafu(source)]
        error: common_runtime::JoinError,
        location: Location,
    },

@@ -18,6 +18,7 @@ use std::time::Duration;

use api::v1::meta::{HeartbeatRequest, Peer, RegionStat, Role};
use common_grpc::channel_manager::{ChannelConfig, ChannelManager};
use common_meta::distributed_time_constants::META_KEEP_ALIVE_INTERVAL_SECS;
use common_meta::heartbeat::handler::parse_mailbox_message::ParseMailboxMessageHandler;
use common_meta::heartbeat::handler::{
    HandlerGroupExecutor, HeartbeatResponseHandlerContext, HeartbeatResponseHandlerExecutorRef,
@@ -97,6 +98,7 @@ impl HeartbeatTask {
        handler_executor: HeartbeatResponseHandlerExecutorRef,
        mailbox: MailboxRef,
        mut notify: Option<Arc<Notify>>,
        quit_signal: Arc<Notify>,
    ) -> Result<HeartbeatSender> {
        let client_id = meta_client.id();

@@ -123,7 +125,8 @@ impl HeartbeatTask {
                    info!("Heartbeat task shutdown");
                }
            }
            info!("Heartbeat handling loop exit.")
            quit_signal.notify_one();
            info!("Heartbeat handling loop exit.");
        });
        Ok(tx)
    }
@@ -167,12 +170,15 @@ impl HeartbeatTask {
        let (outgoing_tx, mut outgoing_rx) = mpsc::channel(16);
        let mailbox = Arc::new(HeartbeatMailbox::new(outgoing_tx));

        let quit_signal = Arc::new(tokio::sync::Notify::new());

        let mut tx = Self::create_streams(
            &meta_client,
            running.clone(),
            handler_executor.clone(),
            mailbox.clone(),
            notify,
            quit_signal.clone(),
        )
        .await?;

@@ -187,7 +193,6 @@ impl HeartbeatTask {
        common_runtime::spawn_bg(async move {
            let sleep = tokio::time::sleep(Duration::from_millis(0));
            tokio::pin!(sleep);

            loop {
                if !running.load(Ordering::Relaxed) {
                    info!("shutdown heartbeat task");
@@ -228,6 +233,11 @@ impl HeartbeatTask {
                        sleep.as_mut().reset(now + Duration::from_millis(interval));
                        Some(req)
                    }
                    // If the heartbeat stream is broken, send a dummy heartbeat request to re-create the heartbeat stream.
                    _ = quit_signal.notified() => {
                        let req = HeartbeatRequest::default();
                        Some(req)
                    }
                };
                if let Some(req) = req {
                    debug!("Sending heartbeat request: {:?}", req);
@@ -239,14 +249,24 @@ impl HeartbeatTask {
                        handler_executor.clone(),
                        mailbox.clone(),
                        None,
                        quit_signal.clone(),
                    )
                    .await
                    {
                        Ok(new_tx) => {
                            info!("Reconnected to metasrv");
                            tx = new_tx;
                            // Triggers to send heartbeat immediately.
                            sleep.as_mut().reset(Instant::now());
                        }
                        Err(e) => {
                            // Before the META_LEASE_SECS expires,
                            // any retries are meaningless, it always reads the old meta leader address.
                            // Triggers to retry after META_KEEP_ALIVE_INTERVAL_SECS.
                            sleep.as_mut().reset(
                                Instant::now()
                                    + Duration::from_secs(META_KEEP_ALIVE_INTERVAL_SECS),
                            );
                            error!(e;"Failed to reconnect to metasrv!");
                        }
                    }
@@ -315,13 +335,19 @@ pub async fn new_metasrv_client(
        .timeout(Duration::from_millis(meta_config.timeout_millis))
        .connect_timeout(Duration::from_millis(meta_config.connect_timeout_millis))
        .tcp_nodelay(meta_config.tcp_nodelay);
    let channel_manager = ChannelManager::with_config(config);
    let channel_manager = ChannelManager::with_config(config.clone());
    let heartbeat_channel_manager = ChannelManager::with_config(
        config
            .timeout(Duration::from_millis(meta_config.heartbeat_timeout_millis))
            .connect_timeout(Duration::from_millis(meta_config.heartbeat_timeout_millis)),
    );

    let mut meta_client = MetaClientBuilder::new(cluster_id, member_id, Role::Datanode)
        .enable_heartbeat()
        .enable_router()
        .enable_store()
        .channel_manager(channel_manager)
        .heartbeat_channel_manager(heartbeat_channel_manager)
        .build();
    meta_client
        .start(&meta_config.metasrv_addrs)

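The `quit_signal` added above is a `tokio::sync::Notify`: the response-handling loop calls `notify_one()` when the heartbeat stream breaks, and the sending loop selects on `notified()` to push a dummy request that forces the stream to be re-created. A standalone sketch of that wake-up mechanism (names and timings are illustrative, not the datanode's actual types):

```rust
use std::sync::Arc;
use std::time::Duration;

use tokio::sync::Notify;

#[tokio::main]
async fn main() {
    let quit_signal = Arc::new(Notify::new());

    // Simulates the response-handling task noticing a broken stream and
    // waking the sender, as `quit_signal.notify_one()` does above.
    let notifier = quit_signal.clone();
    tokio::spawn(async move {
        tokio::time::sleep(Duration::from_millis(50)).await;
        notifier.notify_one();
    });

    let mut ticker = tokio::time::interval(Duration::from_secs(5));
    ticker.tick().await; // the first tick completes immediately

    tokio::select! {
        _ = ticker.tick() => println!("periodic heartbeat"),
        _ = quit_signal.notified() => {
            println!("stream broken: send a dummy heartbeat so the stream is re-created");
        }
    }
}
```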
@@ -12,8 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::collections::HashMap;

use async_trait::async_trait;
use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
@@ -49,12 +47,13 @@ impl RegionHeartbeatResponseHandler {
            Instruction::OpenRegion(OpenRegion {
                region_ident,
                region_storage_path,
                options,
            }) => {
                let region_id = Self::region_ident_to_region_id(&region_ident);
                let open_region_req = RegionRequest::Open(RegionOpenRequest {
                    engine: region_ident.engine,
                    region_dir: region_dir(&region_storage_path, region_id),
                    options: HashMap::new(),
                    options,
                });
                Ok((region_id, open_region_req))
            }

@@ -14,5 +14,7 @@

//! datanode metrics

pub const HANDLE_SQL_ELAPSED: &str = "datanode.handle_sql_elapsed";
pub const HANDLE_PROMQL_ELAPSED: &str = "datanode.handle_promql_elapsed";
/// The elapsed time of handling a request in the region_server.
pub const HANDLE_REGION_REQUEST_ELAPSED: &str = "datanode.handle_region_request_elapsed";
/// Region request type label.
pub const REGION_REQUEST_TYPE: &str = "datanode.region_request_type";

Some files were not shown because too many files have changed in this diff.