Compare commits

..

13 Commits

Author SHA1 Message Date
discord9  d815bdf770  refactor: per reviews  2024-05-27 14:27:21 +08:00
discord9  0b324563ac  fix: range begin>range end  2024-05-27 14:27:21 +08:00
discord9  ffecb6882e  mend  2024-05-27 14:27:21 +08:00
discord9  c185242997  fix: window_start when ts<start_time  2024-05-27 14:27:21 +08:00
discord9  9fda415b0d  fix: ts<start time correct window  2024-05-27 14:27:21 +08:00
discord9  5eaf9816b9  fix: send buf clear  2024-05-27 14:27:21 +08:00
discord9  6684f8dce3  fix: test of tumble  2024-05-27 14:27:21 +08:00
discord9  e8660a6f7e  fix: expire state  2024-05-27 14:27:21 +08:00
discord9  6659f3cc62  fix: reorder write requests  2024-05-27 14:27:21 +08:00
discord9  d218d65361  fix: default timestamp name  2024-05-27 14:27:21 +08:00
discord9  8f40ba42c1  feat: rename default ts to GREPTIME_TIMESTAMP  2024-05-27 14:27:21 +08:00
discord9  d1ce436442  fix(WIP): choose  2024-05-27 14:27:21 +08:00
discord9  e580ba63ec  fix: optional args of tumble  2024-05-27 14:27:21 +08:00
385 changed files with 6280 additions and 16219 deletions

View File

@@ -57,7 +57,6 @@ runs:
greptime/greptimedb-cluster \
--create-namespace \
-n my-greptimedb \
--values ./.github/actions/setup-greptimedb-cluster/values.yaml \
--wait \
--wait-for-jobs
- name: Wait for GreptimeDB

View File

@@ -1,18 +0,0 @@
meta:
config: |-
[runtime]
read_rt_size = 8
write_rt_size = 8
bg_rt_size = 8
datanode:
config: |-
[runtime]
read_rt_size = 8
write_rt_size = 8
bg_rt_size = 8
frontend:
config: |-
[runtime]
read_rt_size = 8
write_rt_size = 8
bg_rt_size = 8

View File

@@ -155,22 +155,19 @@ jobs:
with:
# Shares across multiple jobs
shared-key: "fuzz-test-targets"
cache-targets: "false"
- name: Set Rust Fuzz
shell: bash
run: |
sudo apt-get install -y libfuzzer-14-dev
rustup install nightly
cargo +nightly install cargo-fuzz cargo-gc-bin
cargo +nightly install cargo-fuzz
- name: Download pre-built binaries
uses: actions/download-artifact@v4
with:
name: bins
path: .
- name: Unzip binaries
run: |
tar -xvf ./bins.tar.gz
rm ./bins.tar.gz
run: tar -xvf ./bins.tar.gz
- name: Run GreptimeDB
run: |
./bins/greptime standalone start&
@@ -185,7 +182,7 @@ jobs:
unstable-fuzztest:
name: Unstable Fuzz Test
needs: build-greptime-ci
needs: build
runs-on: ubuntu-latest
strategy:
matrix:
@@ -203,27 +200,24 @@ jobs:
with:
# Shares across multiple jobs
shared-key: "fuzz-test-targets"
cache-targets: "false"
- name: Set Rust Fuzz
shell: bash
run: |
sudo apt update && sudo apt install -y libfuzzer-14-dev
cargo install cargo-fuzz cargo-gc-bin
- name: Download pre-built binariy
cargo install cargo-fuzz
- name: Download pre-built binaries
uses: actions/download-artifact@v4
with:
name: bin
name: bins
path: .
- name: Unzip bianry
run: |
tar -xvf ./bin.tar.gz
rm ./bin.tar.gz
- name: Run Fuzz Test
- name: Unzip binaries
run: tar -xvf ./bins.tar.gz
- name: Fuzz Test
uses: ./.github/actions/fuzz-test
env:
CUSTOM_LIBFUZZER_PATH: /usr/lib/llvm-14/lib/libFuzzer.a
GT_MYSQL_ADDR: 127.0.0.1:4002
GT_FUZZ_BINARY_PATH: ./bin/greptime
GT_FUZZ_BINARY_PATH: ./bins/greptime
GT_FUZZ_INSTANCE_ROOT_DIR: /tmp/unstable-greptime/
with:
target: ${{ matrix.target }}
@@ -262,7 +256,7 @@ jobs:
- name: Build greptime bianry
shell: bash
# `cargo gc` will invoke `cargo build` with specified args
run: cargo gc --profile ci -- --bin greptime
run: cargo build --bin greptime --profile ci
- name: Pack greptime binary
shell: bash
run: |
@@ -301,13 +295,12 @@ jobs:
with:
# Shares across multiple jobs
shared-key: "fuzz-test-targets"
cache-targets: "false"
- name: Set Rust Fuzz
shell: bash
run: |
sudo apt-get install -y libfuzzer-14-dev
rustup install nightly
cargo +nightly install cargo-fuzz cargo-gc-bin
cargo +nightly install cargo-fuzz
# Downloads ci image
- name: Download pre-built binariy
uses: actions/download-artifact@v4
@@ -315,9 +308,7 @@ jobs:
name: bin
path: .
- name: Unzip binary
run: |
tar -xvf ./bin.tar.gz
rm ./bin.tar.gz
run: tar -xvf ./bin.tar.gz
- name: Build and push GreptimeDB image
uses: ./.github/actions/build-and-push-ci-image
- name: Wait for etcd
@@ -363,29 +354,15 @@ jobs:
name: fuzz-tests-kind-logs-${{ matrix.target }}
path: /tmp/kind
retention-days: 3
- name: Delete cluster
if: success()
shell: bash
run: |
kind delete cluster
docker stop $(docker ps -a -q)
docker rm $(docker ps -a -q)
docker system prune -f
sqlness:
name: Sqlness Test (${{ matrix.mode.name }})
name: Sqlness Test
needs: build
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ ubuntu-20.04 ]
mode:
- name: "Basic"
opts: ""
kafka: false
- name: "Remote WAL"
opts: "-w kafka -k 127.0.0.1:9092"
kafka: true
timeout-minutes: 60
steps:
- uses: actions/checkout@v4
@@ -396,17 +373,43 @@ jobs:
path: .
- name: Unzip binaries
run: tar -xvf ./bins.tar.gz
- if: matrix.mode.kafka
name: Setup kafka server
- name: Run sqlness
run: RUST_BACKTRACE=1 ./bins/sqlness-runner -c ./tests/cases --bins-dir ./bins --preserve-state
- name: Upload sqlness logs
if: always()
uses: actions/upload-artifact@v4
with:
name: sqlness-logs
path: /tmp/sqlness*
retention-days: 3
sqlness-kafka-wal:
name: Sqlness Test with Kafka Wal
needs: build
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ ubuntu-20.04 ]
timeout-minutes: 60
steps:
- uses: actions/checkout@v4
- name: Download pre-built binaries
uses: actions/download-artifact@v4
with:
name: bins
path: .
- name: Unzip binaries
run: tar -xvf ./bins.tar.gz
- name: Setup kafka server
working-directory: tests-integration/fixtures/kafka
run: docker compose -f docker-compose-standalone.yml up -d --wait
- name: Run sqlness
run: RUST_BACKTRACE=1 ./bins/sqlness-runner ${{ matrix.mode.opts }} -c ./tests/cases --bins-dir ./bins --preserve-state
run: RUST_BACKTRACE=1 ./bins/sqlness-runner -w kafka -k 127.0.0.1:9092 -c ./tests/cases --bins-dir ./bins --preserve-state
- name: Upload sqlness logs
if: failure()
if: always()
uses: actions/upload-artifact@v4
with:
name: sqlness-logs-${{ matrix.mode.name }}
name: sqlness-logs-with-kafka-wal
path: /tmp/sqlness*
retention-days: 3
@@ -495,9 +498,6 @@ jobs:
- name: Setup kafka server
working-directory: tests-integration/fixtures/kafka
run: docker compose -f docker-compose-standalone.yml up -d --wait
- name: Setup minio
working-directory: tests-integration/fixtures/minio
run: docker compose -f docker-compose-standalone.yml up -d --wait
- name: Run nextest cases
run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F pyo3_backend -F dashboard
env:
@@ -508,11 +508,6 @@ jobs:
GT_S3_ACCESS_KEY_ID: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
GT_S3_ACCESS_KEY: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
GT_S3_REGION: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
GT_MINIO_BUCKET: greptime
GT_MINIO_ACCESS_KEY_ID: superpower_ci_user
GT_MINIO_ACCESS_KEY: superpower_password
GT_MINIO_REGION: us-west-2
GT_MINIO_ENDPOINT_URL: http://127.0.0.1:9000
GT_ETCD_ENDPOINTS: http://127.0.0.1:2379
GT_KAFKA_ENDPOINTS: 127.0.0.1:9092
UNITTEST_LOG_DIR: "__unittest_logs"
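Side note: the sqlness jobs above need only a built sqlness-runner and the kafka fixture, so the Kafka-WAL run can be reproduced outside CI with the same commands the workflow uses. A sketch, assuming the pre-built binaries were unpacked into ./bins at the repo root:

docker compose -f tests-integration/fixtures/kafka/docker-compose-standalone.yml up -d --wait
RUST_BACKTRACE=1 ./bins/sqlness-runner -w kafka -k 127.0.0.1:9092 -c ./tests/cases --bins-dir ./bins --preserve-state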

Cargo.lock (generated): 1642 changed lines. File diff suppressed because it is too large.

View File

@@ -64,7 +64,7 @@ members = [
resolver = "2"
[workspace.package]
version = "0.8.1"
version = "0.8.0"
edition = "2021"
license = "Apache-2.0"
@@ -104,15 +104,15 @@ clap = { version = "4.4", features = ["derive"] }
config = "0.13.0"
crossbeam-utils = "0.8"
dashmap = "5.4"
datafusion = { git = "https://github.com/apache/datafusion.git", rev = "08e19f4956d32164be6fc66eb5a4c080eb0023d1" }
datafusion-common = { git = "https://github.com/apache/datafusion.git", rev = "08e19f4956d32164be6fc66eb5a4c080eb0023d1" }
datafusion-expr = { git = "https://github.com/apache/datafusion.git", rev = "08e19f4956d32164be6fc66eb5a4c080eb0023d1" }
datafusion-functions = { git = "https://github.com/apache/datafusion.git", rev = "08e19f4956d32164be6fc66eb5a4c080eb0023d1" }
datafusion-optimizer = { git = "https://github.com/apache/datafusion.git", rev = "08e19f4956d32164be6fc66eb5a4c080eb0023d1" }
datafusion-physical-expr = { git = "https://github.com/apache/datafusion.git", rev = "08e19f4956d32164be6fc66eb5a4c080eb0023d1" }
datafusion-physical-plan = { git = "https://github.com/apache/datafusion.git", rev = "08e19f4956d32164be6fc66eb5a4c080eb0023d1" }
datafusion-sql = { git = "https://github.com/apache/datafusion.git", rev = "08e19f4956d32164be6fc66eb5a4c080eb0023d1" }
datafusion-substrait = { git = "https://github.com/apache/datafusion.git", rev = "08e19f4956d32164be6fc66eb5a4c080eb0023d1" }
datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-functions = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-physical-plan = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-sql = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-substrait = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
derive_builder = "0.12"
dotenv = "0.15"
# TODO(LFC): Wait for https://github.com/etcdv3/etcd-client/pull/76
@@ -120,7 +120,7 @@ etcd-client = { git = "https://github.com/MichaelScofield/etcd-client.git", rev
fst = "0.4.7"
futures = "0.3"
futures-util = "0.3"
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "ae26136accd82fbdf8be540cd502f2e94951077e" }
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "902f75fdd170c572e90b1f640161d90995f20218" }
humantime = "2.1"
humantime-serde = "1.1"
itertools = "0.10"
@@ -146,15 +146,13 @@ raft-engine = { version = "0.4.1", default-features = false }
rand = "0.8"
regex = "1.8"
regex-automata = { version = "0.4" }
reqwest = { version = "0.12", default-features = false, features = [
reqwest = { version = "0.11", default-features = false, features = [
"json",
"rustls-tls-native-roots",
"stream",
"multipart",
] }
rskafka = "0.5"
rstest = "0.21"
rstest_reuse = "0.7"
rust_decimal = "1.33"
schemars = "0.8"
serde = { version = "1.0", features = ["derive"] }
@@ -164,7 +162,7 @@ smallvec = { version = "1", features = ["serde"] }
snafu = "0.8"
sysinfo = "0.30"
# on branch v0.44.x
sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "54a267ac89c09b11c0c88934690530807185d3e7", features = [
sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "e4e496b8d62416ad50ce70a1b460c7313610cf5d", features = [
"visitor",
] }
strum = { version = "0.25", features = ["derive"] }
@@ -174,7 +172,6 @@ tokio-stream = { version = "0.1" }
tokio-util = { version = "0.7", features = ["io-util", "compat"] }
toml = "0.8.8"
tonic = { version = "0.11", features = ["tls", "gzip", "zstd"] }
tower = { version = "0.4" }
uuid = { version = "1.7", features = ["serde", "v4", "fast-rng"] }
zstd = "0.13"
@@ -235,6 +232,8 @@ sql = { path = "src/sql" }
store-api = { path = "src/store-api" }
substrait = { path = "src/common/substrait" }
table = { path = "src/table" }
# TODO some code depends on this
tests-integration = { path = "tests-integration" }
[workspace.dependencies.meter-macros]
git = "https://github.com/GreptimeTeam/greptime-meter.git"
@@ -252,12 +251,9 @@ incremental = false
[profile.ci]
inherits = "dev"
debug = false
strip = true
[profile.dev.package.sqlness-runner]
debug = false
strip = true
[profile.dev.package.tests-fuzz]
debug = false
strip = true

View File

@@ -163,13 +163,6 @@ nextest: ## Install nextest tools.
sqlness-test: ## Run sqlness test.
cargo sqlness
# Run fuzz test ${FUZZ_TARGET}.
RUNS ?= 1
FUZZ_TARGET ?= fuzz_alter_table
.PHONY: fuzz
fuzz:
cargo fuzz run ${FUZZ_TARGET} --fuzz-dir tests-fuzz -D -s none -- -runs=${RUNS}
.PHONY: check
check: ## Cargo check all the targets.
cargo check --workspace --all-targets --all-features
@@ -201,10 +194,6 @@ run-it-in-container: start-etcd ## Run integration tests in dev-builder.
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:latest \
make test sqlness-test BUILD_JOBS=${BUILD_JOBS}
.PHONY: run-cluster-with-etcd
run-cluster-with-etcd: ## Run greptime cluster with etcd in docker-compose.
docker compose -f ./docker/docker-compose/cluster-with-etcd.yaml up
##@ Docs
config-docs: ## Generate configuration documentation from toml files.
docker run --rm \
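For reference, the two Make targets removed in this file (fuzz and run-cluster-with-etcd) are thin wrappers, so the underlying commands can still be run directly. A sketch, assuming cargo-fuzz is installed and fuzz_alter_table (the default FUZZ_TARGET above) still exists under tests-fuzz:

cargo fuzz run fuzz_alter_table --fuzz-dir tests-fuzz -D -s none -- -runs=1
docker compose -f ./docker/docker-compose/cluster-with-etcd.yaml up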

View File

@@ -12,6 +12,7 @@ api.workspace = true
arrow.workspace = true
chrono.workspace = true
clap.workspace = true
client.workspace = true
common-base.workspace = true
common-telemetry.workspace = true
common-wal.workspace = true
@@ -32,6 +33,8 @@ rand.workspace = true
rskafka.workspace = true
serde.workspace = true
store-api.workspace = true
# TODO depend `Database` client
tests-integration.workspace = true
tokio.workspace = true
toml.workspace = true
uuid.workspace = true

View File

@@ -28,7 +28,6 @@ use rand::distributions::{Alphanumeric, DistString, Uniform};
use rand::rngs::SmallRng;
use rand::{Rng, SeedableRng};
use serde::{Deserialize, Serialize};
use store_api::logstore::provider::Provider;
use store_api::logstore::LogStore;
use store_api::storage::RegionId;
@@ -211,7 +210,7 @@ impl From<Args> for Config {
pub struct Region {
id: RegionId,
schema: Vec<ColumnSchema>,
provider: Provider,
wal_options: WalOptions,
next_sequence: AtomicU64,
next_entry_id: AtomicU64,
next_timestamp: AtomicI64,
@@ -228,14 +227,10 @@ impl Region {
num_rows: u32,
rng_seed: u64,
) -> Self {
let provider = match wal_options {
WalOptions::RaftEngine => Provider::raft_engine_provider(id.as_u64()),
WalOptions::Kafka(opts) => Provider::kafka_provider(opts.topic),
};
Self {
id,
schema,
provider,
wal_options,
next_sequence: AtomicU64::new(1),
next_entry_id: AtomicU64::new(1),
next_timestamp: AtomicI64::new(1655276557000),
@@ -263,14 +258,14 @@ impl Region {
self.id,
self.next_entry_id.fetch_add(1, Ordering::Relaxed),
&entry,
&self.provider,
&self.wal_options,
)
.unwrap();
}
/// Replays the region.
pub async fn replay<S: LogStore>(&self, wal: &Arc<Wal<S>>) {
let mut wal_stream = wal.scan(self.id, 0, &self.provider).unwrap();
let mut wal_stream = wal.scan(self.id, 0, &self.wal_options).unwrap();
while let Some(res) = wal_stream.next().await {
let (_, entry) = res.unwrap();
metrics::METRIC_WAL_READ_BYTES_TOTAL.inc_by(Self::entry_estimated_size(&entry) as u64);

View File

@@ -13,10 +13,6 @@
| `mode` | String | `standalone` | The running mode of the datanode. It can be `standalone` or `distributed`. |
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. |
| `default_timezone` | String | `None` | The default timezone of the server. |
| `runtime` | -- | -- | The runtime options. |
| `runtime.read_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
| `runtime.write_rt_size` | Integer | `8` | The number of threads to execute the runtime for global write operations. |
| `runtime.bg_rt_size` | Integer | `8` | The number of threads to execute the runtime for global background operations. |
| `http` | -- | -- | The HTTP server options. |
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
| `http.timeout` | String | `30s` | HTTP request timeout. |
@@ -158,10 +154,6 @@
| --- | -----| ------- | ----------- |
| `mode` | String | `standalone` | The running mode of the datanode. It can be `standalone` or `distributed`. |
| `default_timezone` | String | `None` | The default timezone of the server. |
| `runtime` | -- | -- | The runtime options. |
| `runtime.read_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
| `runtime.write_rt_size` | Integer | `8` | The number of threads to execute the runtime for global write operations. |
| `runtime.bg_rt_size` | Integer | `8` | The number of threads to execute the runtime for global background operations. |
| `heartbeat` | -- | -- | The heartbeat options. |
| `heartbeat.interval` | String | `18s` | Interval for sending heartbeat messages to the metasrv. |
| `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. |
@@ -248,10 +240,6 @@
| `use_memory_store` | Bool | `false` | Store data in memory. |
| `enable_telemetry` | Bool | `true` | Whether to enable greptimedb telemetry. |
| `store_key_prefix` | String | `""` | If it's not empty, the metasrv will store all data with this key prefix. |
| `runtime` | -- | -- | The runtime options. |
| `runtime.read_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
| `runtime.write_rt_size` | Integer | `8` | The number of threads to execute the runtime for global write operations. |
| `runtime.bg_rt_size` | Integer | `8` | The number of threads to execute the runtime for global background operations. |
| `procedure` | -- | -- | Procedure storage options. |
| `procedure.max_retry_times` | Integer | `12` | Procedure max retry time. |
| `procedure.retry_delay` | String | `500ms` | Initial retry delay of procedures, increases exponentially |
@@ -312,10 +300,6 @@
| `rpc_max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. |
| `rpc_max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. |
| `runtime` | -- | -- | The runtime options. |
| `runtime.read_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
| `runtime.write_rt_size` | Integer | `8` | The number of threads to execute the runtime for global write operations. |
| `runtime.bg_rt_size` | Integer | `8` | The number of threads to execute the runtime for global background operations. |
| `heartbeat` | -- | -- | The heartbeat options. |
| `heartbeat.interval` | String | `3s` | Interval for sending heartbeat messages to the metasrv. |
| `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. |

View File

@@ -32,15 +32,6 @@ rpc_max_send_message_size = "512MB"
## Enable telemetry to collect anonymous usage data.
enable_telemetry = true
## The runtime options.
[runtime]
## The number of threads to execute the runtime for global read operations.
read_rt_size = 8
## The number of threads to execute the runtime for global write operations.
write_rt_size = 8
## The number of threads to execute the runtime for global background operations.
bg_rt_size = 8
## The heartbeat options.
[heartbeat]
## Interval for sending heartbeat messages to the metasrv.

View File

@@ -5,15 +5,6 @@ mode = "standalone"
## +toml2docs:none-default
default_timezone = "UTC"
## The runtime options.
[runtime]
## The number of threads to execute the runtime for global read operations.
read_rt_size = 8
## The number of threads to execute the runtime for global write operations.
write_rt_size = 8
## The number of threads to execute the runtime for global background operations.
bg_rt_size = 8
## The heartbeat options.
[heartbeat]
## Interval for sending heartbeat messages to the metasrv.

View File

@@ -25,15 +25,6 @@ enable_telemetry = true
## If it's not empty, the metasrv will store all data with this key prefix.
store_key_prefix = ""
## The runtime options.
[runtime]
## The number of threads to execute the runtime for global read operations.
read_rt_size = 8
## The number of threads to execute the runtime for global write operations.
write_rt_size = 8
## The number of threads to execute the runtime for global background operations.
bg_rt_size = 8
## Procedure storage options.
[procedure]

View File

@@ -8,15 +8,6 @@ enable_telemetry = true
## +toml2docs:none-default
default_timezone = "UTC"
## The runtime options.
[runtime]
## The number of threads to execute the runtime for global read operations.
read_rt_size = 8
## The number of threads to execute the runtime for global write operations.
write_rt_size = 8
## The number of threads to execute the runtime for global background operations.
bg_rt_size = 8
## The HTTP server options.
[http]
## The address to bind the HTTP server.

View File

@@ -1,102 +0,0 @@
x-custom:
initial_cluster_token: &initial_cluster_token "--initial-cluster-token=etcd-cluster"
common_settings: &common_settings
image: quay.io/coreos/etcd:v3.5.10
entrypoint: /usr/local/bin/etcd
services:
etcd0:
<<: *common_settings
container_name: etcd0
ports:
- 2379:2379
- 2380:2380
command:
- --name=etcd0
- --data-dir=/var/lib/etcd
- --initial-advertise-peer-urls=http://etcd0:2380
- --listen-peer-urls=http://0.0.0.0:2380
- --listen-client-urls=http://0.0.0.0:2379
- --advertise-client-urls=http://etcd0:2379
- --heartbeat-interval=250
- --election-timeout=1250
- --initial-cluster=etcd0=http://etcd0:2380
- --initial-cluster-state=new
- *initial_cluster_token
volumes:
- /tmp/greptimedb-cluster-docker-compose/etcd0:/var/lib/etcd
healthcheck:
test: [ "CMD", "etcdctl", "--endpoints=http://etcd0:2379", "endpoint", "health" ]
interval: 5s
timeout: 3s
retries: 5
networks:
- greptimedb
metasrv:
image: docker.io/greptime/greptimedb:latest
container_name: metasrv
ports:
- 3002:3002
command:
- metasrv
- start
- --bind-addr=0.0.0.0:3002
- --server-addr=metasrv:3002
- --store-addrs=etcd0:2379
healthcheck:
test: [ "CMD", "curl", "-f", "http://metasrv:3002/health" ]
interval: 5s
timeout: 3s
retries: 5
depends_on:
etcd0:
condition: service_healthy
networks:
- greptimedb
datanode0:
image: docker.io/greptime/greptimedb:latest
container_name: datanode0
ports:
- 3001:3001
command:
- datanode
- start
- --node-id=0
- --rpc-addr=0.0.0.0:3001
- --rpc-hostname=datanode0:3001
- --metasrv-addr=metasrv:3002
volumes:
- /tmp/greptimedb-cluster-docker-compose/datanode0:/tmp/greptimedb
depends_on:
metasrv:
condition: service_healthy
networks:
- greptimedb
frontend0:
image: docker.io/greptime/greptimedb:latest
container_name: frontend0
ports:
- 4000:4000
- 4001:4001
- 4002:4002
- 4003:4003
command:
- frontend
- start
- --metasrv-addrs=metasrv:3002
- --http-addr=0.0.0.0:4000
- --rpc-addr=0.0.0.0:4001
- --mysql-addr=0.0.0.0:4002
- --postgres-addr=0.0.0.0:4003
depends_on:
metasrv:
condition: service_healthy
networks:
- greptimedb
networks:
greptimedb:
name: greptimedb
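For context, the cluster this (deleted) compose file describes publishes the frontend's standard ports on localhost, so once started it can be reached like any GreptimeDB frontend. A sketch, assuming a local mysql client and that the frontend still serves its /health endpoint on the HTTP port:

mysql -h 127.0.0.1 -P 4002
curl http://127.0.0.1:4000/health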

View File

@@ -11,4 +11,3 @@ common-macro.workspace = true
common-meta.workspace = true
moka.workspace = true
snafu.workspace = true
substrait.workspace = true

src/cache/src/lib.rs (vendored): 15 changed lines
View File

@@ -20,8 +20,7 @@ use std::time::Duration;
use catalog::kvbackend::new_table_cache;
use common_meta::cache::{
new_table_flownode_set_cache, new_table_info_cache, new_table_name_cache,
new_table_route_cache, new_view_info_cache, CacheRegistry, CacheRegistryBuilder,
LayeredCacheRegistryBuilder,
new_table_route_cache, CacheRegistry, CacheRegistryBuilder, LayeredCacheRegistryBuilder,
};
use common_meta::kv_backend::KvBackendRef;
use moka::future::CacheBuilder;
@@ -34,7 +33,6 @@ const DEFAULT_CACHE_TTL: Duration = Duration::from_secs(10 * 60);
const DEFAULT_CACHE_TTI: Duration = Duration::from_secs(5 * 60);
pub const TABLE_INFO_CACHE_NAME: &str = "table_info_cache";
pub const VIEW_INFO_CACHE_NAME: &str = "view_info_cache";
pub const TABLE_NAME_CACHE_NAME: &str = "table_name_cache";
pub const TABLE_CACHE_NAME: &str = "table_cache";
pub const TABLE_FLOWNODE_SET_CACHE_NAME: &str = "table_flownode_set_cache";
@@ -84,22 +82,11 @@ pub fn build_fundamental_cache_registry(kv_backend: KvBackendRef) -> CacheRegist
cache,
kv_backend.clone(),
));
// Builds the view info cache
let cache = CacheBuilder::new(DEFAULT_CACHE_MAX_CAPACITY)
.time_to_live(DEFAULT_CACHE_TTL)
.time_to_idle(DEFAULT_CACHE_TTI)
.build();
let view_info_cache = Arc::new(new_view_info_cache(
VIEW_INFO_CACHE_NAME.to_string(),
cache,
kv_backend.clone(),
));
CacheRegistryBuilder::default()
.add_cache(table_info_cache)
.add_cache(table_name_cache)
.add_cache(table_route_cache)
.add_cache(view_info_cache)
.add_cache(table_flownode_set_cache)
.build()
}

View File

@@ -16,7 +16,6 @@ arrow.workspace = true
arrow-schema.workspace = true
async-stream.workspace = true
async-trait = "0.1"
bytes.workspace = true
common-catalog.workspace = true
common-config.workspace = true
common-error.workspace = true
@@ -49,11 +48,8 @@ table.workspace = true
tokio.workspace = true
[dev-dependencies]
cache.workspace = true
catalog = { workspace = true, features = ["testing"] }
chrono.workspace = true
common-meta = { workspace = true, features = ["testing"] }
common-query = { workspace = true, features = ["testing"] }
common-test-util.workspace = true
log-store.workspace = true
object-store.workspace = true

View File

@@ -19,7 +19,10 @@ use common_error::ext::{BoxedError, ErrorExt};
use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
use datafusion::error::DataFusionError;
use datatypes::prelude::ConcreteDataType;
use snafu::{Location, Snafu};
use table::metadata::TableId;
use tokio::task::JoinError;
#[derive(Snafu)]
#[snafu(visibility(pub))]
@@ -62,6 +65,19 @@ pub enum Error {
location: Location,
source: BoxedError,
},
#[snafu(display("Failed to open system catalog table"))]
OpenSystemCatalog {
#[snafu(implicit)]
location: Location,
source: table::error::Error,
},
#[snafu(display("Failed to create system catalog table"))]
CreateSystemCatalog {
#[snafu(implicit)]
location: Location,
source: table::error::Error,
},
#[snafu(display("Failed to create table, table info: {}", table_info))]
CreateTable {
@@ -78,6 +94,52 @@ pub enum Error {
location: Location,
},
#[snafu(display(
"System catalog table type mismatch, expected: binary, found: {:?}",
data_type,
))]
SystemCatalogTypeMismatch {
data_type: ConcreteDataType,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Invalid system catalog entry type: {:?}", entry_type))]
InvalidEntryType {
entry_type: Option<u8>,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Invalid system catalog key: {:?}", key))]
InvalidKey {
key: Option<String>,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Catalog value is not present"))]
EmptyValue {
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Failed to deserialize value"))]
ValueDeserialize {
#[snafu(source)]
error: serde_json::error::Error,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Table engine not found: {}", engine_name))]
TableEngineNotFound {
engine_name: String,
#[snafu(implicit)]
location: Location,
source: table::error::Error,
},
#[snafu(display("Cannot find catalog by name: {}", catalog_name))]
CatalogNotFound {
catalog_name: String,
@@ -107,9 +169,44 @@ pub enum Error {
location: Location,
},
#[snafu(display("View info not found: {}", name))]
ViewInfoNotFound {
name: String,
#[snafu(display("Schema {} already exists", schema))]
SchemaExists {
schema: String,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Operation {} not implemented yet", operation))]
Unimplemented {
operation: String,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Operation {} not supported", op))]
NotSupported {
op: String,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Failed to open table {table_id}"))]
OpenTable {
table_id: TableId,
#[snafu(implicit)]
location: Location,
source: table::error::Error,
},
#[snafu(display("Failed to open table in parallel"))]
ParallelOpenTable {
#[snafu(source)]
error: JoinError,
},
#[snafu(display("Table not found while opening table, table info: {}", table_info))]
TableNotFound {
table_info: String,
#[snafu(implicit)]
location: Location,
},
@@ -120,6 +217,13 @@ pub enum Error {
#[snafu(display("Failed to find region routes"))]
FindRegionRoutes { source: partition::error::Error },
#[snafu(display("Failed to read system catalog table records"))]
ReadSystemCatalog {
#[snafu(implicit)]
location: Location,
source: common_recordbatch::error::Error,
},
#[snafu(display("Failed to create recordbatch"))]
CreateRecordBatch {
#[snafu(implicit)]
@@ -127,6 +231,20 @@ pub enum Error {
source: common_recordbatch::error::Error,
},
#[snafu(display("Failed to insert table creation record to system catalog"))]
InsertCatalogRecord {
#[snafu(implicit)]
location: Location,
source: table::error::Error,
},
#[snafu(display("Failed to scan system catalog table"))]
SystemCatalogTableScan {
#[snafu(implicit)]
location: Location,
source: table::error::Error,
},
#[snafu(display("Internal error"))]
Internal {
#[snafu(implicit)]
@@ -140,14 +258,20 @@ pub enum Error {
location: Location,
},
#[snafu(display("Failed to decode logical plan for view: {}", name))]
DecodePlan {
name: String,
#[snafu(display("Failed to execute system catalog table scan"))]
SystemCatalogTableScanExec {
#[snafu(implicit)]
location: Location,
source: common_query::error::Error,
},
#[snafu(display("Cannot parse catalog value"))]
InvalidCatalogValue {
#[snafu(implicit)]
location: Location,
source: common_catalog::error::Error,
},
#[snafu(display("Failed to perform metasrv operation"))]
Metasrv {
#[snafu(implicit)]
@@ -173,6 +297,20 @@ pub enum Error {
location: Location,
},
#[snafu(display("Table schema mismatch"))]
TableSchemaMismatch {
#[snafu(implicit)]
location: Location,
source: table::error::Error,
},
#[snafu(display("A generic error has occurred, msg: {}", msg))]
Generic {
msg: String,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Table metadata manager error"))]
TableMetadataManager {
source: common_meta::error::Error,
@@ -186,26 +324,6 @@ pub enum Error {
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Failed to get view info from cache"))]
GetViewCache {
source: common_meta::error::Error,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Cache not found: {name}"))]
CacheNotFound {
name: String,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Failed to cast the catalog manager"))]
CastManager {
#[snafu(implicit)]
location: Location,
},
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -213,43 +331,61 @@ pub type Result<T> = std::result::Result<T, Error>;
impl ErrorExt for Error {
fn status_code(&self) -> StatusCode {
match self {
Error::SchemaNotFound { .. }
Error::InvalidKey { .. }
| Error::SchemaNotFound { .. }
| Error::CatalogNotFound { .. }
| Error::FindPartitions { .. }
| Error::FindRegionRoutes { .. }
| Error::CacheNotFound { .. }
| Error::CastManager { .. } => StatusCode::Unexpected,
| Error::InvalidEntryType { .. }
| Error::ParallelOpenTable { .. } => StatusCode::Unexpected,
Error::ViewInfoNotFound { .. } => StatusCode::TableNotFound,
Error::TableNotFound { .. } => StatusCode::TableNotFound,
Error::SystemCatalog { .. } => StatusCode::StorageUnavailable,
Error::SystemCatalog { .. }
| Error::EmptyValue { .. }
| Error::ValueDeserialize { .. } => StatusCode::StorageUnavailable,
Error::UpgradeWeakCatalogManagerRef { .. } => StatusCode::Internal,
Error::Generic { .. }
| Error::SystemCatalogTypeMismatch { .. }
| Error::UpgradeWeakCatalogManagerRef { .. } => StatusCode::Internal,
Error::ReadSystemCatalog { source, .. } | Error::CreateRecordBatch { source, .. } => {
source.status_code()
}
Error::InvalidCatalogValue { source, .. } => source.status_code(),
Error::CreateRecordBatch { source, .. } => source.status_code(),
Error::TableExists { .. } => StatusCode::TableAlreadyExists,
Error::TableNotExist { .. } => StatusCode::TableNotFound,
Error::SchemaExists { .. } | Error::TableEngineNotFound { .. } => {
StatusCode::InvalidArguments
}
Error::ListCatalogs { source, .. }
| Error::ListNodes { source, .. }
| Error::ListSchemas { source, .. }
| Error::ListTables { source, .. } => source.status_code(),
Error::CreateTable { source, .. } => source.status_code(),
Error::OpenSystemCatalog { source, .. }
| Error::CreateSystemCatalog { source, .. }
| Error::InsertCatalogRecord { source, .. }
| Error::OpenTable { source, .. }
| Error::CreateTable { source, .. }
| Error::TableSchemaMismatch { source, .. } => source.status_code(),
Error::Metasrv { source, .. } => source.status_code(),
Error::DecodePlan { source, .. } => source.status_code(),
Error::SystemCatalogTableScan { source, .. } => source.status_code(),
Error::SystemCatalogTableScanExec { source, .. } => source.status_code(),
Error::InvalidTableInfoInCatalog { source, .. } => source.status_code(),
Error::CompileScriptInternal { source, .. } | Error::Internal { source, .. } => {
source.status_code()
}
Error::Unimplemented { .. } | Error::NotSupported { .. } => StatusCode::Unsupported,
Error::QueryAccessDenied { .. } => StatusCode::AccessDenied,
Error::Datafusion { .. } => StatusCode::EngineExecuteQuery,
Error::TableMetadataManager { source, .. } => source.status_code(),
Error::GetViewCache { source, .. } | Error::GetTableCache { source, .. } => {
source.status_code()
}
Error::GetTableCache { .. } => StatusCode::Internal,
}
}
@@ -281,6 +417,11 @@ mod tests {
.status_code()
);
assert_eq!(
StatusCode::Unexpected,
InvalidKeySnafu { key: None }.build().status_code()
);
assert_eq!(
StatusCode::StorageUnavailable,
Error::SystemCatalog {
@@ -289,6 +430,19 @@ mod tests {
}
.status_code()
);
assert_eq!(
StatusCode::Internal,
Error::SystemCatalogTypeMismatch {
data_type: ConcreteDataType::binary_datatype(),
location: Location::generate(),
}
.status_code()
);
assert_eq!(
StatusCode::StorageUnavailable,
EmptyValueSnafu {}.build().status_code()
);
}
#[test]

View File

@@ -22,13 +22,14 @@ use common_catalog::consts::{
};
use common_config::Mode;
use common_error::ext::BoxedError;
use common_meta::cache::{LayeredCacheRegistryRef, ViewInfoCacheRef};
use common_meta::cache::TableRouteCacheRef;
use common_meta::key::catalog_name::CatalogNameKey;
use common_meta::key::schema_name::SchemaNameKey;
use common_meta::key::table_info::TableInfoValue;
use common_meta::key::table_name::TableNameKey;
use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
use common_meta::kv_backend::KvBackendRef;
use common_meta::table_name::TableName;
use futures_util::stream::BoxStream;
use futures_util::{StreamExt, TryStreamExt};
use meta_client::client::MetaClient;
@@ -37,12 +38,11 @@ use partition::manager::{PartitionRuleManager, PartitionRuleManagerRef};
use snafu::prelude::*;
use table::dist_table::DistTable;
use table::table::numbers::{NumbersTable, NUMBERS_TABLE_NAME};
use table::table_name::TableName;
use table::TableRef;
use crate::error::{
CacheNotFoundSnafu, GetTableCacheSnafu, InvalidTableInfoInCatalogSnafu, ListCatalogsSnafu,
ListSchemasSnafu, ListTablesSnafu, Result, TableMetadataManagerSnafu,
GetTableCacheSnafu, InvalidTableInfoInCatalogSnafu, ListCatalogsSnafu, ListSchemasSnafu,
ListTablesSnafu, Result, TableMetadataManagerSnafu,
};
use crate::information_schema::InformationSchemaProvider;
use crate::kvbackend::TableCacheRef;
@@ -61,26 +61,25 @@ pub struct KvBackendCatalogManager {
table_metadata_manager: TableMetadataManagerRef,
/// A sub-CatalogManager that handles system tables
system_catalog: SystemCatalog,
cache_registry: LayeredCacheRegistryRef,
table_cache: TableCacheRef,
}
const CATALOG_CACHE_MAX_CAPACITY: u64 = 128;
impl KvBackendCatalogManager {
pub fn new(
pub async fn new(
mode: Mode,
meta_client: Option<Arc<MetaClient>>,
backend: KvBackendRef,
cache_registry: LayeredCacheRegistryRef,
table_cache: TableCacheRef,
table_route_cache: TableRouteCacheRef,
) -> Arc<Self> {
Arc::new_cyclic(|me| Self {
mode,
meta_client,
partition_manager: Arc::new(PartitionRuleManager::new(
backend.clone(),
cache_registry
.get()
.expect("Failed to get table_route_cache"),
table_route_cache,
)),
table_metadata_manager: Arc::new(TableMetadataManager::new(backend)),
system_catalog: SystemCatalog {
@@ -91,7 +90,7 @@ impl KvBackendCatalogManager {
me.clone(),
)),
},
cache_registry,
table_cache,
})
}
@@ -100,12 +99,6 @@ impl KvBackendCatalogManager {
&self.mode
}
pub fn view_info_cache(&self) -> Result<ViewInfoCacheRef> {
self.cache_registry.get().context(CacheNotFoundSnafu {
name: "view_info_cache",
})
}
/// Returns the `[MetaClient]`.
pub fn meta_client(&self) -> Option<Arc<MetaClient>> {
self.meta_client.clone()
@@ -222,11 +215,7 @@ impl CatalogManager for KvBackendCatalogManager {
return Ok(Some(table));
}
let table_cache: TableCacheRef = self.cache_registry.get().context(CacheNotFoundSnafu {
name: "table_cache",
})?;
table_cache
self.table_cache
.get_by_ref(&TableName {
catalog_name: catalog_name.to_string(),
schema_name: schema_name.to_string(),

View File

@@ -17,11 +17,11 @@ use std::sync::Arc;
use common_meta::cache::{CacheContainer, Initializer, TableInfoCacheRef, TableNameCacheRef};
use common_meta::error::{Result as MetaResult, ValueNotExistSnafu};
use common_meta::instruction::CacheIdent;
use common_meta::table_name::TableName;
use futures::future::BoxFuture;
use moka::future::Cache;
use snafu::OptionExt;
use table::dist_table::DistTable;
use table::table_name::TableName;
use table::TableRef;
pub type TableCacheRef = Arc<TableCache>;

View File

@@ -15,25 +15,15 @@
use std::collections::HashMap;
use std::sync::Arc;
use bytes::Bytes;
use common_catalog::format_full_table_name;
use common_query::logical_plan::SubstraitPlanDecoderRef;
use datafusion::common::{ResolvedTableReference, TableReference};
use datafusion::datasource::view::ViewTable;
use datafusion::datasource::{provider_as_source, TableProvider};
use datafusion::datasource::provider_as_source;
use datafusion::logical_expr::TableSource;
use session::context::QueryContext;
use snafu::{ensure, OptionExt, ResultExt};
use table::metadata::TableType;
use snafu::{ensure, OptionExt};
use table::table::adapter::DfTableProviderAdapter;
mod dummy_catalog;
use dummy_catalog::DummyCatalogList;
use crate::error::{
CastManagerSnafu, DatafusionSnafu, DecodePlanSnafu, GetViewCacheSnafu, QueryAccessDeniedSnafu,
Result, TableNotExistSnafu, ViewInfoNotFoundSnafu,
};
use crate::kvbackend::KvBackendCatalogManager;
use crate::error::{QueryAccessDeniedSnafu, Result, TableNotExistSnafu};
use crate::CatalogManagerRef;
pub struct DfTableSourceProvider {
@@ -42,7 +32,6 @@ pub struct DfTableSourceProvider {
disallow_cross_catalog_query: bool,
default_catalog: String,
default_schema: String,
plan_decoder: SubstraitPlanDecoderRef,
}
impl DfTableSourceProvider {
@@ -50,7 +39,6 @@ impl DfTableSourceProvider {
catalog_manager: CatalogManagerRef,
disallow_cross_catalog_query: bool,
query_ctx: &QueryContext,
plan_decoder: SubstraitPlanDecoderRef,
) -> Self {
Self {
catalog_manager,
@@ -58,7 +46,6 @@ impl DfTableSourceProvider {
resolved_tables: HashMap::new(),
default_catalog: query_ctx.current_catalog().to_owned(),
default_schema: query_ctx.current_schema().to_owned(),
plan_decoder,
}
}
@@ -107,39 +94,8 @@ impl DfTableSourceProvider {
table: format_full_table_name(catalog_name, schema_name, table_name),
})?;
let provider: Arc<dyn TableProvider> = if table.table_info().table_type == TableType::View {
let catalog_manager = self
.catalog_manager
.as_any()
.downcast_ref::<KvBackendCatalogManager>()
.context(CastManagerSnafu)?;
let view_info = catalog_manager
.view_info_cache()?
.get(table.table_info().ident.table_id)
.await
.context(GetViewCacheSnafu)?
.context(ViewInfoNotFoundSnafu {
name: &table.table_info().name,
})?;
// Build the catalog list provider for deserialization.
let catalog_list = Arc::new(DummyCatalogList::new(self.catalog_manager.clone()));
let logical_plan = self
.plan_decoder
.decode(Bytes::from(view_info.view_info.clone()), catalog_list, true)
.await
.context(DecodePlanSnafu {
name: &table.table_info().name,
})?;
Arc::new(ViewTable::try_new(logical_plan, None).context(DatafusionSnafu)?)
} else {
Arc::new(DfTableProviderAdapter::new(table))
};
let source = provider_as_source(provider);
let provider = DfTableProviderAdapter::new(table);
let source = provider_as_source(Arc::new(provider));
let _ = self.resolved_tables.insert(resolved_name, source.clone());
Ok(source)
}
@@ -147,7 +103,6 @@ impl DfTableSourceProvider {
#[cfg(test)]
mod tests {
use common_query::test_util::DummyDecoder;
use session::context::QueryContext;
use super::*;
@@ -157,12 +112,8 @@ mod tests {
fn test_validate_table_ref() {
let query_ctx = &QueryContext::with("greptime", "public");
let table_provider = DfTableSourceProvider::new(
MemoryCatalogManager::with_default_setup(),
true,
query_ctx,
DummyDecoder::arc(),
);
let table_provider =
DfTableSourceProvider::new(MemoryCatalogManager::with_default_setup(), true, query_ctx);
let table_ref = TableReference::bare("table_name");
let result = table_provider.resolve_table_ref(table_ref);
@@ -197,99 +148,4 @@ mod tests {
let table_ref = TableReference::full("greptime", "greptime_private", "columns");
assert!(table_provider.resolve_table_ref(table_ref).is_ok());
}
use std::collections::HashSet;
use arrow::datatypes::{DataType, Field, Schema, SchemaRef};
use cache::{build_fundamental_cache_registry, with_default_composite_cache_registry};
use common_config::Mode;
use common_meta::cache::{CacheRegistryBuilder, LayeredCacheRegistryBuilder};
use common_meta::key::TableMetadataManager;
use common_meta::kv_backend::memory::MemoryKvBackend;
use common_query::error::Result as QueryResult;
use common_query::logical_plan::SubstraitPlanDecoder;
use datafusion::catalog::CatalogProviderList;
use datafusion::logical_expr::builder::LogicalTableSource;
use datafusion::logical_expr::{col, lit, LogicalPlan, LogicalPlanBuilder};
struct MockDecoder;
impl MockDecoder {
pub fn arc() -> Arc<Self> {
Arc::new(MockDecoder)
}
}
#[async_trait::async_trait]
impl SubstraitPlanDecoder for MockDecoder {
async fn decode(
&self,
_message: bytes::Bytes,
_catalog_list: Arc<dyn CatalogProviderList>,
_optimize: bool,
) -> QueryResult<LogicalPlan> {
Ok(mock_plan())
}
}
fn mock_plan() -> LogicalPlan {
let schema = Schema::new(vec![
Field::new("id", DataType::Int32, true),
Field::new("name", DataType::Utf8, true),
]);
let table_source = LogicalTableSource::new(SchemaRef::new(schema));
let projection = None;
let builder =
LogicalPlanBuilder::scan("person", Arc::new(table_source), projection).unwrap();
builder
.filter(col("id").gt(lit(500)))
.unwrap()
.build()
.unwrap()
}
#[tokio::test]
async fn test_resolve_view() {
let query_ctx = &QueryContext::with("greptime", "public");
let backend = Arc::new(MemoryKvBackend::default());
let layered_cache_builder = LayeredCacheRegistryBuilder::default()
.add_cache_registry(CacheRegistryBuilder::default().build());
let fundamental_cache_registry = build_fundamental_cache_registry(backend.clone());
let layered_cache_registry = Arc::new(
with_default_composite_cache_registry(
layered_cache_builder.add_cache_registry(fundamental_cache_registry),
)
.unwrap()
.build(),
);
let catalog_manager = KvBackendCatalogManager::new(
Mode::Standalone,
None,
backend.clone(),
layered_cache_registry,
);
let table_metadata_manager = TableMetadataManager::new(backend);
let mut view_info = common_meta::key::test_utils::new_test_table_info(1024, vec![]);
view_info.table_type = TableType::View;
let logical_plan = vec![1, 2, 3];
// Create view metadata
table_metadata_manager
.create_view_metadata(view_info.clone().into(), logical_plan, HashSet::new())
.await
.unwrap();
let mut table_provider =
DfTableSourceProvider::new(catalog_manager, true, query_ctx, MockDecoder::arc());
// View not found
let table_ref = TableReference::bare("not_exists_view");
assert!(table_provider.resolve_table(table_ref).await.is_err());
let table_ref = TableReference::bare(view_info.name);
let source = table_provider.resolve_table(table_ref).await.unwrap();
assert_eq!(*source.get_logical_plan().unwrap(), mock_plan());
}
}

View File

@@ -1,129 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Dummy catalog for region server.
use std::any::Any;
use std::sync::Arc;
use async_trait::async_trait;
use common_catalog::format_full_table_name;
use datafusion::catalog::schema::SchemaProvider;
use datafusion::catalog::{CatalogProvider, CatalogProviderList};
use datafusion::datasource::TableProvider;
use snafu::OptionExt;
use table::table::adapter::DfTableProviderAdapter;
use crate::error::TableNotExistSnafu;
use crate::CatalogManagerRef;
/// Delegate the resolving requests to the `[CatalogManager]` unconditionally.
#[derive(Clone)]
pub struct DummyCatalogList {
catalog_manager: CatalogManagerRef,
}
impl DummyCatalogList {
/// Creates a new catalog list with the given catalog manager.
pub fn new(catalog_manager: CatalogManagerRef) -> Self {
Self { catalog_manager }
}
}
impl CatalogProviderList for DummyCatalogList {
fn as_any(&self) -> &dyn Any {
self
}
fn register_catalog(
&self,
_name: String,
_catalog: Arc<dyn CatalogProvider>,
) -> Option<Arc<dyn CatalogProvider>> {
None
}
fn catalog_names(&self) -> Vec<String> {
vec![]
}
fn catalog(&self, catalog_name: &str) -> Option<Arc<dyn CatalogProvider>> {
Some(Arc::new(DummyCatalogProvider {
catalog_name: catalog_name.to_string(),
catalog_manager: self.catalog_manager.clone(),
}))
}
}
/// A dummy catalog provider for [DummyCatalogList].
#[derive(Clone)]
struct DummyCatalogProvider {
catalog_name: String,
catalog_manager: CatalogManagerRef,
}
impl CatalogProvider for DummyCatalogProvider {
fn as_any(&self) -> &dyn Any {
self
}
fn schema_names(&self) -> Vec<String> {
vec![]
}
fn schema(&self, schema_name: &str) -> Option<Arc<dyn SchemaProvider>> {
Some(Arc::new(DummySchemaProvider {
catalog_name: self.catalog_name.clone(),
schema_name: schema_name.to_string(),
catalog_manager: self.catalog_manager.clone(),
}))
}
}
/// A dummy schema provider for [DummyCatalogList].
#[derive(Clone)]
struct DummySchemaProvider {
catalog_name: String,
schema_name: String,
catalog_manager: CatalogManagerRef,
}
#[async_trait]
impl SchemaProvider for DummySchemaProvider {
fn as_any(&self) -> &dyn Any {
self
}
fn table_names(&self) -> Vec<String> {
vec![]
}
async fn table(&self, name: &str) -> datafusion::error::Result<Option<Arc<dyn TableProvider>>> {
let table = self
.catalog_manager
.table(&self.catalog_name, &self.schema_name, name)
.await?
.with_context(|| TableNotExistSnafu {
table: format_full_table_name(&self.catalog_name, &self.schema_name, name),
})?;
let table_provider: Arc<dyn TableProvider> = Arc::new(DfTableProviderAdapter::new(table));
Ok(Some(table_provider))
}
fn table_exist(&self, _name: &str) -> bool {
true
}
}

View File

@@ -173,14 +173,14 @@ impl Client {
Ok(FlightClient { addr, client })
}
pub(crate) fn raw_region_client(&self) -> Result<(String, PbRegionClient<Channel>)> {
let (addr, channel) = self.find_channel()?;
pub(crate) fn raw_region_client(&self) -> Result<PbRegionClient<Channel>> {
let (_, channel) = self.find_channel()?;
let client = PbRegionClient::new(channel)
.max_decoding_message_size(self.max_grpc_recv_message_size())
.max_encoding_message_size(self.max_grpc_send_message_size())
.accept_compressed(CompressionEncoding::Zstd)
.send_compressed(CompressionEncoding::Zstd);
Ok((addr, client))
Ok(client)
}
pub fn make_prometheus_gateway_client(&self) -> Result<PrometheusGatewayClient<Channel>> {

View File

@@ -89,9 +89,8 @@ pub enum Error {
source: common_grpc::error::Error,
},
#[snafu(display("Failed to request RegionServer {}, code: {}", addr, code))]
#[snafu(display("Failed to request RegionServer, code: {}", code))]
RegionServer {
addr: String,
code: Code,
source: BoxedError,
#[snafu(implicit)]

View File

@@ -12,12 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#![feature(assert_matches)]
mod client;
pub mod client_manager;
#[cfg(feature = "testing")]
mod database;
pub mod error;
pub mod load_balance;
mod metrics;
@@ -33,8 +29,6 @@ pub use common_recordbatch::{RecordBatches, SendableRecordBatchStream};
use snafu::OptionExt;
pub use self::client::Client;
#[cfg(feature = "testing")]
pub use self::database::Database;
pub use self::error::{Error, Result};
use crate::error::{IllegalDatabaseResponseSnafu, ServerSnafu};

View File

@@ -177,7 +177,7 @@ impl RegionRequester {
.with_label_values(&[request_type.as_str()])
.start_timer();
let (addr, mut client) = self.client.raw_region_client()?;
let mut client = self.client.raw_region_client()?;
let response = client
.handle(request)
@@ -187,7 +187,6 @@ impl RegionRequester {
let err: error::Error = e.into();
// Uses `Error::RegionServer` instead of `Error::Server`
error::Error::RegionServer {
addr,
code,
source: BoxedError::new(err),
location: location!(),

View File

@@ -80,7 +80,6 @@ tracing-appender = "0.2"
tikv-jemallocator = "0.5"
[dev-dependencies]
client = { workspace = true, features = ["testing"] }
common-test-util.workspace = true
serde.workspace = true
temp-env = "0.3"

View File

@@ -22,14 +22,18 @@ mod helper;
// Wait for https://github.com/GreptimeTeam/greptimedb/issues/2373
#[allow(unused)]
mod repl;
// mod repl;
// TODO(weny): Removes it
#[allow(deprecated)]
mod upgrade;
use async_trait::async_trait;
use bench::BenchTableMetadataCommand;
use clap::Parser;
use common_telemetry::logging::{LoggingOptions, TracingOptions};
pub use repl::Repl;
use tracing_appender::non_blocking::WorkerGuard;
// pub use repl::Repl;
use upgrade::UpgradeCommand;
use self::export::ExportCommand;
use crate::error::Result;
@@ -112,6 +116,7 @@ impl Command {
#[derive(Parser)]
enum SubCommand {
// Attach(AttachCommand),
Upgrade(UpgradeCommand),
Bench(BenchTableMetadataCommand),
Export(ExportCommand),
}
@@ -120,6 +125,7 @@ impl SubCommand {
async fn build(&self, guard: Vec<WorkerGuard>) -> Result<Instance> {
match self {
// SubCommand::Attach(cmd) => cmd.build().await,
SubCommand::Upgrade(cmd) => cmd.build(guard).await,
SubCommand::Bench(cmd) => cmd.build(guard).await,
SubCommand::Export(cmd) => cmd.build(guard).await,
}

View File

@@ -23,13 +23,13 @@ use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
use common_meta::kv_backend::etcd::EtcdStore;
use common_meta::peer::Peer;
use common_meta::rpc::router::{Region, RegionRoute};
use common_meta::table_name::TableName;
use common_telemetry::info;
use datatypes::data_type::ConcreteDataType;
use datatypes::schema::{ColumnSchema, RawSchema};
use rand::Rng;
use store_api::storage::RegionNumber;
use table::metadata::{RawTableInfo, RawTableMeta, TableId, TableIdent, TableType};
use table::table_name::TableName;
use tracing_appender::non_blocking::WorkerGuard;
use self::metadata::TableMetadataBencher;

View File

@@ -16,7 +16,7 @@ use std::time::Instant;
use common_meta::key::table_route::TableRouteValue;
use common_meta::key::TableMetadataManagerRef;
use table::table_name::TableName;
use common_meta::table_name::TableName;
use crate::cli::bench::{
bench_self_recorded, create_region_routes, create_region_wal_options, create_table_info,

View File

@@ -434,80 +434,3 @@ fn split_database(database: &str) -> Result<(String, Option<String>)> {
Ok((catalog.to_string(), Some(schema.to_string())))
}
}
#[cfg(test)]
mod tests {
use clap::Parser;
use client::{Client, Database};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_telemetry::logging::LoggingOptions;
use crate::error::Result as CmdResult;
use crate::options::GlobalOptions;
use crate::{cli, standalone, App};
#[tokio::test(flavor = "multi_thread")]
async fn test_export_create_table_with_quoted_names() -> CmdResult<()> {
let output_dir = tempfile::tempdir().unwrap();
let standalone = standalone::Command::parse_from([
"standalone",
"start",
"--data-home",
&*output_dir.path().to_string_lossy(),
]);
let standalone_opts = standalone.load_options(&GlobalOptions::default()).unwrap();
let mut instance = standalone.build(standalone_opts).await?;
instance.start().await?;
let client = Client::with_urls(["127.0.0.1:4001"]);
let database = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client);
database
.sql(r#"CREATE DATABASE "cli.export.create_table";"#)
.await
.unwrap();
database
.sql(
r#"CREATE TABLE "cli.export.create_table"."a.b.c"(
ts TIMESTAMP,
TIME INDEX (ts)
) engine=mito;
"#,
)
.await
.unwrap();
let output_dir = tempfile::tempdir().unwrap();
let cli = cli::Command::parse_from([
"cli",
"export",
"--addr",
"127.0.0.1:4000",
"--output-dir",
&*output_dir.path().to_string_lossy(),
"--target",
"create-table",
]);
let mut cli_app = cli.build(LoggingOptions::default()).await?;
cli_app.start().await?;
instance.stop().await?;
let output_file = output_dir
.path()
.join("greptime-cli.export.create_table.sql");
let res = std::fs::read_to_string(output_file).unwrap();
let expect = r#"CREATE TABLE IF NOT EXISTS "a.b.c" (
"ts" TIMESTAMP(3) NOT NULL,
TIME INDEX ("ts")
)
ENGINE=mito
;
"#;
assert_eq!(res.trim(), expect.trim());
Ok(())
}
}

View File

@@ -16,18 +16,14 @@ use std::path::PathBuf;
use std::sync::Arc;
use std::time::Instant;
use cache::{
build_fundamental_cache_registry, with_default_composite_cache_registry, TABLE_CACHE_NAME,
TABLE_ROUTE_CACHE_NAME,
};
use catalog::kvbackend::{
CachedMetaKvBackend, CachedMetaKvBackendBuilder, KvBackendCatalogManager, MetaKvBackend,
CachedMetaKvBackend, CachedMetaKvBackendBuilder, KvBackendCatalogManager,
};
use client::{Client, Database, OutputData, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use client::{Client, OutputData, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_base::Plugins;
use common_config::Mode;
use common_error::ext::ErrorExt;
use common_meta::cache::{CacheRegistryBuilder, LayeredCacheRegistryBuilder};
use common_meta::cache_invalidator::MultiCacheInvalidator;
use common_query::Output;
use common_recordbatch::RecordBatches;
use common_telemetry::debug;
@@ -37,18 +33,17 @@ use query::datafusion::DatafusionQueryEngine;
use query::logical_optimizer::LogicalOptimizer;
use query::parser::QueryLanguageParser;
use query::plan::LogicalPlan;
use query::query_engine::{DefaultSerializer, QueryEngineState};
use query::query_engine::QueryEngineState;
use query::QueryEngine;
use rustyline::error::ReadlineError;
use rustyline::Editor;
use session::context::QueryContext;
use snafu::{OptionExt, ResultExt};
use snafu::ResultExt;
use substrait::{DFLogicalSubstraitConvertor, SubstraitPlan};
use crate::cli::cmd::ReplCommand;
use crate::cli::helper::RustylineHelper;
use crate::cli::AttachCommand;
use crate::error;
use crate::error::{
CollectRecordBatchesSnafu, ParseSqlSnafu, PlanStatementSnafu, PrettyPrintRecordBatchesSnafu,
ReadlineSnafu, ReplCreationSnafu, RequestDatabaseSnafu, Result, StartMetaClientSnafu,
@@ -185,7 +180,7 @@ impl Repl {
.context(PlanStatementSnafu)?;
let plan = DFLogicalSubstraitConvertor {}
.encode(&plan, DefaultSerializer)
.encode(&plan)
.context(SubstraitEncodeLogicalPlanSnafu)?;
self.database.logical_plan(plan.to_vec()).await
@@ -262,30 +257,19 @@ async fn create_query_engine(meta_addr: &str) -> Result<DatafusionQueryEngine> {
let cached_meta_backend =
Arc::new(CachedMetaKvBackendBuilder::new(meta_client.clone()).build());
let layered_cache_builder = LayeredCacheRegistryBuilder::default().add_cache_registry(
CacheRegistryBuilder::default()
.add_cache(cached_meta_backend.clone())
.build(),
);
let fundamental_cache_registry =
build_fundamental_cache_registry(Arc::new(MetaKvBackend::new(meta_client.clone())));
let layered_cache_registry = Arc::new(
with_default_composite_cache_registry(
layered_cache_builder.add_cache_registry(fundamental_cache_registry),
)
.context(error::BuildCacheRegistrySnafu)?
.build(),
);
let catalog_manager = KvBackendCatalogManager::new(
let multi_cache_invalidator = Arc::new(MultiCacheInvalidator::with_invalidators(vec![
cached_meta_backend.clone(),
]));
let catalog_list = KvBackendCatalogManager::new(
Mode::Distributed,
Some(meta_client.clone()),
cached_meta_backend.clone(),
layered_cache_registry,
);
multi_cache_invalidator,
)
.await;
let plugins: Plugins = Default::default();
let state = Arc::new(QueryEngineState::new(
catalog_manager,
catalog_list,
None,
None,
None,

src/cmd/src/cli/upgrade.rs (new file): 584 added lines
View File

@@ -0,0 +1,584 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashMap;
use std::sync::Arc;
use async_trait::async_trait;
use clap::Parser;
use client::api::v1::meta::TableRouteValue;
use common_meta::ddl::utils::region_storage_path;
use common_meta::error as MetaError;
use common_meta::key::catalog_name::{CatalogNameKey, CatalogNameValue};
use common_meta::key::datanode_table::{DatanodeTableKey, DatanodeTableValue, RegionInfo};
use common_meta::key::schema_name::{SchemaNameKey, SchemaNameValue};
use common_meta::key::table_info::{TableInfoKey, TableInfoValue};
use common_meta::key::table_name::{TableNameKey, TableNameValue};
use common_meta::key::table_region::{TableRegionKey, TableRegionValue};
use common_meta::key::table_route::{TableRouteKey, TableRouteValue as NextTableRouteValue};
use common_meta::key::{MetaKey, RegionDistribution, TableMetaValue};
use common_meta::kv_backend::etcd::EtcdStore;
use common_meta::kv_backend::KvBackendRef;
use common_meta::range_stream::PaginationStream;
use common_meta::rpc::router::TableRoute;
use common_meta::rpc::store::{BatchDeleteRequest, BatchPutRequest, PutRequest, RangeRequest};
use common_meta::rpc::KeyValue;
use common_meta::util::get_prefix_end_key;
use common_telemetry::info;
use etcd_client::Client;
use futures::TryStreamExt;
use prost::Message;
use snafu::ResultExt;
use tracing_appender::non_blocking::WorkerGuard;
use v1_helper::{CatalogKey as v1CatalogKey, SchemaKey as v1SchemaKey, TableGlobalValue};
use crate::cli::{Instance, Tool};
use crate::error::{self, ConnectEtcdSnafu, Result};
#[derive(Debug, Default, Parser)]
pub struct UpgradeCommand {
#[clap(long)]
etcd_addr: String,
#[clap(long)]
dryrun: bool,
#[clap(long)]
skip_table_global_keys: bool,
#[clap(long)]
skip_catalog_keys: bool,
#[clap(long)]
skip_schema_keys: bool,
#[clap(long)]
skip_table_route_keys: bool,
}
impl UpgradeCommand {
pub async fn build(&self, guard: Vec<WorkerGuard>) -> Result<Instance> {
let client = Client::connect([&self.etcd_addr], None)
.await
.context(ConnectEtcdSnafu {
etcd_addr: &self.etcd_addr,
})?;
let tool = MigrateTableMetadata {
etcd_store: EtcdStore::with_etcd_client(client, 128),
dryrun: self.dryrun,
skip_catalog_keys: self.skip_catalog_keys,
skip_table_global_keys: self.skip_table_global_keys,
skip_schema_keys: self.skip_schema_keys,
skip_table_route_keys: self.skip_table_route_keys,
};
Ok(Instance::new(Box::new(tool), guard))
}
}
struct MigrateTableMetadata {
etcd_store: KvBackendRef,
dryrun: bool,
skip_table_global_keys: bool,
skip_catalog_keys: bool,
skip_schema_keys: bool,
skip_table_route_keys: bool,
}
#[async_trait]
impl Tool for MigrateTableMetadata {
// Migrates the database's metadata from v0.3 to v0.4.
async fn do_work(&self) -> Result<()> {
if !self.skip_table_global_keys {
self.migrate_table_global_values().await?;
}
if !self.skip_catalog_keys {
self.migrate_catalog_keys().await?;
}
if !self.skip_schema_keys {
self.migrate_schema_keys().await?;
}
if !self.skip_table_route_keys {
self.migrate_table_route_keys().await?;
}
Ok(())
}
}
const PAGE_SIZE: usize = 1000;
impl MigrateTableMetadata {
async fn migrate_table_route_keys(&self) -> Result<()> {
let key = b"__meta_table_route".to_vec();
let range_end = get_prefix_end_key(&key);
let mut keys = Vec::new();
info!("Start scanning key from: {}", String::from_utf8_lossy(&key));
let mut stream = PaginationStream::new(
self.etcd_store.clone(),
RangeRequest::new().with_range(key, range_end),
PAGE_SIZE,
Arc::new(|kv: KeyValue| {
let value =
TableRouteValue::decode(&kv.value[..]).context(MetaError::DecodeProtoSnafu)?;
Ok((kv.key, value))
}),
);
while let Some((key, value)) = stream.try_next().await.context(error::IterStreamSnafu)? {
let table_id = self.migrate_table_route_key(value).await?;
keys.push(key);
keys.push(TableRegionKey::new(table_id).to_bytes())
}
info!("Total migrated TableRouteKeys: {}", keys.len() / 2);
self.delete_migrated_keys(keys).await;
Ok(())
}
async fn migrate_table_route_key(&self, value: TableRouteValue) -> Result<u32> {
let table_route = TableRoute::try_from_raw(
&value.peers,
value.table_route.expect("expected table_route"),
)
.unwrap();
let new_table_value = NextTableRouteValue::physical(table_route.region_routes);
let table_id = table_route.table.id as u32;
let new_key = TableRouteKey::new(table_id);
info!("Creating '{new_key}'");
if self.dryrun {
info!("Dryrun: do nothing");
} else {
self.etcd_store
.put(
PutRequest::new()
.with_key(new_key.to_bytes())
.with_value(new_table_value.try_as_raw_value().unwrap()),
)
.await
.unwrap();
}
Ok(table_id)
}
async fn migrate_schema_keys(&self) -> Result<()> {
// The schema key prefix.
let key = b"__s".to_vec();
let range_end = get_prefix_end_key(&key);
let mut keys = Vec::new();
info!("Start scanning key from: {}", String::from_utf8_lossy(&key));
let mut stream = PaginationStream::new(
self.etcd_store.clone(),
RangeRequest::new().with_range(key, range_end),
PAGE_SIZE,
Arc::new(|kv: KeyValue| {
let key_str =
std::str::from_utf8(&kv.key).context(MetaError::ConvertRawKeySnafu)?;
let key = v1SchemaKey::parse(key_str)
.unwrap_or_else(|e| panic!("schema key is corrupted: {e}, key: {key_str}"));
Ok(key)
}),
);
while let Some(key) = stream.try_next().await.context(error::IterStreamSnafu)? {
let _ = self.migrate_schema_key(&key).await;
keys.push(key.to_string().as_bytes().to_vec());
}
info!("Total migrated SchemaKeys: {}", keys.len());
self.delete_migrated_keys(keys).await;
Ok(())
}
async fn migrate_schema_key(&self, key: &v1SchemaKey) -> Result<()> {
let new_key = SchemaNameKey::new(&key.catalog_name, &key.schema_name);
let schema_name_value = SchemaNameValue::default();
info!("Creating '{new_key}'");
if self.dryrun {
info!("Dryrun: do nothing");
} else {
self.etcd_store
.put(
PutRequest::new()
.with_key(new_key.to_bytes())
.with_value(schema_name_value.try_as_raw_value().unwrap()),
)
.await
.unwrap();
}
Ok(())
}
async fn migrate_catalog_keys(&self) -> Result<()> {
// The catalog key prefix.
let key = b"__c".to_vec();
let range_end = get_prefix_end_key(&key);
let mut keys = Vec::new();
info!("Start scanning key from: {}", String::from_utf8_lossy(&key));
let mut stream = PaginationStream::new(
self.etcd_store.clone(),
RangeRequest::new().with_range(key, range_end),
PAGE_SIZE,
Arc::new(|kv: KeyValue| {
let key_str =
std::str::from_utf8(&kv.key).context(MetaError::ConvertRawKeySnafu)?;
let key = v1CatalogKey::parse(key_str)
.unwrap_or_else(|e| panic!("catalog key is corrupted: {e}, key: {key_str}"));
Ok(key)
}),
);
while let Some(key) = stream.try_next().await.context(error::IterStreamSnafu)? {
let _ = self.migrate_catalog_key(&key).await;
keys.push(key.to_string().as_bytes().to_vec());
}
info!("Total migrated CatalogKeys: {}", keys.len());
self.delete_migrated_keys(keys).await;
Ok(())
}
async fn migrate_catalog_key(&self, key: &v1CatalogKey) {
let new_key = CatalogNameKey::new(&key.catalog_name);
let catalog_name_value = CatalogNameValue;
info!("Creating '{new_key}'");
if self.dryrun {
info!("Dryrun: do nothing");
} else {
self.etcd_store
.put(
PutRequest::new()
.with_key(new_key.to_bytes())
.with_value(catalog_name_value.try_as_raw_value().unwrap()),
)
.await
.unwrap();
}
}
async fn migrate_table_global_values(&self) -> Result<()> {
let key = b"__tg".to_vec();
let range_end = get_prefix_end_key(&key);
let mut keys = Vec::new();
info!("Start scanning key from: {}", String::from_utf8_lossy(&key));
let mut stream = PaginationStream::new(
self.etcd_store.clone(),
RangeRequest::new().with_range(key, range_end.clone()),
PAGE_SIZE,
Arc::new(|kv: KeyValue| {
let key = String::from_utf8_lossy(kv.key()).to_string();
let value = TableGlobalValue::from_bytes(kv.value())
.unwrap_or_else(|e| panic!("table global value is corrupted: {e}, key: {key}"));
Ok((key, value))
}),
);
while let Some((key, value)) = stream.try_next().await.context(error::IterStreamSnafu)? {
self.create_table_name_key(&value).await;
self.create_datanode_table_keys(&value).await;
self.split_table_global_value(&key, value).await;
keys.push(key.as_bytes().to_vec());
}
info!("Total migrated TableGlobalKeys: {}", keys.len());
self.delete_migrated_keys(keys).await;
Ok(())
}
async fn delete_migrated_keys(&self, keys: Vec<Vec<u8>>) {
for keys in keys.chunks(PAGE_SIZE) {
info!("Deleting {} keys", keys.len());
let req = BatchDeleteRequest {
keys: keys.to_vec(),
prev_kv: false,
};
if self.dryrun {
info!("Dryrun: do nothing");
} else {
self.etcd_store.batch_delete(req).await.unwrap();
}
}
}
async fn split_table_global_value(&self, key: &str, value: TableGlobalValue) {
let table_id = value.table_id();
let region_distribution: RegionDistribution = value.regions_id_map.into_iter().collect();
let table_info_key = TableInfoKey::new(table_id);
let table_info_value = TableInfoValue::new(value.table_info);
let table_region_key = TableRegionKey::new(table_id);
let table_region_value = TableRegionValue::new(region_distribution);
info!("Splitting TableGlobalKey '{key}' into '{table_info_key}' and '{table_region_key}'");
if self.dryrun {
info!("Dryrun: do nothing");
} else {
self.etcd_store
.batch_put(
BatchPutRequest::new()
.add_kv(
table_info_key.to_bytes(),
table_info_value.try_as_raw_value().unwrap(),
)
.add_kv(
table_region_key.to_bytes(),
table_region_value.try_as_raw_value().unwrap(),
),
)
.await
.unwrap();
}
}
async fn create_table_name_key(&self, value: &TableGlobalValue) {
let table_info = &value.table_info;
let table_id = value.table_id();
let table_name_key = TableNameKey::new(
&table_info.catalog_name,
&table_info.schema_name,
&table_info.name,
);
let table_name_value = TableNameValue::new(table_id);
info!("Creating '{table_name_key}' => {table_id}");
if self.dryrun {
info!("Dryrun: do nothing");
} else {
self.etcd_store
.put(
PutRequest::new()
.with_key(table_name_key.to_bytes())
.with_value(table_name_value.try_as_raw_value().unwrap()),
)
.await
.unwrap();
}
}
async fn create_datanode_table_keys(&self, value: &TableGlobalValue) {
let table_id = value.table_id();
let engine = value.table_info.meta.engine.as_str();
let region_storage_path = region_storage_path(
&value.table_info.catalog_name,
&value.table_info.schema_name,
);
let region_distribution: RegionDistribution =
value.regions_id_map.clone().into_iter().collect();
// TODO(niebayes): properly fetch or construct wal options.
let region_wal_options = HashMap::default();
let datanode_table_kvs = region_distribution
.into_iter()
.map(|(datanode_id, regions)| {
let k = DatanodeTableKey::new(datanode_id, table_id);
info!("Creating DatanodeTableKey '{k}' => {regions:?}");
(
k,
DatanodeTableValue::new(
table_id,
regions,
RegionInfo {
engine: engine.to_string(),
region_storage_path: region_storage_path.clone(),
region_options: (&value.table_info.meta.options).into(),
region_wal_options: region_wal_options.clone(),
},
),
)
})
.collect::<Vec<_>>();
if self.dryrun {
info!("Dryrun: do nothing");
} else {
let mut req = BatchPutRequest::new();
for (key, value) in datanode_table_kvs {
req = req.add_kv(key.to_bytes(), value.try_as_raw_value().unwrap());
}
self.etcd_store.batch_put(req).await.unwrap();
}
}
}
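Every write in this tool goes through the same guard, which is what makes `--dryrun` safe: the intended operation is logged and the etcd call is skipped. Extracted as a sketch (the helper name is illustrative; the real methods inline this check rather than calling a helper):

    // Hypothetical helper showing the guard that each migrate_* method inlines.
    async fn put_unless_dryrun(&self, req: PutRequest) {
        if self.dryrun {
            info!("Dryrun: do nothing");
        } else {
            self.etcd_store.put(req).await.unwrap();
        }
    }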
#[deprecated(since = "0.4.0", note = "Used for migrating old version (v0.3) metadata")]
mod v1_helper {
use std::collections::HashMap;
use std::fmt::{Display, Formatter};
use err::{DeserializeCatalogEntryValueSnafu, Error, InvalidCatalogSnafu};
use lazy_static::lazy_static;
use regex::Regex;
use serde::{Deserialize, Serialize};
use snafu::{ensure, OptionExt, ResultExt};
use table::metadata::{RawTableInfo, TableId};
pub const CATALOG_KEY_PREFIX: &str = "__c";
pub const SCHEMA_KEY_PREFIX: &str = "__s";
/// The pattern of a valid catalog, schema or table name.
const NAME_PATTERN: &str = "[a-zA-Z_:][a-zA-Z0-9_:]*";
lazy_static! {
static ref CATALOG_KEY_PATTERN: Regex =
Regex::new(&format!("^{CATALOG_KEY_PREFIX}-({NAME_PATTERN})$")).unwrap();
}
lazy_static! {
static ref SCHEMA_KEY_PATTERN: Regex = Regex::new(&format!(
"^{SCHEMA_KEY_PREFIX}-({NAME_PATTERN})-({NAME_PATTERN})$"
))
.unwrap();
}
/// Table global info contains the information a datanode needs to create table regions, including
/// the table id, the table meta (schema, ...), and the region id allocation across datanodes.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct TableGlobalValue {
/// Id of the datanode that created the global table info kv. Only for debugging.
pub node_id: u64,
/// Allocation of region ids across all datanodes.
pub regions_id_map: HashMap<u64, Vec<u32>>,
pub table_info: RawTableInfo,
}
impl TableGlobalValue {
pub fn table_id(&self) -> TableId {
self.table_info.ident.table_id
}
}
pub struct CatalogKey {
pub catalog_name: String,
}
impl Display for CatalogKey {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
f.write_str(CATALOG_KEY_PREFIX)?;
f.write_str("-")?;
f.write_str(&self.catalog_name)
}
}
impl CatalogKey {
pub fn parse(s: impl AsRef<str>) -> Result<Self, Error> {
let key = s.as_ref();
let captures = CATALOG_KEY_PATTERN
.captures(key)
.context(InvalidCatalogSnafu { key })?;
ensure!(captures.len() == 2, InvalidCatalogSnafu { key });
Ok(Self {
catalog_name: captures[1].to_string(),
})
}
}
#[derive(Debug, Serialize, Deserialize)]
pub struct CatalogValue;
pub struct SchemaKey {
pub catalog_name: String,
pub schema_name: String,
}
impl Display for SchemaKey {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
f.write_str(SCHEMA_KEY_PREFIX)?;
f.write_str("-")?;
f.write_str(&self.catalog_name)?;
f.write_str("-")?;
f.write_str(&self.schema_name)
}
}
impl SchemaKey {
pub fn parse(s: impl AsRef<str>) -> Result<Self, Error> {
let key = s.as_ref();
let captures = SCHEMA_KEY_PATTERN
.captures(key)
.context(InvalidCatalogSnafu { key })?;
ensure!(captures.len() == 3, InvalidCatalogSnafu { key });
Ok(Self {
catalog_name: captures[1].to_string(),
schema_name: captures[2].to_string(),
})
}
}
#[derive(Debug, Serialize, Deserialize)]
pub struct SchemaValue;
macro_rules! define_catalog_value {
( $($val_ty: ty), *) => {
$(
impl $val_ty {
pub fn parse(s: impl AsRef<str>) -> Result<Self, Error> {
serde_json::from_str(s.as_ref())
.context(DeserializeCatalogEntryValueSnafu { raw: s.as_ref() })
}
pub fn from_bytes(bytes: impl AsRef<[u8]>) -> Result<Self, Error> {
Self::parse(&String::from_utf8_lossy(bytes.as_ref()))
}
}
)*
}
}
define_catalog_value!(TableGlobalValue);
mod err {
use snafu::{Location, Snafu};
#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
pub enum Error {
#[snafu(display("Invalid catalog info: {}", key))]
InvalidCatalog {
key: String,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Failed to deserialize catalog entry value: {}", raw))]
DeserializeCatalogEntryValue {
raw: String,
#[snafu(implicit)]
location: Location,
source: serde_json::error::Error,
},
}
}
}
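For readers skimming this new file: every `migrate_*` method above follows one scan-decode-rewrite-delete pattern over a single key prefix. A condensed sketch of that pattern, built only from the helpers imported above (the function name and the trivial decoder closure are illustrative, not a copy of any single method):

    use std::sync::Arc;

    use common_meta::kv_backend::KvBackendRef;
    use common_meta::range_stream::PaginationStream;
    use common_meta::rpc::store::RangeRequest;
    use common_meta::rpc::KeyValue;
    use common_meta::util::get_prefix_end_key;
    use futures::TryStreamExt;

    const PAGE_SIZE: usize = 1000;

    // Scans every key under `prefix` page by page and returns the raw keys, so the
    // caller can write the v0.4-layout entries and then batch-delete the old ones.
    async fn collect_keys_under_prefix(kv_backend: KvBackendRef, prefix: &[u8]) -> Vec<Vec<u8>> {
        let key = prefix.to_vec();
        let range_end = get_prefix_end_key(&key);
        let mut stream = PaginationStream::new(
            kv_backend,
            RangeRequest::new().with_range(key, range_end),
            PAGE_SIZE,
            // The real methods decode the value into a typed struct here.
            Arc::new(|kv: KeyValue| Ok(kv.key)),
        );

        let mut keys = Vec::new();
        while let Some(key) = stream.try_next().await.expect("failed to scan keys") {
            keys.push(key);
        }
        keys
    }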

View File

@@ -23,6 +23,7 @@ use common_telemetry::info;
use common_telemetry::logging::TracingOptions;
use common_version::{short_version, version};
use common_wal::config::DatanodeWalConfig;
use datanode::config::DatanodeOptions;
use datanode::datanode::{Datanode, DatanodeBuilder};
use datanode::service::DatanodeServiceBuilder;
use meta_client::MetaClientOptions;
@@ -33,13 +34,11 @@ use tracing_appender::non_blocking::WorkerGuard;
use crate::error::{
LoadLayeredConfigSnafu, MissingConfigSnafu, Result, ShutdownDatanodeSnafu, StartDatanodeSnafu,
};
use crate::options::{GlobalOptions, GreptimeOptions};
use crate::options::GlobalOptions;
use crate::{log_versions, App};
pub const APP_NAME: &str = "greptime-datanode";
type DatanodeOptions = GreptimeOptions<datanode::config::DatanodeOptions>;
pub struct Instance {
datanode: Datanode,
@@ -98,9 +97,7 @@ impl Command {
}
pub fn load_options(&self, global_options: &GlobalOptions) -> Result<DatanodeOptions> {
match &self.subcmd {
SubCommand::Start(cmd) => cmd.load_options(global_options),
}
self.subcmd.load_options(global_options)
}
}
@@ -115,6 +112,12 @@ impl SubCommand {
SubCommand::Start(cmd) => cmd.build(opts).await,
}
}
fn load_options(&self, global_options: &GlobalOptions) -> Result<DatanodeOptions> {
match self {
SubCommand::Start(cmd) => cmd.load_options(global_options),
}
}
}
#[derive(Debug, Parser, Default)]
@@ -143,25 +146,22 @@ struct StartCommand {
impl StartCommand {
fn load_options(&self, global_options: &GlobalOptions) -> Result<DatanodeOptions> {
let mut opts = DatanodeOptions::load_layered_options(
self.config_file.as_deref(),
self.env_prefix.as_ref(),
self.merge_with_cli_options(
global_options,
DatanodeOptions::load_layered_options(
self.config_file.as_deref(),
self.env_prefix.as_ref(),
)
.context(LoadLayeredConfigSnafu)?,
)
.context(LoadLayeredConfigSnafu)?;
self.merge_with_cli_options(global_options, &mut opts)?;
Ok(opts)
}
// The precedence order is: cli > config file > environment variables > default values.
fn merge_with_cli_options(
&self,
global_options: &GlobalOptions,
opts: &mut DatanodeOptions,
) -> Result<()> {
let opts = &mut opts.component;
mut opts: DatanodeOptions,
) -> Result<DatanodeOptions> {
if let Some(dir) = &global_options.log_dir {
opts.logging.dir.clone_from(dir);
}
@@ -231,28 +231,25 @@ impl StartCommand {
// Disable dashboard in datanode.
opts.http.disable_dashboard = true;
Ok(())
Ok(opts)
}
async fn build(&self, opts: DatanodeOptions) -> Result<Instance> {
common_runtime::init_global_runtimes(&opts.runtime);
async fn build(&self, mut opts: DatanodeOptions) -> Result<Instance> {
let guard = common_telemetry::init_global_logging(
APP_NAME,
&opts.component.logging,
&opts.component.tracing,
opts.component.node_id.map(|x| x.to_string()),
&opts.logging,
&opts.tracing,
opts.node_id.map(|x| x.to_string()),
);
log_versions(version!(), short_version!());
info!("Datanode start command: {:#?}", self);
info!("Datanode options: {:#?}", opts);
let mut opts = opts.component;
let plugins = plugins::setup_datanode_plugins(&mut opts)
.await
.context(StartDatanodeSnafu)?;
info!("Datanode start command: {:#?}", self);
info!("Datanode options: {:#?}", opts);
let node_id = opts
.node_id
.context(MissingConfigSnafu { msg: "'node_id'" })?;
@@ -356,7 +353,7 @@ mod tests {
..Default::default()
};
let options = cmd.load_options(&Default::default()).unwrap().component;
let options = cmd.load_options(&GlobalOptions::default()).unwrap();
assert_eq!("127.0.0.1:3001".to_string(), options.rpc_addr);
assert_eq!(Some(42), options.node_id);
@@ -417,8 +414,7 @@ mod tests {
fn test_try_from_cmd() {
let opt = StartCommand::default()
.load_options(&GlobalOptions::default())
.unwrap()
.component;
.unwrap();
assert_eq!(Mode::Standalone, opt.mode);
let opt = (StartCommand {
@@ -427,8 +423,7 @@ mod tests {
..Default::default()
})
.load_options(&GlobalOptions::default())
.unwrap()
.component;
.unwrap();
assert_eq!(Mode::Distributed, opt.mode);
assert!((StartCommand {
@@ -459,8 +454,7 @@ mod tests {
#[cfg(feature = "tokio-console")]
tokio_console_addr: None,
})
.unwrap()
.component;
.unwrap();
let logging_opt = options.logging;
assert_eq!("/tmp/greptimedb/test/logs", logging_opt.dir);
@@ -542,7 +536,7 @@ mod tests {
..Default::default()
};
let opts = command.load_options(&Default::default()).unwrap().component;
let opts = command.load_options(&GlobalOptions::default()).unwrap();
// Should be read from env, env > default values.
let DatanodeWalConfig::RaftEngine(raft_engine_config) = opts.wal else {
@@ -568,10 +562,7 @@ mod tests {
assert_eq!(raft_engine_config.dir.unwrap(), "/other/wal/dir");
// Should be default value.
assert_eq!(
opts.http.addr,
DatanodeOptions::default().component.http.addr
);
assert_eq!(opts.http.addr, DatanodeOptions::default().http.addr);
},
);
}

View File

@@ -163,15 +163,6 @@ pub enum Error {
location: Location,
},
#[snafu(display("Failed to request database, sql: {sql}"))]
RequestDatabase {
sql: String,
#[snafu(source)]
source: client::Error,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Failed to collect RecordBatches"))]
CollectRecordBatches {
#[snafu(implicit)]
@@ -363,7 +354,6 @@ impl ErrorExt for Error {
Error::ReplCreation { .. } | Error::Readline { .. } | Error::HttpQuerySql { .. } => {
StatusCode::Internal
}
Error::RequestDatabase { source, .. } => source.status_code(),
Error::CollectRecordBatches { source, .. }
| Error::PrettyPrintRecordBatches { source, .. } => source.status_code(),
Error::StartMetaClient { source, .. } => source.status_code(),
@@ -375,11 +365,11 @@ impl ErrorExt for Error {
Error::SerdeJson { .. } | Error::FileIo { .. } => StatusCode::Unexpected,
Error::CacheRequired { .. } | Error::BuildCacheRegistry { .. } => StatusCode::Internal,
Error::Other { source, .. } => source.status_code(),
Error::BuildRuntime { source, .. } => source.status_code(),
Error::CacheRequired { .. } | Error::BuildCacheRegistry { .. } => StatusCode::Internal,
}
}

View File

@@ -16,7 +16,10 @@ use std::sync::Arc;
use std::time::Duration;
use async_trait::async_trait;
use cache::{build_fundamental_cache_registry, with_default_composite_cache_registry};
use cache::{
build_fundamental_cache_registry, with_default_composite_cache_registry, TABLE_CACHE_NAME,
TABLE_ROUTE_CACHE_NAME,
};
use catalog::kvbackend::{CachedMetaKvBackendBuilder, KvBackendCatalogManager, MetaKvBackend};
use clap::Parser;
use client::client_manager::DatanodeClients;
@@ -29,6 +32,7 @@ use common_telemetry::info;
use common_telemetry::logging::TracingOptions;
use common_time::timezone::set_default_timezone;
use common_version::{short_version, version};
use frontend::frontend::FrontendOptions;
use frontend::heartbeat::handler::invalidate_table_cache::InvalidateTableCacheHandler;
use frontend::heartbeat::HeartbeatTask;
use frontend::instance::builder::FrontendBuilder;
@@ -43,11 +47,9 @@ use tracing_appender::non_blocking::WorkerGuard;
use crate::error::{
self, InitTimezoneSnafu, LoadLayeredConfigSnafu, MissingConfigSnafu, Result, StartFrontendSnafu,
};
use crate::options::{GlobalOptions, GreptimeOptions};
use crate::options::GlobalOptions;
use crate::{log_versions, App};
type FrontendOptions = GreptimeOptions<frontend::frontend::FrontendOptions>;
pub struct Instance {
frontend: FeInstance,
@@ -165,25 +167,22 @@ pub struct StartCommand {
impl StartCommand {
fn load_options(&self, global_options: &GlobalOptions) -> Result<FrontendOptions> {
let mut opts = FrontendOptions::load_layered_options(
self.config_file.as_deref(),
self.env_prefix.as_ref(),
self.merge_with_cli_options(
global_options,
FrontendOptions::load_layered_options(
self.config_file.as_deref(),
self.env_prefix.as_ref(),
)
.context(LoadLayeredConfigSnafu)?,
)
.context(LoadLayeredConfigSnafu)?;
self.merge_with_cli_options(global_options, &mut opts)?;
Ok(opts)
}
// The precedence order is: cli > config file > environment variables > default values.
fn merge_with_cli_options(
&self,
global_options: &GlobalOptions,
opts: &mut FrontendOptions,
) -> Result<()> {
let opts = &mut opts.component;
mut opts: FrontendOptions,
) -> Result<FrontendOptions> {
if let Some(dir) = &global_options.log_dir {
opts.logging.dir.clone_from(dir);
}
@@ -246,29 +245,26 @@ impl StartCommand {
opts.user_provider.clone_from(&self.user_provider);
Ok(())
Ok(opts)
}
async fn build(&self, opts: FrontendOptions) -> Result<Instance> {
common_runtime::init_global_runtimes(&opts.runtime);
async fn build(&self, mut opts: FrontendOptions) -> Result<Instance> {
let guard = common_telemetry::init_global_logging(
APP_NAME,
&opts.component.logging,
&opts.component.tracing,
opts.component.node_id.clone(),
&opts.logging,
&opts.tracing,
opts.node_id.clone(),
);
log_versions(version!(), short_version!());
info!("Frontend start command: {:#?}", self);
info!("Frontend options: {:#?}", opts);
let mut opts = opts.component;
#[allow(clippy::unnecessary_mut_passed)]
let plugins = plugins::setup_frontend_plugins(&mut opts)
.await
.context(StartFrontendSnafu)?;
info!("Frontend start command: {:#?}", self);
info!("Frontend options: {:#?}", opts);
set_default_timezone(opts.default_timezone.as_deref()).context(InitTimezoneSnafu)?;
let meta_client_options = opts.meta_client.as_ref().context(MissingConfigSnafu {
@@ -306,12 +302,25 @@ impl StartCommand {
.build(),
);
let table_cache = layered_cache_registry
.get()
.context(error::CacheRequiredSnafu {
name: TABLE_CACHE_NAME,
})?;
let table_route_cache =
layered_cache_registry
.get()
.context(error::CacheRequiredSnafu {
name: TABLE_ROUTE_CACHE_NAME,
})?;
let catalog_manager = KvBackendCatalogManager::new(
opts.mode,
Some(meta_client.clone()),
cached_meta_backend.clone(),
layered_cache_registry.clone(),
);
table_cache,
table_route_cache,
)
.await;
let executor = HandlerGroupExecutor::new(vec![
Arc::new(ParseMailboxMessageHandler),
@@ -387,14 +396,14 @@ mod tests {
..Default::default()
};
let opts = command.load_options(&Default::default()).unwrap().component;
let opts = command.load_options(&GlobalOptions::default()).unwrap();
assert_eq!(opts.http.addr, "127.0.0.1:1234");
assert_eq!(ReadableSize::mb(64), opts.http.body_limit);
assert_eq!(opts.mysql.addr, "127.0.0.1:5678");
assert_eq!(opts.postgres.addr, "127.0.0.1:5432");
let default_opts = FrontendOptions::default().component;
let default_opts = FrontendOptions::default();
assert_eq!(opts.grpc.addr, default_opts.grpc.addr);
assert!(opts.mysql.enable);
@@ -435,8 +444,7 @@ mod tests {
..Default::default()
};
let fe_opts = command.load_options(&Default::default()).unwrap().component;
let fe_opts = command.load_options(&GlobalOptions::default()).unwrap();
assert_eq!(Mode::Distributed, fe_opts.mode);
assert_eq!("127.0.0.1:4000".to_string(), fe_opts.http.addr);
assert_eq!(Duration::from_secs(30), fe_opts.http.timeout);
@@ -450,7 +458,7 @@ mod tests {
#[tokio::test]
async fn test_try_from_start_command_to_anymap() {
let mut fe_opts = frontend::frontend::FrontendOptions {
let mut fe_opts = FrontendOptions {
http: HttpOptions {
disable_dashboard: false,
..Default::default()
@@ -487,8 +495,7 @@ mod tests {
#[cfg(feature = "tokio-console")]
tokio_console_addr: None,
})
.unwrap()
.component;
.unwrap();
let logging_opt = options.logging;
assert_eq!("/tmp/greptimedb/test/logs", logging_opt.dir);
@@ -566,7 +573,7 @@ mod tests {
..Default::default()
};
let fe_opts = command.load_options(&Default::default()).unwrap().component;
let fe_opts = command.load_options(&GlobalOptions::default()).unwrap();
// Should be read from env, env > default values.
assert_eq!(fe_opts.mysql.runtime_size, 11);

View File

@@ -21,15 +21,14 @@ use common_telemetry::info;
use common_telemetry::logging::TracingOptions;
use common_version::{short_version, version};
use meta_srv::bootstrap::MetasrvInstance;
use meta_srv::metasrv::MetasrvOptions;
use snafu::ResultExt;
use tracing_appender::non_blocking::WorkerGuard;
use crate::error::{self, LoadLayeredConfigSnafu, Result, StartMetaServerSnafu};
use crate::options::{GlobalOptions, GreptimeOptions};
use crate::options::GlobalOptions;
use crate::{log_versions, App};
type MetasrvOptions = GreptimeOptions<meta_srv::metasrv::MetasrvOptions>;
pub const APP_NAME: &str = "greptime-metasrv";
pub struct Instance {
@@ -140,25 +139,22 @@ struct StartCommand {
impl StartCommand {
fn load_options(&self, global_options: &GlobalOptions) -> Result<MetasrvOptions> {
let mut opts = MetasrvOptions::load_layered_options(
self.config_file.as_deref(),
self.env_prefix.as_ref(),
self.merge_with_cli_options(
global_options,
MetasrvOptions::load_layered_options(
self.config_file.as_deref(),
self.env_prefix.as_ref(),
)
.context(LoadLayeredConfigSnafu)?,
)
.context(LoadLayeredConfigSnafu)?;
self.merge_with_cli_options(global_options, &mut opts)?;
Ok(opts)
}
// The precedence order is: cli > config file > environment variables > default values.
fn merge_with_cli_options(
&self,
global_options: &GlobalOptions,
opts: &mut MetasrvOptions,
) -> Result<()> {
let opts = &mut opts.component;
mut opts: MetasrvOptions,
) -> Result<MetasrvOptions> {
if let Some(dir) = &global_options.log_dir {
opts.logging.dir.clone_from(dir);
}
@@ -221,28 +217,21 @@ impl StartCommand {
// Disable dashboard in metasrv.
opts.http.disable_dashboard = true;
Ok(())
Ok(opts)
}
async fn build(&self, opts: MetasrvOptions) -> Result<Instance> {
common_runtime::init_global_runtimes(&opts.runtime);
let guard = common_telemetry::init_global_logging(
APP_NAME,
&opts.component.logging,
&opts.component.tracing,
None,
);
async fn build(&self, mut opts: MetasrvOptions) -> Result<Instance> {
let guard =
common_telemetry::init_global_logging(APP_NAME, &opts.logging, &opts.tracing, None);
log_versions(version!(), short_version!());
info!("Metasrv start command: {:#?}", self);
info!("Metasrv options: {:#?}", opts);
let mut opts = opts.component;
let plugins = plugins::setup_metasrv_plugins(&mut opts)
.await
.context(StartMetaServerSnafu)?;
info!("Metasrv start command: {:#?}", self);
info!("Metasrv options: {:#?}", opts);
let builder = meta_srv::bootstrap::metasrv_builder(&opts, plugins.clone(), None)
.await
.context(error::BuildMetaServerSnafu)?;
@@ -277,7 +266,7 @@ mod tests {
..Default::default()
};
let options = cmd.load_options(&Default::default()).unwrap().component;
let options = cmd.load_options(&GlobalOptions::default()).unwrap();
assert_eq!("127.0.0.1:3002".to_string(), options.bind_addr);
assert_eq!(vec!["127.0.0.1:2380".to_string()], options.store_addrs);
assert_eq!(SelectorType::LoadBased, options.selector);
@@ -310,7 +299,7 @@ mod tests {
..Default::default()
};
let options = cmd.load_options(&Default::default()).unwrap().component;
let options = cmd.load_options(&GlobalOptions::default()).unwrap();
assert_eq!("127.0.0.1:3002".to_string(), options.bind_addr);
assert_eq!("127.0.0.1:3002".to_string(), options.server_addr);
assert_eq!(vec!["127.0.0.1:2379".to_string()], options.store_addrs);
@@ -360,8 +349,7 @@ mod tests {
#[cfg(feature = "tokio-console")]
tokio_console_addr: None,
})
.unwrap()
.component;
.unwrap();
let logging_opt = options.logging;
assert_eq!("/tmp/greptimedb/test/logs", logging_opt.dir);
@@ -418,7 +406,7 @@ mod tests {
..Default::default()
};
let opts = command.load_options(&Default::default()).unwrap().component;
let opts = command.load_options(&GlobalOptions::default()).unwrap();
// Should be read from env, env > default values.
assert_eq!(opts.bind_addr, "127.0.0.1:14002");

View File

@@ -13,9 +13,6 @@
// limitations under the License.
use clap::Parser;
use common_config::Configurable;
use common_runtime::global::RuntimeOptions;
use serde::{Deserialize, Serialize};
#[derive(Parser, Default, Debug, Clone)]
pub struct GlobalOptions {
@@ -32,22 +29,3 @@ pub struct GlobalOptions {
#[arg(global = true)]
pub tokio_console_addr: Option<String>,
}
// TODO(LFC): Move logging and tracing options into global options, like the runtime options.
/// All the options of GreptimeDB.
#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq)]
#[serde(default)]
pub struct GreptimeOptions<T> {
/// The runtime options.
pub runtime: RuntimeOptions,
/// The options of each component (like Datanode or Standalone) of GreptimeDB.
#[serde(flatten)]
pub component: T,
}
impl<T: Configurable> Configurable for GreptimeOptions<T> {
fn env_list_keys() -> Option<&'static [&'static str]> {
T::env_list_keys()
}
}

View File

@@ -16,7 +16,10 @@ use std::sync::Arc;
use std::{fs, path};
use async_trait::async_trait;
use cache::{build_fundamental_cache_registry, with_default_composite_cache_registry};
use cache::{
build_fundamental_cache_registry, with_default_composite_cache_registry, TABLE_CACHE_NAME,
TABLE_ROUTE_CACHE_NAME,
};
use catalog::kvbackend::KvBackendCatalogManager;
use clap::Parser;
use common_catalog::consts::{MIN_USER_FLOW_ID, MIN_USER_TABLE_ID};
@@ -58,16 +61,16 @@ use servers::export_metrics::ExportMetricsOption;
use servers::http::HttpOptions;
use servers::tls::{TlsMode, TlsOption};
use servers::Mode;
use snafu::ResultExt;
use snafu::{OptionExt, ResultExt};
use tracing_appender::non_blocking::WorkerGuard;
use crate::error::{
BuildCacheRegistrySnafu, CreateDirSnafu, IllegalConfigSnafu, InitDdlManagerSnafu,
InitMetadataSnafu, InitTimezoneSnafu, LoadLayeredConfigSnafu, Result, ShutdownDatanodeSnafu,
ShutdownFrontendSnafu, StartDatanodeSnafu, StartFrontendSnafu, StartProcedureManagerSnafu,
StartWalOptionsAllocatorSnafu, StopProcedureManagerSnafu,
BuildCacheRegistrySnafu, CacheRequiredSnafu, CreateDirSnafu, IllegalConfigSnafu,
InitDdlManagerSnafu, InitMetadataSnafu, InitTimezoneSnafu, LoadLayeredConfigSnafu, Result,
ShutdownDatanodeSnafu, ShutdownFrontendSnafu, StartDatanodeSnafu, StartFrontendSnafu,
StartProcedureManagerSnafu, StartWalOptionsAllocatorSnafu, StopProcedureManagerSnafu,
};
use crate::options::{GlobalOptions, GreptimeOptions};
use crate::options::GlobalOptions;
use crate::{log_versions, App};
pub const APP_NAME: &str = "greptime-standalone";
@@ -79,14 +82,11 @@ pub struct Command {
}
impl Command {
pub async fn build(&self, opts: GreptimeOptions<StandaloneOptions>) -> Result<Instance> {
pub async fn build(&self, opts: StandaloneOptions) -> Result<Instance> {
self.subcmd.build(opts).await
}
pub fn load_options(
&self,
global_options: &GlobalOptions,
) -> Result<GreptimeOptions<StandaloneOptions>> {
pub fn load_options(&self, global_options: &GlobalOptions) -> Result<StandaloneOptions> {
self.subcmd.load_options(global_options)
}
}
@@ -97,23 +97,20 @@ enum SubCommand {
}
impl SubCommand {
async fn build(&self, opts: GreptimeOptions<StandaloneOptions>) -> Result<Instance> {
async fn build(&self, opts: StandaloneOptions) -> Result<Instance> {
match self {
SubCommand::Start(cmd) => cmd.build(opts).await,
}
}
fn load_options(
&self,
global_options: &GlobalOptions,
) -> Result<GreptimeOptions<StandaloneOptions>> {
fn load_options(&self, global_options: &GlobalOptions) -> Result<StandaloneOptions> {
match self {
SubCommand::Start(cmd) => cmd.load_options(global_options),
}
}
}
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(default)]
pub struct StandaloneOptions {
pub mode: Mode,
@@ -167,7 +164,7 @@ impl Default for StandaloneOptions {
}
}
impl Configurable for StandaloneOptions {
impl Configurable<'_> for StandaloneOptions {
fn env_list_keys() -> Option<&'static [&'static str]> {
Some(&["wal.broker_endpoints"])
}
@@ -297,27 +294,23 @@ pub struct StartCommand {
}
impl StartCommand {
fn load_options(
&self,
global_options: &GlobalOptions,
) -> Result<GreptimeOptions<StandaloneOptions>> {
let mut opts = GreptimeOptions::<StandaloneOptions>::load_layered_options(
self.config_file.as_deref(),
self.env_prefix.as_ref(),
fn load_options(&self, global_options: &GlobalOptions) -> Result<StandaloneOptions> {
self.merge_with_cli_options(
global_options,
StandaloneOptions::load_layered_options(
self.config_file.as_deref(),
self.env_prefix.as_ref(),
)
.context(LoadLayeredConfigSnafu)?,
)
.context(LoadLayeredConfigSnafu)?;
self.merge_with_cli_options(global_options, &mut opts.component)?;
Ok(opts)
}
// The precedence order is: cli > config file > environment variables > default values.
pub fn merge_with_cli_options(
&self,
global_options: &GlobalOptions,
opts: &mut StandaloneOptions,
) -> Result<()> {
mut opts: StandaloneOptions,
) -> Result<StandaloneOptions> {
// Should always be standalone mode.
opts.mode = Mode::Standalone;
@@ -379,27 +372,20 @@ impl StartCommand {
opts.user_provider.clone_from(&self.user_provider);
Ok(())
Ok(opts)
}
#[allow(unreachable_code)]
#[allow(unused_variables)]
#[allow(clippy::diverging_sub_expression)]
async fn build(&self, opts: GreptimeOptions<StandaloneOptions>) -> Result<Instance> {
common_runtime::init_global_runtimes(&opts.runtime);
let guard = common_telemetry::init_global_logging(
APP_NAME,
&opts.component.logging,
&opts.component.tracing,
None,
);
async fn build(&self, opts: StandaloneOptions) -> Result<Instance> {
let guard =
common_telemetry::init_global_logging(APP_NAME, &opts.logging, &opts.tracing, None);
log_versions(version!(), short_version!());
info!("Standalone start command: {:#?}", self);
info!("Standalone options: {opts:#?}");
info!("Building standalone instance with {opts:#?}");
let opts = opts.component;
let mut fe_opts = opts.frontend_options();
#[allow(clippy::unnecessary_mut_passed)]
let fe_plugins = plugins::setup_frontend_plugins(&mut fe_opts) // the mutable reference is required here, DO NOT change it
@@ -435,12 +421,20 @@ impl StartCommand {
.build(),
);
let table_cache = layered_cache_registry.get().context(CacheRequiredSnafu {
name: TABLE_CACHE_NAME,
})?;
let table_route_cache = layered_cache_registry.get().context(CacheRequiredSnafu {
name: TABLE_ROUTE_CACHE_NAME,
})?;
let catalog_manager = KvBackendCatalogManager::new(
dn_opts.mode,
None,
kv_backend.clone(),
layered_cache_registry.clone(),
);
table_cache,
table_route_cache,
)
.await;
let table_metadata_manager =
Self::create_table_metadata_manager(kv_backend.clone()).await?;
@@ -454,11 +448,9 @@ impl StartCommand {
);
let flownode = Arc::new(flow_builder.build().await);
let datanode = DatanodeBuilder::new(dn_opts, fe_plugins.clone())
.with_kv_backend(kv_backend.clone())
.build()
.await
.context(StartDatanodeSnafu)?;
let builder =
DatanodeBuilder::new(dn_opts, fe_plugins.clone()).with_kv_backend(kv_backend.clone());
let datanode = builder.build().await.context(StartDatanodeSnafu)?;
let node_manager = Arc::new(StandaloneDatanodeManager {
region_server: datanode.region_server(),
@@ -683,10 +675,7 @@ mod tests {
..Default::default()
};
let options = cmd
.load_options(&GlobalOptions::default())
.unwrap()
.component;
let options = cmd.load_options(&GlobalOptions::default()).unwrap();
let fe_opts = options.frontend_options();
let dn_opts = options.datanode_options();
let logging_opts = options.logging;
@@ -747,8 +736,7 @@ mod tests {
#[cfg(feature = "tokio-console")]
tokio_console_addr: None,
})
.unwrap()
.component;
.unwrap();
assert_eq!("/tmp/greptimedb/test/logs", opts.logging.dir);
assert_eq!("debug", opts.logging.level.unwrap());
@@ -810,7 +798,7 @@ mod tests {
..Default::default()
};
let opts = command.load_options(&Default::default()).unwrap().component;
let opts = command.load_options(&GlobalOptions::default()).unwrap();
// Should be read from env, env > default values.
assert_eq!(opts.logging.dir, "/other/log/dir");
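The same reshaping repeats across the datanode, frontend, metasrv, and standalone start commands in this diff: `merge_with_cli_options` takes the freshly loaded options by value and returns them, instead of patching through `&mut`. A minimal dependency-free sketch of that shape (struct and field names are made up for illustration):

    // The precedence order stays: cli > config file > environment variables > default values.
    struct CliArgs {
        log_dir: Option<String>,
    }

    struct ComponentOptions {
        log_dir: String,
    }

    fn merge_with_cli_options(cli: &CliArgs, mut opts: ComponentOptions) -> ComponentOptions {
        if let Some(dir) = &cli.log_dir {
            opts.log_dir.clone_from(dir);
        }
        opts
    }

    fn load_options(cli: &CliArgs) -> ComponentOptions {
        // In the real commands this inner value comes from `load_layered_options`,
        // which already merged the config-file and environment layers.
        let layered = ComponentOptions {
            log_dir: "/tmp/greptimedb/logs".to_string(),
        };
        merge_with_cli_options(cli, layered)
    }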

View File

@@ -1,218 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::time::Duration;
use cmd::options::GreptimeOptions;
use cmd::standalone::StandaloneOptions;
use common_config::Configurable;
use common_runtime::global::RuntimeOptions;
use common_telemetry::logging::LoggingOptions;
use common_wal::config::raft_engine::RaftEngineConfig;
use common_wal::config::{DatanodeWalConfig, StandaloneWalConfig};
use datanode::config::{DatanodeOptions, RegionEngineConfig, StorageConfig};
use frontend::frontend::FrontendOptions;
use frontend::service_config::datanode::DatanodeClientOptions;
use meta_client::MetaClientOptions;
use meta_srv::metasrv::MetasrvOptions;
use meta_srv::selector::SelectorType;
use mito2::config::MitoConfig;
use servers::export_metrics::ExportMetricsOption;
#[test]
fn test_load_datanode_example_config() {
let example_config = common_test_util::find_workspace_path("config/datanode.example.toml");
let options =
GreptimeOptions::<DatanodeOptions>::load_layered_options(example_config.to_str(), "")
.unwrap();
let expected = GreptimeOptions::<DatanodeOptions> {
runtime: RuntimeOptions {
read_rt_size: 8,
write_rt_size: 8,
bg_rt_size: 8,
},
component: DatanodeOptions {
node_id: Some(42),
rpc_hostname: Some("127.0.0.1".to_string()),
meta_client: Some(MetaClientOptions {
metasrv_addrs: vec!["127.0.0.1:3002".to_string()],
timeout: Duration::from_secs(3),
heartbeat_timeout: Duration::from_millis(500),
ddl_timeout: Duration::from_secs(10),
connect_timeout: Duration::from_secs(1),
tcp_nodelay: true,
metadata_cache_max_capacity: 100000,
metadata_cache_ttl: Duration::from_secs(600),
metadata_cache_tti: Duration::from_secs(300),
}),
wal: DatanodeWalConfig::RaftEngine(RaftEngineConfig {
dir: Some("/tmp/greptimedb/wal".to_string()),
sync_period: Some(Duration::from_secs(10)),
..Default::default()
}),
storage: StorageConfig {
data_home: "/tmp/greptimedb/".to_string(),
..Default::default()
},
region_engine: vec![RegionEngineConfig::Mito(MitoConfig {
num_workers: 8,
auto_flush_interval: Duration::from_secs(3600),
scan_parallelism: 0,
..Default::default()
})],
logging: LoggingOptions {
level: Some("info".to_string()),
otlp_endpoint: Some("".to_string()),
tracing_sample_ratio: Some(Default::default()),
..Default::default()
},
export_metrics: ExportMetricsOption {
self_import: Some(Default::default()),
remote_write: Some(Default::default()),
..Default::default()
},
..Default::default()
},
};
assert_eq!(options, expected);
}
#[test]
fn test_load_frontend_example_config() {
let example_config = common_test_util::find_workspace_path("config/frontend.example.toml");
let options =
GreptimeOptions::<FrontendOptions>::load_layered_options(example_config.to_str(), "")
.unwrap();
let expected = GreptimeOptions::<FrontendOptions> {
runtime: RuntimeOptions {
read_rt_size: 8,
write_rt_size: 8,
bg_rt_size: 8,
},
component: FrontendOptions {
default_timezone: Some("UTC".to_string()),
meta_client: Some(MetaClientOptions {
metasrv_addrs: vec!["127.0.0.1:3002".to_string()],
timeout: Duration::from_secs(3),
heartbeat_timeout: Duration::from_millis(500),
ddl_timeout: Duration::from_secs(10),
connect_timeout: Duration::from_secs(1),
tcp_nodelay: true,
metadata_cache_max_capacity: 100000,
metadata_cache_ttl: Duration::from_secs(600),
metadata_cache_tti: Duration::from_secs(300),
}),
logging: LoggingOptions {
level: Some("info".to_string()),
otlp_endpoint: Some("".to_string()),
tracing_sample_ratio: Some(Default::default()),
..Default::default()
},
datanode: frontend::service_config::DatanodeOptions {
client: DatanodeClientOptions {
connect_timeout: Duration::from_secs(10),
tcp_nodelay: true,
},
},
export_metrics: ExportMetricsOption {
self_import: Some(Default::default()),
remote_write: Some(Default::default()),
..Default::default()
},
..Default::default()
},
};
assert_eq!(options, expected);
}
#[test]
fn test_load_metasrv_example_config() {
let example_config = common_test_util::find_workspace_path("config/metasrv.example.toml");
let options =
GreptimeOptions::<MetasrvOptions>::load_layered_options(example_config.to_str(), "")
.unwrap();
let expected = GreptimeOptions::<MetasrvOptions> {
runtime: RuntimeOptions {
read_rt_size: 8,
write_rt_size: 8,
bg_rt_size: 8,
},
component: MetasrvOptions {
selector: SelectorType::LeaseBased,
data_home: "/tmp/metasrv/".to_string(),
logging: LoggingOptions {
dir: "/tmp/greptimedb/logs".to_string(),
level: Some("info".to_string()),
otlp_endpoint: Some("".to_string()),
tracing_sample_ratio: Some(Default::default()),
..Default::default()
},
export_metrics: ExportMetricsOption {
self_import: Some(Default::default()),
remote_write: Some(Default::default()),
..Default::default()
},
..Default::default()
},
};
assert_eq!(options, expected);
}
#[test]
fn test_load_standalone_example_config() {
let example_config = common_test_util::find_workspace_path("config/standalone.example.toml");
let options =
GreptimeOptions::<StandaloneOptions>::load_layered_options(example_config.to_str(), "")
.unwrap();
let expected = GreptimeOptions::<StandaloneOptions> {
runtime: RuntimeOptions {
read_rt_size: 8,
write_rt_size: 8,
bg_rt_size: 8,
},
component: StandaloneOptions {
default_timezone: Some("UTC".to_string()),
wal: StandaloneWalConfig::RaftEngine(RaftEngineConfig {
dir: Some("/tmp/greptimedb/wal".to_string()),
sync_period: Some(Duration::from_secs(10)),
..Default::default()
}),
region_engine: vec![RegionEngineConfig::Mito(MitoConfig {
num_workers: 8,
auto_flush_interval: Duration::from_secs(3600),
scan_parallelism: 0,
..Default::default()
})],
storage: StorageConfig {
data_home: "/tmp/greptimedb/".to_string(),
..Default::default()
},
logging: LoggingOptions {
level: Some("info".to_string()),
otlp_endpoint: Some("".to_string()),
tracing_sample_ratio: Some(Default::default()),
..Default::default()
},
export_metrics: ExportMetricsOption {
self_import: Some(Default::default()),
remote_write: Some(Default::default()),
..Default::default()
},
..Default::default()
},
};
assert_eq!(options, expected);
}

View File

@@ -13,8 +13,7 @@
// limitations under the License.
use config::{Environment, File, FileFormat};
use serde::de::DeserializeOwned;
use serde::Serialize;
use serde::{Deserialize, Serialize};
use snafu::ResultExt;
use crate::error::{LoadLayeredConfigSnafu, Result, SerdeJsonSnafu, TomlFormatSnafu};
@@ -26,7 +25,7 @@ pub const ENV_VAR_SEP: &str = "__";
pub const ENV_LIST_SEP: &str = ",";
/// Configuration trait defines the common interface for configuration that can be loaded from multiple sources and serialized to TOML.
pub trait Configurable: Serialize + DeserializeOwned + Default + Sized {
pub trait Configurable<'de>: Serialize + Deserialize<'de> + Default + Sized {
/// Load the configuration from multiple sources and merge them.
/// The precedence order is: config file > environment variables > default values.
/// `env_prefix` is the prefix of environment variables, e.g. "FRONTEND__xxx".
@@ -129,7 +128,7 @@ mod tests {
}
}
impl Configurable for TestDatanodeConfig {
impl Configurable<'_> for TestDatanodeConfig {
fn env_list_keys() -> Option<&'static [&'static str]> {
Some(&["meta_client.metasrv_addrs"])
}
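With this change the trait carries a deserializer lifetime, so downstream config types write `impl Configurable<'_>` and keep deriving `Deserialize` as before. A small sketch of a hypothetical config struct using it (the type, file name, and env prefix are invented for illustration):

    use common_config::Configurable;
    use serde::{Deserialize, Serialize};

    #[derive(Clone, Debug, Default, Serialize, Deserialize)]
    #[serde(default)]
    struct MyToolOptions {
        endpoints: Vec<String>,
        data_home: String,
    }

    impl Configurable<'_> for MyToolOptions {
        fn env_list_keys() -> Option<&'static [&'static str]> {
            // Values for these keys are split on ENV_LIST_SEP (",") into a list.
            Some(&["endpoints"])
        }
    }

    fn load() -> MyToolOptions {
        // Precedence: config file > environment variables (e.g. MY_TOOL__DATA_HOME) > defaults.
        MyToolOptions::load_layered_options(Some("my-tool.toml"), "MY_TOOL")
            .expect("failed to load layered options")
    }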

View File

@@ -20,7 +20,6 @@ async-compression = { version = "0.3", features = [
] }
async-trait.workspace = true
bytes.workspace = true
common-base.workspace = true
common-error.workspace = true
common-macro.workspace = true
common-recordbatch.workspace = true
@@ -34,7 +33,6 @@ object-store.workspace = true
orc-rust = { git = "https://github.com/datafusion-contrib/datafusion-orc.git", rev = "502217315726314c4008808fe169764529640599" }
parquet.workspace = true
paste = "1.0"
rand.workspace = true
regex = "1.7"
serde.workspace = true
snafu.workspace = true
@@ -44,7 +42,4 @@ tokio-util.workspace = true
url = "2.3"
[dev-dependencies]
common-telemetry.workspace = true
common-test-util.workspace = true
dotenv.workspace = true
uuid.workspace = true

View File

@@ -92,44 +92,34 @@ impl CompressionType {
macro_rules! impl_compression_type {
($(($enum_item:ident, $prefix:ident)),*) => {
paste::item! {
use bytes::{Buf, BufMut, BytesMut};
impl CompressionType {
pub async fn encode<B: Buf>(&self, mut content: B) -> io::Result<Vec<u8>> {
pub async fn encode(&self, content: impl AsRef<[u8]>) -> io::Result<Vec<u8>> {
match self {
$(
CompressionType::$enum_item => {
let mut buffer = Vec::with_capacity(content.remaining());
let mut buffer = Vec::with_capacity(content.as_ref().len());
let mut encoder = write::[<$prefix Encoder>]::new(&mut buffer);
encoder.write_all_buf(&mut content).await?;
encoder.write_all(content.as_ref()).await?;
encoder.shutdown().await?;
Ok(buffer)
}
)*
CompressionType::Uncompressed => {
let mut bs = BytesMut::with_capacity(content.remaining());
bs.put(content);
Ok(bs.to_vec())
},
CompressionType::Uncompressed => Ok(content.as_ref().to_vec()),
}
}
pub async fn decode<B: Buf>(&self, mut content: B) -> io::Result<Vec<u8>> {
pub async fn decode(&self, content: impl AsRef<[u8]>) -> io::Result<Vec<u8>> {
match self {
$(
CompressionType::$enum_item => {
let mut buffer = Vec::with_capacity(content.remaining() * 2);
let mut buffer = Vec::with_capacity(content.as_ref().len() * 2);
let mut encoder = write::[<$prefix Decoder>]::new(&mut buffer);
encoder.write_all_buf(&mut content).await?;
encoder.write_all(content.as_ref()).await?;
encoder.shutdown().await?;
Ok(buffer)
}
)*
CompressionType::Uncompressed => {
let mut bs = BytesMut::with_capacity(content.remaining());
bs.put(content);
Ok(bs.to_vec())
},
CompressionType::Uncompressed => Ok(content.as_ref().to_vec()),
}
}
@@ -161,13 +151,13 @@ macro_rules! impl_compression_type {
$(
#[tokio::test]
async fn [<test_ $enum_item:lower _compression>]() {
let string = "foo_bar".as_bytes();
let string = "foo_bar".as_bytes().to_vec();
let compress = CompressionType::$enum_item
.encode(string)
.encode(&string)
.await
.unwrap();
let decompress = CompressionType::$enum_item
.decode(compress.as_slice())
.decode(&compress)
.await
.unwrap();
assert_eq!(decompress, string);
@@ -175,13 +165,13 @@ macro_rules! impl_compression_type {
#[tokio::test]
async fn test_uncompression() {
let string = "foo_bar".as_bytes();
let string = "foo_bar".as_bytes().to_vec();
let compress = CompressionType::Uncompressed
.encode(string)
.encode(&string)
.await
.unwrap();
let decompress = CompressionType::Uncompressed
.decode(compress.as_slice())
.decode(&compress)
.await
.unwrap();
assert_eq!(decompress, string);
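Since both helpers now take `impl AsRef<[u8]>`, a `Vec<u8>`, a byte slice, or a string's bytes can be passed directly without constructing a `Buf`. A round-trip sketch written as one more test in this module, so `CompressionType` is already in scope (the test name is illustrative):

    #[tokio::test]
    async fn test_roundtrip_with_as_ref_inputs() {
        // `&Vec<u8>`, `Vec<u8>` and `&[u8]` all satisfy `impl AsRef<[u8]>`.
        let raw: Vec<u8> = b"foo_bar".to_vec();

        let compressed = CompressionType::Uncompressed.encode(&raw).await.unwrap();
        let restored = CompressionType::Uncompressed.decode(&compressed).await.unwrap();

        assert_eq!(restored, raw);
    }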

View File

@@ -36,7 +36,6 @@ use datafusion::physical_plan::SendableRecordBatchStream;
use futures::StreamExt;
use object_store::ObjectStore;
use snafu::ResultExt;
use tokio_util::compat::FuturesAsyncWriteCompatExt;
use self::csv::CsvFormat;
use self::json::JsonFormat;
@@ -46,7 +45,6 @@ use crate::buffered_writer::{DfRecordBatchEncoder, LazyBufferedWriter};
use crate::compression::CompressionType;
use crate::error::{self, Result};
use crate::share_buffer::SharedBuffer;
use crate::DEFAULT_WRITE_BUFFER_SIZE;
pub const FORMAT_COMPRESSION_TYPE: &str = "compression_type";
pub const FORMAT_DELIMITER: &str = "delimiter";
@@ -148,8 +146,7 @@ pub fn open_with_decoder<T: ArrowDecoder, F: Fn() -> DataFusionResult<T>>(
let reader = object_store
.reader(&path)
.await
.map_err(|e| DataFusionError::External(Box::new(e)))?
.into_bytes_stream(..);
.map_err(|e| DataFusionError::External(Box::new(e)))?;
let mut upstream = compression_type.convert_stream(reader).fuse();
@@ -205,9 +202,7 @@ pub async fn stream_to_file<T: DfRecordBatchEncoder, U: Fn(SharedBuffer) -> T>(
store
.writer_with(&path)
.concurrent(concurrency)
.chunk(DEFAULT_WRITE_BUFFER_SIZE.as_bytes() as usize)
.await
.map(|v| v.into_futures_async_write().compat_write())
.context(error::WriteObjectSnafu { path })
});

View File

@@ -29,7 +29,6 @@ use datafusion::physical_plan::SendableRecordBatchStream;
use derive_builder::Builder;
use object_store::ObjectStore;
use snafu::ResultExt;
use tokio_util::compat::FuturesAsyncReadCompatExt;
use tokio_util::io::SyncIoBridge;
use super::stream_to_file;
@@ -165,16 +164,10 @@ impl FileOpener for CsvOpener {
#[async_trait]
impl FileFormat for CsvFormat {
async fn infer_schema(&self, store: &ObjectStore, path: &str) -> Result<Schema> {
let meta = store
.stat(path)
.await
.context(error::ReadObjectSnafu { path })?;
let reader = store
.reader(path)
.await
.context(error::ReadObjectSnafu { path })?
.into_futures_async_read(0..meta.content_length())
.compat();
.context(error::ReadObjectSnafu { path })?;
let decoded = self.compression_type.convert_async_read(reader);

View File

@@ -31,7 +31,6 @@ use datafusion::error::{DataFusionError, Result as DataFusionResult};
use datafusion::physical_plan::SendableRecordBatchStream;
use object_store::ObjectStore;
use snafu::ResultExt;
use tokio_util::compat::FuturesAsyncReadCompatExt;
use tokio_util::io::SyncIoBridge;
use super::stream_to_file;
@@ -83,16 +82,10 @@ impl Default for JsonFormat {
#[async_trait]
impl FileFormat for JsonFormat {
async fn infer_schema(&self, store: &ObjectStore, path: &str) -> Result<Schema> {
let meta = store
.stat(path)
.await
.context(error::ReadObjectSnafu { path })?;
let reader = store
.reader(path)
.await
.context(error::ReadObjectSnafu { path })?
.into_futures_async_read(0..meta.content_length())
.compat();
.context(error::ReadObjectSnafu { path })?;
let decoded = self.compression_type.convert_async_read(reader);

View File

@@ -16,17 +16,15 @@ use std::sync::Arc;
use arrow_schema::{ArrowError, Schema, SchemaRef};
use async_trait::async_trait;
use bytes::Bytes;
use common_recordbatch::adapter::RecordBatchStreamTypeAdapter;
use datafusion::datasource::physical_plan::{FileMeta, FileOpenFuture, FileOpener};
use datafusion::error::{DataFusionError, Result as DfResult};
use futures::future::BoxFuture;
use futures::{FutureExt, StreamExt, TryStreamExt};
use futures::{StreamExt, TryStreamExt};
use object_store::ObjectStore;
use orc_rust::arrow_reader::ArrowReaderBuilder;
use orc_rust::async_arrow_reader::ArrowStreamReader;
use orc_rust::reader::AsyncChunkReader;
use snafu::ResultExt;
use tokio::io::{AsyncRead, AsyncSeek};
use crate::error::{self, Result};
use crate::file_format::FileFormat;
@@ -34,49 +32,18 @@ use crate::file_format::FileFormat;
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub struct OrcFormat;
#[derive(Clone)]
pub struct ReaderAdapter {
reader: object_store::Reader,
len: u64,
}
impl ReaderAdapter {
pub fn new(reader: object_store::Reader, len: u64) -> Self {
Self { reader, len }
}
}
impl AsyncChunkReader for ReaderAdapter {
fn len(&mut self) -> BoxFuture<'_, std::io::Result<u64>> {
async move { Ok(self.len) }.boxed()
}
fn get_bytes(
&mut self,
offset_from_start: u64,
length: u64,
) -> BoxFuture<'_, std::io::Result<Bytes>> {
async move {
let bytes = self
.reader
.read(offset_from_start..offset_from_start + length)
.await?;
Ok(bytes.to_bytes())
}
.boxed()
}
}
pub async fn new_orc_stream_reader(
reader: ReaderAdapter,
) -> Result<ArrowStreamReader<ReaderAdapter>> {
pub async fn new_orc_stream_reader<R: AsyncRead + AsyncSeek + Unpin + Send + 'static>(
reader: R,
) -> Result<ArrowStreamReader<R>> {
let reader_build = ArrowReaderBuilder::try_new_async(reader)
.await
.context(error::OrcReaderSnafu)?;
Ok(reader_build.build_async())
}
pub async fn infer_orc_schema(reader: ReaderAdapter) -> Result<Schema> {
pub async fn infer_orc_schema<R: AsyncRead + AsyncSeek + Unpin + Send + 'static>(
reader: R,
) -> Result<Schema> {
let reader = new_orc_stream_reader(reader).await?;
Ok(reader.schema().as_ref().clone())
}
@@ -84,15 +51,13 @@ pub async fn infer_orc_schema(reader: ReaderAdapter) -> Result<Schema> {
#[async_trait]
impl FileFormat for OrcFormat {
async fn infer_schema(&self, store: &ObjectStore, path: &str) -> Result<Schema> {
let meta = store
.stat(path)
.await
.context(error::ReadObjectSnafu { path })?;
let reader = store
.reader(path)
.await
.context(error::ReadObjectSnafu { path })?;
let schema = infer_orc_schema(ReaderAdapter::new(reader, meta.content_length())).await?;
let schema = infer_orc_schema(reader).await?;
Ok(schema)
}
}
@@ -132,22 +97,14 @@ impl FileOpener for OrcOpener {
};
let projection = self.projection.clone();
Ok(Box::pin(async move {
let path = meta.location().to_string();
let meta = object_store
.stat(&path)
.await
.map_err(|e| DataFusionError::External(Box::new(e)))?;
let reader = object_store
.reader(&path)
.reader(meta.location().to_string().as_str())
.await
.map_err(|e| DataFusionError::External(Box::new(e)))?;
let stream_reader =
new_orc_stream_reader(ReaderAdapter::new(reader, meta.content_length()))
.await
.map_err(|e| DataFusionError::External(Box::new(e)))?;
let stream_reader = new_orc_stream_reader(reader)
.await
.map_err(|e| DataFusionError::External(Box::new(e)))?;
let stream =
RecordBatchStreamTypeAdapter::new(projected_schema, stream_reader, projection);
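Because `new_orc_stream_reader` and `infer_orc_schema` are generic over `AsyncRead + AsyncSeek` again, the reader does not have to come from the object store. A sketch written as if it lived in this module, so `infer_orc_schema` and `Schema` are in scope (the local-file path is illustrative):

    use tokio::fs::File;

    // tokio's `File` satisfies `AsyncRead + AsyncSeek + Unpin + Send + 'static`.
    async fn schema_of_local_orc(path: &str) -> Schema {
        let file = File::open(path).await.expect("failed to open ORC file");
        infer_orc_schema(file)
            .await
            .expect("failed to read ORC metadata")
    }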

View File

@@ -29,17 +29,15 @@ use datafusion::physical_plan::metrics::ExecutionPlanMetricsSet;
use datafusion::physical_plan::SendableRecordBatchStream;
use futures::future::BoxFuture;
use futures::StreamExt;
use object_store::{FuturesAsyncReader, ObjectStore};
use object_store::{ObjectStore, Reader, Writer};
use parquet::basic::{Compression, ZstdLevel};
use parquet::file::properties::WriterProperties;
use snafu::ResultExt;
use tokio_util::compat::{Compat, FuturesAsyncReadCompatExt, FuturesAsyncWriteCompatExt};
use crate::buffered_writer::{ArrowWriterCloser, DfRecordBatchEncoder, LazyBufferedWriter};
use crate::error::{self, Result};
use crate::file_format::FileFormat;
use crate::share_buffer::SharedBuffer;
use crate::DEFAULT_WRITE_BUFFER_SIZE;
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub struct ParquetFormat {}
@@ -47,16 +45,10 @@ pub struct ParquetFormat {}
#[async_trait]
impl FileFormat for ParquetFormat {
async fn infer_schema(&self, store: &ObjectStore, path: &str) -> Result<Schema> {
let meta = store
.stat(path)
.await
.context(error::ReadObjectSnafu { path })?;
let mut reader = store
.reader(path)
.await
.context(error::ReadObjectSnafu { path })?
.into_futures_async_read(0..meta.content_length())
.compat();
.context(error::ReadObjectSnafu { path })?;
let metadata = reader
.get_metadata()
@@ -106,7 +98,7 @@ impl ParquetFileReaderFactory for DefaultParquetFileReaderFactory {
pub struct LazyParquetFileReader {
object_store: ObjectStore,
reader: Option<Compat<FuturesAsyncReader>>,
reader: Option<Reader>,
path: String,
}
@@ -122,13 +114,7 @@ impl LazyParquetFileReader {
/// Initializes the reader if it has not been initialized yet; the returned future resolves to an error on failure.
async fn maybe_initialize(&mut self) -> result::Result<(), object_store::Error> {
if self.reader.is_none() {
let meta = self.object_store.stat(&self.path).await?;
let reader = self
.object_store
.reader(&self.path)
.await?
.into_futures_async_read(0..meta.content_length())
.compat();
let reader = self.object_store.reader(&self.path).await?;
self.reader = Some(reader);
}
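The hunks above drop the `into_futures_async_read(..).compat()` adaptation and hold the store's `Reader` directly. For readers unfamiliar with the removed dance, here is a minimal, self-contained sketch of bridging a futures-style `AsyncRead` into tokio's traits via `tokio_util::compat`; it assumes the `compat` feature of `tokio-util`, and `Cursor` stands in for the object-store reader.

```rust
use futures::io::Cursor; // a futures-style AsyncRead, standing in for the object-store reader
use tokio::io::AsyncReadExt;
use tokio_util::compat::FuturesAsyncReadCompatExt;

#[tokio::main]
async fn main() -> std::io::Result<()> {
    // A futures-rs reader ...
    let futures_reader = Cursor::new(b"parquet bytes".to_vec());
    // ... adapted into a tokio reader with `.compat()`, as the removed lines did.
    let mut tokio_reader = futures_reader.compat();
    let mut buf = Vec::new();
    tokio_reader.read_to_end(&mut buf).await?;
    assert_eq!(buf, b"parquet bytes".to_vec());
    Ok(())
}
```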
@@ -181,26 +167,23 @@ pub struct BufferedWriter {
}
type InnerBufferedWriter = LazyBufferedWriter<
Compat<object_store::FuturesAsyncWriter>,
object_store::Writer,
ArrowWriter<SharedBuffer>,
impl Fn(String) -> BoxFuture<'static, Result<Compat<object_store::FuturesAsyncWriter>>>,
impl Fn(String) -> BoxFuture<'static, Result<Writer>>,
>;
impl BufferedWriter {
fn make_write_factory(
store: ObjectStore,
concurrency: usize,
) -> impl Fn(String) -> BoxFuture<'static, Result<Compat<object_store::FuturesAsyncWriter>>>
{
) -> impl Fn(String) -> BoxFuture<'static, Result<Writer>> {
move |path| {
let store = store.clone();
Box::pin(async move {
store
.writer_with(&path)
.concurrent(concurrency)
.chunk(DEFAULT_WRITE_BUFFER_SIZE.as_bytes() as usize)
.await
.map(|v| v.into_futures_async_write().compat_write())
.context(error::WriteObjectSnafu { path })
})
}
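`make_write_factory` above configures the writer with a concurrency level and a chunk size taken from `DEFAULT_WRITE_BUFFER_SIZE` (see the constant further down in this diff), which has to stay above S3's 5 MB minimum multipart part size. A simplified, dependency-free sketch of that buffering policy follows; it is not the real `LazyBufferedWriter`, just the accumulate-then-flush idea.

```rust
// Not the real LazyBufferedWriter: a dependency-free sketch of the buffering
// policy, flushing a "part" whenever the buffer crosses the chunk threshold.
const MIN_S3_PART: usize = 5 * 1024 * 1024; // S3's minimum multipart part size
const CHUNK: usize = 8 * 1024 * 1024; // mirrors DEFAULT_WRITE_BUFFER_SIZE

struct BufferedPartWriter {
    buf: Vec<u8>,
    parts_flushed: usize,
}

impl BufferedPartWriter {
    fn new() -> Self {
        assert!(CHUNK >= MIN_S3_PART, "chunk must not fall below the S3 minimum part size");
        Self { buf: Vec::new(), parts_flushed: 0 }
    }

    fn write(&mut self, bytes: &[u8]) {
        self.buf.extend_from_slice(bytes);
        while self.buf.len() >= CHUNK {
            let part: Vec<u8> = self.buf.drain(..CHUNK).collect();
            self.upload_part(&part);
        }
    }

    fn close(&mut self) {
        if !self.buf.is_empty() {
            let part = std::mem::take(&mut self.buf);
            self.upload_part(&part);
        }
    }

    fn upload_part(&mut self, part: &[u8]) {
        // Placeholder for the actual multipart upload call.
        self.parts_flushed += 1;
        println!("flushed part #{} ({} bytes)", self.parts_flushed, part.len());
    }
}

fn main() {
    let mut writer = BufferedPartWriter::new();
    writer.write(&vec![0u8; 20 * 1024 * 1024]); // two full 8 MB parts are flushed
    writer.close(); // the remaining ~4 MB goes out as the final part
}
```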
@@ -278,19 +261,9 @@ pub async fn stream_to_parquet(
#[cfg(test)]
mod tests {
use std::env;
use std::sync::Arc;
use common_telemetry::warn;
use common_test_util::find_workspace_path;
use datatypes::arrow::array::{ArrayRef, Int64Array, RecordBatch};
use datatypes::arrow::datatypes::{DataType, Field, Schema};
use object_store::services::S3;
use object_store::ObjectStore;
use rand::{thread_rng, Rng};
use super::*;
use crate::file_format::parquet::BufferedWriter;
use crate::test_util::{format_schema, test_store};
fn test_data_root() -> String {
@@ -308,64 +281,4 @@ mod tests {
assert_eq!(vec!["num: Int64: NULL", "str: Utf8: NULL"], formatted);
}
#[tokio::test]
async fn test_parquet_writer() {
common_telemetry::init_default_ut_logging();
let _ = dotenv::dotenv();
let Ok(bucket) = env::var("GT_MINIO_BUCKET") else {
warn!("ignoring test parquet writer");
return;
};
let mut builder = S3::default();
let _ = builder
.root(&uuid::Uuid::new_v4().to_string())
.access_key_id(&env::var("GT_MINIO_ACCESS_KEY_ID").unwrap())
.secret_access_key(&env::var("GT_MINIO_ACCESS_KEY").unwrap())
.bucket(&bucket)
.region(&env::var("GT_MINIO_REGION").unwrap())
.endpoint(&env::var("GT_MINIO_ENDPOINT_URL").unwrap());
let object_store = ObjectStore::new(builder).unwrap().finish();
let file_path = uuid::Uuid::new_v4().to_string();
let fields = vec![
Field::new("field1", DataType::Int64, true),
Field::new("field0", DataType::Int64, true),
];
let arrow_schema = Arc::new(Schema::new(fields));
let mut buffered_writer = BufferedWriter::try_new(
file_path.clone(),
object_store.clone(),
arrow_schema.clone(),
None,
// Sets a small value.
128,
8,
)
.await
.unwrap();
let rows = 200000;
let generator = || {
let columns: Vec<ArrayRef> = vec![
Arc::new(Int64Array::from(
(0..rows)
.map(|_| thread_rng().gen::<i64>())
.collect::<Vec<_>>(),
)),
Arc::new(Int64Array::from(
(0..rows)
.map(|_| thread_rng().gen::<i64>())
.collect::<Vec<_>>(),
)),
];
RecordBatch::try_new(arrow_schema.clone(), columns).unwrap()
};
let batch = generator();
// Writes roughly 30 MiB in total.
for _ in 0..10 {
buffered_writer.write(&batch).await.unwrap();
}
buffered_writer.close().await.unwrap();
}
}

View File

@@ -27,8 +27,3 @@ pub mod test_util;
#[cfg(test)]
pub mod tests;
pub mod util;
use common_base::readable_size::ReadableSize;
/// Default write buffer size; it should be greater than S3's minimum multipart upload part size (5 MB).
pub const DEFAULT_WRITE_BUFFER_SIZE: ReadableSize = ReadableSize::mb(8);

View File

@@ -120,7 +120,7 @@ pub async fn setup_stream_to_json_test(origin_path: &str, threshold: impl Fn(usi
let written = tmp_store.read(&output_path).await.unwrap();
let origin = store.read(origin_path).await.unwrap();
assert_eq_lines(written.to_vec(), origin.to_vec());
assert_eq_lines(written, origin);
}
pub async fn setup_stream_to_csv_test(origin_path: &str, threshold: impl Fn(usize) -> usize) {
@@ -158,7 +158,7 @@ pub async fn setup_stream_to_csv_test(origin_path: &str, threshold: impl Fn(usiz
let written = tmp_store.read(&output_path).await.unwrap();
let origin = store.read(origin_path).await.unwrap();
assert_eq_lines(written.to_vec(), origin.to_vec());
assert_eq_lines(written, origin);
}
// Ignore the CRLF difference across operating systems.

View File

@@ -10,4 +10,3 @@ workspace = true
[dependencies]
snafu.workspace = true
strum.workspace = true
tonic.workspace = true

View File

@@ -15,7 +15,6 @@
use std::fmt;
use strum::{AsRefStr, EnumIter, EnumString, FromRepr};
use tonic::Code;
/// Common status code for public API.
#[derive(Debug, Clone, Copy, PartialEq, Eq, EnumString, AsRefStr, EnumIter, FromRepr)]
@@ -203,75 +202,6 @@ impl fmt::Display for StatusCode {
}
}
#[macro_export]
macro_rules! define_into_tonic_status {
($Error: ty) => {
impl From<$Error> for tonic::Status {
fn from(err: $Error) -> Self {
use tonic::codegen::http::{HeaderMap, HeaderValue};
use tonic::metadata::MetadataMap;
use $crate::GREPTIME_DB_HEADER_ERROR_CODE;
let mut headers = HeaderMap::<HeaderValue>::with_capacity(2);
// If either the status code or the error message cannot be converted to a valid HTTP header value
// (a very rare case), just ignore it. The client will fall back to the Tonic status code and message.
let status_code = err.status_code();
headers.insert(
GREPTIME_DB_HEADER_ERROR_CODE,
HeaderValue::from(status_code as u32),
);
let root_error = err.output_msg();
let metadata = MetadataMap::from_headers(headers);
tonic::Status::with_metadata(
$crate::status_code::status_to_tonic_code(status_code),
root_error,
metadata,
)
}
}
};
}
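`define_into_tonic_status!` above attaches the GreptimeDB status code to the gRPC response metadata. A small client-side sketch of reading it back is shown below; the literal header name is an assumption for illustration only, the real value is the `GREPTIME_DB_HEADER_ERROR_CODE` constant.

```rust
// Hypothetical client-side helper: read back the numeric status code that the
// macro stores in the gRPC metadata. The literal header name below is an
// illustrative assumption; real code should use GREPTIME_DB_HEADER_ERROR_CODE.
fn error_code_from_status(status: &tonic::Status) -> Option<u32> {
    status
        .metadata()
        .get("x-greptime-err-code")
        .and_then(|value| value.to_str().ok())
        .and_then(|text| text.parse::<u32>().ok())
}

fn main() {
    // A hand-built status carries no such header, so the lookup yields None.
    let status = tonic::Status::internal("boom");
    assert_eq!(error_code_from_status(&status), None);
}
```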
/// Returns the tonic [Code] of a [StatusCode].
pub fn status_to_tonic_code(status_code: StatusCode) -> Code {
match status_code {
StatusCode::Success => Code::Ok,
StatusCode::Unknown => Code::Unknown,
StatusCode::Unsupported => Code::Unimplemented,
StatusCode::Unexpected
| StatusCode::Internal
| StatusCode::PlanQuery
| StatusCode::EngineExecuteQuery => Code::Internal,
StatusCode::InvalidArguments | StatusCode::InvalidSyntax | StatusCode::RequestOutdated => {
Code::InvalidArgument
}
StatusCode::Cancelled => Code::Cancelled,
StatusCode::TableAlreadyExists
| StatusCode::TableColumnExists
| StatusCode::RegionAlreadyExists
| StatusCode::FlowAlreadyExists => Code::AlreadyExists,
StatusCode::TableNotFound
| StatusCode::RegionNotFound
| StatusCode::TableColumnNotFound
| StatusCode::DatabaseNotFound
| StatusCode::UserNotFound
| StatusCode::FlowNotFound => Code::NotFound,
StatusCode::StorageUnavailable | StatusCode::RegionNotReady => Code::Unavailable,
StatusCode::RuntimeResourcesExhausted
| StatusCode::RateLimited
| StatusCode::RegionBusy => Code::ResourceExhausted,
StatusCode::UnsupportedPasswordType
| StatusCode::UserPasswordMismatch
| StatusCode::AuthHeaderNotFound
| StatusCode::InvalidAuthHeader => Code::Unauthenticated,
StatusCode::AccessDenied | StatusCode::PermissionDenied | StatusCode::RegionReadonly => {
Code::PermissionDenied
}
}
}
#[cfg(test)]
mod tests {
use strum::IntoEnumIterator;

View File

@@ -143,6 +143,8 @@ fn clamp_impl<T: LogicalPrimitiveType, const CLAMP_MIN: bool, const CLAMP_MAX: b
min: T::Native,
max: T::Native,
) -> Result<VectorRef> {
common_telemetry::info!("[DEBUG] min {min:?}, max {max:?}");
let iter = ArrayIter::new(input);
let result = iter.map(|x| {
x.map(|x| {

View File

@@ -25,7 +25,7 @@ prost.workspace = true
snafu.workspace = true
tokio.workspace = true
tonic.workspace = true
tower.workspace = true
tower = "0.4"
[dev-dependencies]
criterion = "0.4"

View File

@@ -24,7 +24,7 @@ pub use registry::{
LayeredCacheRegistryBuilder, LayeredCacheRegistryRef,
};
pub use table::{
new_table_info_cache, new_table_name_cache, new_table_route_cache, new_view_info_cache,
TableInfoCache, TableInfoCacheRef, TableNameCache, TableNameCacheRef, TableRoute,
TableRouteCache, TableRouteCacheRef, ViewInfoCache, ViewInfoCacheRef,
new_table_info_cache, new_table_name_cache, new_table_route_cache, TableInfoCache,
TableInfoCacheRef, TableNameCache, TableNameCacheRef, TableRoute, TableRouteCache,
TableRouteCacheRef,
};

View File

@@ -145,13 +145,13 @@ mod tests {
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use moka::future::CacheBuilder;
use table::table_name::TableName;
use crate::cache::flow::table_flownode::new_table_flownode_set_cache;
use crate::instruction::{CacheIdent, CreateFlow, DropFlow};
use crate::key::flow::flow_info::FlowInfoValue;
use crate::key::flow::FlowMetadataManager;
use crate::kv_backend::memory::MemoryKvBackend;
use crate::table_name::TableName;
#[tokio::test]
async fn test_cache_empty_set() {

View File

@@ -15,9 +15,6 @@
mod table_info;
mod table_name;
mod table_route;
mod view_info;
pub use table_info::{new_table_info_cache, TableInfoCache, TableInfoCacheRef};
pub use table_name::{new_table_name_cache, TableNameCache, TableNameCacheRef};
pub use table_route::{new_table_route_cache, TableRoute, TableRouteCache, TableRouteCacheRef};
pub use view_info::{new_view_info_cache, ViewInfoCache, ViewInfoCacheRef};

View File

@@ -18,7 +18,6 @@ use futures::future::BoxFuture;
use moka::future::Cache;
use snafu::OptionExt;
use table::metadata::TableId;
use table::table_name::TableName;
use crate::cache::{CacheContainer, Initializer};
use crate::error;
@@ -26,6 +25,7 @@ use crate::error::Result;
use crate::instruction::CacheIdent;
use crate::key::table_name::{TableNameKey, TableNameManager, TableNameManagerRef};
use crate::kv_backend::KvBackendRef;
use crate::table_name::TableName;
/// [TableNameCache] caches the [TableName] to [TableId] mapping.
pub type TableNameCache = CacheContainer<TableName, TableId, CacheIdent>;

View File

@@ -1,143 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
use futures::future::BoxFuture;
use moka::future::Cache;
use snafu::OptionExt;
use store_api::storage::TableId;
use crate::cache::{CacheContainer, Initializer};
use crate::error;
use crate::error::Result;
use crate::instruction::CacheIdent;
use crate::key::view_info::{ViewInfoManager, ViewInfoManagerRef, ViewInfoValue};
use crate::kv_backend::KvBackendRef;
/// [ViewInfoCache] caches the [TableId] to [ViewInfoValue] mapping.
pub type ViewInfoCache = CacheContainer<TableId, Arc<ViewInfoValue>, CacheIdent>;
pub type ViewInfoCacheRef = Arc<ViewInfoCache>;
/// Constructs a [ViewInfoCache].
pub fn new_view_info_cache(
name: String,
cache: Cache<TableId, Arc<ViewInfoValue>>,
kv_backend: KvBackendRef,
) -> ViewInfoCache {
let view_info_manager = Arc::new(ViewInfoManager::new(kv_backend));
let init = init_factory(view_info_manager);
CacheContainer::new(name, cache, Box::new(invalidator), init, Box::new(filter))
}
fn init_factory(view_info_manager: ViewInfoManagerRef) -> Initializer<TableId, Arc<ViewInfoValue>> {
Arc::new(move |view_id| {
let view_info_manager = view_info_manager.clone();
Box::pin(async move {
let view_info = view_info_manager
.get(*view_id)
.await?
.context(error::ValueNotExistSnafu {})?
.into_inner();
Ok(Some(Arc::new(view_info)))
})
})
}
fn invalidator<'a>(
cache: &'a Cache<TableId, Arc<ViewInfoValue>>,
ident: &'a CacheIdent,
) -> BoxFuture<'a, Result<()>> {
Box::pin(async move {
if let CacheIdent::TableId(table_id) = ident {
cache.invalidate(table_id).await
}
Ok(())
})
}
fn filter(ident: &CacheIdent) -> bool {
matches!(ident, CacheIdent::TableId(_))
}
#[cfg(test)]
mod tests {
use std::collections::HashSet;
use std::sync::Arc;
use moka::future::CacheBuilder;
use table::table_name::TableName;
use super::*;
use crate::ddl::tests::create_view::test_create_view_task;
use crate::key::TableMetadataManager;
use crate::kv_backend::memory::MemoryKvBackend;
#[tokio::test]
async fn test_view_info_cache() {
let mem_kv = Arc::new(MemoryKvBackend::default());
let table_metadata_manager = TableMetadataManager::new(mem_kv.clone());
let cache = CacheBuilder::new(128).build();
let cache = new_view_info_cache("test".to_string(), cache, mem_kv.clone());
let result = cache.get(1024).await.unwrap();
assert!(result.is_none());
let mut task = test_create_view_task("my_view");
let table_names = {
let mut set = HashSet::new();
set.insert(TableName {
catalog_name: "greptime".to_string(),
schema_name: "public".to_string(),
table_name: "a_table".to_string(),
});
set.insert(TableName {
catalog_name: "greptime".to_string(),
schema_name: "public".to_string(),
table_name: "b_table".to_string(),
});
set
};
task.view_info.ident.table_id = 1024;
table_metadata_manager
.create_view_metadata(
task.view_info.clone(),
task.create_view.logical_plan.clone(),
table_names,
)
.await
.unwrap();
let view_info = cache.get(1024).await.unwrap().unwrap();
assert_eq!(view_info.view_info, task.create_view.logical_plan);
assert_eq!(
view_info.table_names,
task.create_view
.table_names
.iter()
.map(|t| t.clone().into())
.collect::<HashSet<_>>()
);
assert!(cache.contains_key(&1024));
cache
.invalidate(&[CacheIdent::TableId(1024)])
.await
.unwrap();
assert!(!cache.contains_key(&1024));
}
}

View File

@@ -48,7 +48,7 @@ pub mod table_meta;
#[cfg(any(test, feature = "testing"))]
pub mod test_util;
#[cfg(test)]
pub(crate) mod tests;
mod tests;
pub mod truncate_table;
pub mod utils;

View File

@@ -13,10 +13,10 @@
// limitations under the License.
use table::metadata::RawTableInfo;
use table::table_name::TableName;
use crate::ddl::alter_logical_tables::AlterLogicalTablesProcedure;
use crate::instruction::CacheIdent;
use crate::table_name::TableName;
impl AlterLogicalTablesProcedure {
pub(crate) fn build_table_cache_keys_to_invalidate(&self) -> Vec<CacheIdent> {

View File

@@ -18,13 +18,13 @@ use common_telemetry::{info, warn};
use itertools::Itertools;
use snafu::OptionExt;
use table::metadata::TableId;
use table::table_name::TableName;
use crate::cache_invalidator::Context;
use crate::ddl::create_logical_tables::CreateLogicalTablesProcedure;
use crate::ddl::physical_table_metadata;
use crate::error::{Result, TableInfoNotFoundSnafu};
use crate::instruction::CacheIdent;
use crate::table_name::TableName;
impl CreateLogicalTablesProcedure {
pub(crate) async fn update_physical_table_metadata(&mut self) -> Result<()> {

View File

@@ -22,11 +22,9 @@ use strum::AsRefStr;
use table::metadata::{RawTableInfo, TableId, TableType};
use table::table_reference::TableReference;
use crate::cache_invalidator::Context;
use crate::ddl::utils::handle_retry_error;
use crate::ddl::{DdlContext, TableMetadata, TableMetadataAllocatorContext};
use crate::error::{self, Result};
use crate::instruction::CacheIdent;
use crate::key::table_name::TableNameKey;
use crate::lock_key::{CatalogLock, SchemaLock, TableNameLock};
use crate::rpc::ddl::CreateViewTask;
@@ -159,25 +157,6 @@ impl CreateViewProcedure {
Ok(Status::executing(true))
}
async fn invalidate_view_cache(&self) -> Result<()> {
let cache_invalidator = &self.context.cache_invalidator;
let ctx = Context {
subject: Some("Invalidate view cache by creating view".to_string()),
};
cache_invalidator
.invalidate(
&ctx,
&[
CacheIdent::TableName(self.data.table_ref().into()),
CacheIdent::TableId(self.view_id()),
],
)
.await?;
Ok(())
}
/// Creates view metadata
///
/// Abort(not-retry):
@@ -196,21 +175,15 @@ impl CreateViewProcedure {
view_name: self.data.table_ref().to_string(),
})?;
let new_logical_plan = self.data.task.raw_logical_plan().clone();
let table_names = self.data.task.table_names();
manager
.update_view_info(view_id, &current_view_info, new_logical_plan, table_names)
.update_view_info(view_id, &current_view_info, new_logical_plan)
.await?;
info!("Updated view metadata for view {view_id}");
} else {
let raw_view_info = self.view_info().clone();
manager
.create_view_metadata(
raw_view_info,
self.data.task.raw_logical_plan().clone(),
self.data.task.table_names(),
)
.create_view_metadata(raw_view_info, self.data.task.raw_logical_plan())
.await?;
info!(
@@ -218,7 +191,6 @@ impl CreateViewProcedure {
ctx.procedure_id
);
}
self.invalidate_view_cache().await?;
Ok(Status::done_with_output(view_id))
}

View File

@@ -14,23 +14,19 @@
use std::any::Any;
use common_catalog::format_full_table_name;
use common_procedure::Status;
use futures::TryStreamExt;
use serde::{Deserialize, Serialize};
use snafu::OptionExt;
use table::metadata::{TableId, TableType};
use table::table_name::TableName;
use table::metadata::TableId;
use super::executor::DropDatabaseExecutor;
use super::metadata::DropDatabaseRemoveMetadata;
use super::DropTableTarget;
use crate::cache_invalidator::Context;
use crate::ddl::drop_database::{DropDatabaseContext, State};
use crate::ddl::DdlContext;
use crate::error::{Result, TableInfoNotFoundSnafu};
use crate::instruction::CacheIdent;
use crate::error::Result;
use crate::key::table_route::TableRouteValue;
use crate::table_name::TableName;
#[derive(Debug, Serialize, Deserialize)]
pub(crate) struct DropDatabaseCursor {
@@ -105,40 +101,6 @@ impl DropDatabaseCursor {
)),
}
}
async fn handle_view(
&self,
ddl_ctx: &DdlContext,
ctx: &mut DropDatabaseContext,
table_name: String,
table_id: TableId,
) -> Result<(Box<dyn State>, Status)> {
let view_name = TableName::new(&ctx.catalog, &ctx.schema, &table_name);
ddl_ctx
.table_metadata_manager
.destroy_view_info(table_id, &view_name)
.await?;
let cache_invalidator = &ddl_ctx.cache_invalidator;
let ctx = Context {
subject: Some("Invalidate table cache by dropping table".to_string()),
};
cache_invalidator
.invalidate(
&ctx,
&[
CacheIdent::TableName(view_name),
CacheIdent::TableId(table_id),
],
)
.await?;
Ok((
Box::new(DropDatabaseCursor::new(self.target)),
Status::executing(false),
))
}
}
#[async_trait::async_trait]
@@ -160,20 +122,6 @@ impl State for DropDatabaseCursor {
match ctx.tables.as_mut().unwrap().try_next().await? {
Some((table_name, table_name_value)) => {
let table_id = table_name_value.table_id();
let table_info_value = ddl_ctx
.table_metadata_manager
.table_info_manager()
.get(table_id)
.await?
.with_context(|| TableInfoNotFoundSnafu {
table: format_full_table_name(&ctx.catalog, &ctx.schema, &table_name),
})?;
if table_info_value.table_info.table_type == TableType::View {
return self.handle_view(ddl_ctx, ctx, table_name, table_id).await;
}
match ddl_ctx
.table_metadata_manager
.table_route_manager()

View File

@@ -19,7 +19,6 @@ use common_telemetry::info;
use serde::{Deserialize, Serialize};
use snafu::OptionExt;
use table::metadata::TableId;
use table::table_name::TableName;
use super::cursor::DropDatabaseCursor;
use super::{DropDatabaseContext, DropTableTarget};
@@ -30,6 +29,7 @@ use crate::error::{self, Result};
use crate::key::table_route::TableRouteValue;
use crate::region_keeper::OperatingRegionGuard;
use crate::rpc::router::{operating_leader_regions, RegionRoute};
use crate::table_name::TableName;
#[derive(Debug, Serialize, Deserialize)]
pub(crate) struct DropDatabaseExecutor {
@@ -135,7 +135,6 @@ mod tests {
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_error::ext::BoxedError;
use common_recordbatch::SendableRecordBatchStream;
use table::table_name::TableName;
use crate::ddl::drop_database::cursor::DropDatabaseCursor;
use crate::ddl::drop_database::executor::DropDatabaseExecutor;
@@ -145,6 +144,7 @@ mod tests {
use crate::key::datanode_table::DatanodeTableKey;
use crate::peer::Peer;
use crate::rpc::router::region_distribution;
use crate::table_name::TableName;
use crate::test_util::{new_ddl_context, MockDatanodeHandler, MockDatanodeManager};
#[derive(Clone)]

View File

@@ -23,7 +23,6 @@ use futures::future::join_all;
use snafu::ensure;
use store_api::storage::RegionId;
use table::metadata::TableId;
use table::table_name::TableName;
use crate::cache_invalidator::Context;
use crate::ddl::utils::add_peer_context_if_needed;
@@ -33,6 +32,7 @@ use crate::instruction::CacheIdent;
use crate::key::table_name::TableNameKey;
use crate::key::table_route::TableRouteValue;
use crate::rpc::router::{find_leader_regions, find_leaders, RegionRoute};
use crate::table_name::TableName;
/// [Control] indicated to the caller whether to go to the next step.
#[derive(Debug)]
@@ -224,7 +224,6 @@ mod tests {
use api::v1::{ColumnDataType, SemanticType};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use table::metadata::RawTableInfo;
use table::table_name::TableName;
use super::*;
use crate::ddl::test_util::columns::TestColumnDefBuilder;
@@ -232,6 +231,7 @@ mod tests {
build_raw_table_info_from_expr, TestCreateTableExprBuilder,
};
use crate::key::table_route::TableRouteValue;
use crate::table_name::TableName;
use crate::test_util::{new_ddl_context, MockDatanodeManager};
fn test_create_raw_table_info(name: &str) -> RawTableInfo {

View File

@@ -17,7 +17,7 @@ mod alter_table;
mod create_flow;
mod create_logical_tables;
mod create_table;
pub(crate) mod create_view;
mod create_view;
mod drop_database;
mod drop_flow;
mod drop_table;

View File

@@ -19,7 +19,6 @@ use std::sync::Arc;
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_procedure_test::execute_procedure_until_done;
use session::context::QueryContext;
use table::table_name::TableName;
use crate::ddl::create_flow::CreateFlowProcedure;
use crate::ddl::test_util::create_table::test_create_table_task;
@@ -28,6 +27,7 @@ use crate::ddl::DdlContext;
use crate::key::table_route::TableRouteValue;
use crate::key::FlowId;
use crate::rpc::ddl::CreateFlowTask;
use crate::table_name::TableName;
use crate::test_util::{new_ddl_context, MockFlownodeManager};
use crate::{error, ClusterId};

View File

@@ -13,10 +13,9 @@
// limitations under the License.
use std::assert_matches::assert_matches;
use std::collections::HashSet;
use std::sync::Arc;
use api::v1::{CreateViewExpr, TableName};
use api::v1::CreateViewExpr;
use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
use common_procedure::{Context as ProcedureContext, Procedure, ProcedureId, Status};
@@ -32,35 +31,7 @@ use crate::error::Error;
use crate::rpc::ddl::CreateViewTask;
use crate::test_util::{new_ddl_context, MockDatanodeManager};
fn test_table_names() -> HashSet<table::table_name::TableName> {
let mut set = HashSet::new();
set.insert(table::table_name::TableName {
catalog_name: "greptime".to_string(),
schema_name: "public".to_string(),
table_name: "a_table".to_string(),
});
set.insert(table::table_name::TableName {
catalog_name: "greptime".to_string(),
schema_name: "public".to_string(),
table_name: "b_table".to_string(),
});
set
}
pub(crate) fn test_create_view_task(name: &str) -> CreateViewTask {
let table_names = vec![
TableName {
catalog_name: "greptime".to_string(),
schema_name: "public".to_string(),
table_name: "a_table".to_string(),
},
TableName {
catalog_name: "greptime".to_string(),
schema_name: "public".to_string(),
table_name: "b_table".to_string(),
},
];
fn test_create_view_task(name: &str) -> CreateViewTask {
let expr = CreateViewExpr {
catalog_name: "greptime".to_string(),
schema_name: "public".to_string(),
@@ -68,7 +39,6 @@ pub(crate) fn test_create_view_task(name: &str) -> CreateViewTask {
or_replace: false,
create_if_not_exists: false,
logical_plan: vec![1, 2, 3],
table_names,
};
let view_info = RawTableInfo {
@@ -100,11 +70,7 @@ async fn test_on_prepare_view_exists_err() {
// Puts a value to table name key.
ddl_context
.table_metadata_manager
.create_view_metadata(
task.view_info.clone(),
task.create_view.logical_plan.clone(),
test_table_names(),
)
.create_view_metadata(task.view_info.clone(), &task.create_view.logical_plan)
.await
.unwrap();
let mut procedure = CreateViewProcedure::new(cluster_id, task, ddl_context);
@@ -124,11 +90,7 @@ async fn test_on_prepare_with_create_if_view_exists() {
// Puts a value to table name key.
ddl_context
.table_metadata_manager
.create_view_metadata(
task.view_info.clone(),
task.create_view.logical_plan.clone(),
test_table_names(),
)
.create_view_metadata(task.view_info.clone(), &task.create_view.logical_plan)
.await
.unwrap();
let mut procedure = CreateViewProcedure::new(cluster_id, task, ddl_context);

View File

@@ -18,7 +18,6 @@ use std::sync::Arc;
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_procedure_test::execute_procedure_until_done;
use table::table_name::TableName;
use crate::ddl::drop_flow::DropFlowProcedure;
use crate::ddl::test_util::create_table::test_create_table_task;
@@ -27,6 +26,7 @@ use crate::ddl::tests::create_flow::create_test_flow;
use crate::error;
use crate::key::table_route::TableRouteValue;
use crate::rpc::ddl::DropFlowTask;
use crate::table_name::TableName;
use crate::test_util::{new_ddl_context, MockFlownodeManager};
fn test_drop_flow_task(flow_name: &str, flow_id: u32, drop_if_exists: bool) -> DropFlowTask {

View File

@@ -28,7 +28,6 @@ use snafu::{ensure, ResultExt};
use store_api::storage::RegionId;
use strum::AsRefStr;
use table::metadata::{RawTableInfo, TableId};
use table::table_name::TableName;
use table::table_reference::TableReference;
use super::utils::handle_retry_error;
@@ -41,6 +40,7 @@ use crate::key::DeserializedValueWithBytes;
use crate::lock_key::{CatalogLock, SchemaLock, TableLock};
use crate::rpc::ddl::TruncateTableTask;
use crate::rpc::router::{find_leader_regions, find_leaders, RegionRoute};
use crate::table_name::TableName;
use crate::{metrics, ClusterId};
pub struct TruncateTableProcedure {

View File

@@ -489,7 +489,8 @@ async fn handle_create_table_task(
Ok(SubmitDdlTaskResponse {
key: procedure_id.into(),
table_ids: vec![table_id],
table_id: Some(table_id),
..Default::default()
})
}
@@ -533,6 +534,7 @@ async fn handle_create_logical_table_tasks(
Ok(SubmitDdlTaskResponse {
key: procedure_id.into(),
table_ids,
..Default::default()
})
}
@@ -688,7 +690,8 @@ async fn handle_create_view_task(
Ok(SubmitDdlTaskResponse {
key: procedure_id.into(),
table_ids: vec![view_id],
table_id: Some(view_id),
..Default::default()
})
}

View File

@@ -20,11 +20,11 @@ use serde::{Deserialize, Serialize};
use store_api::storage::{RegionId, RegionNumber};
use strum::Display;
use table::metadata::TableId;
use table::table_name::TableName;
use crate::flow_name::FlowName;
use crate::key::schema_name::SchemaName;
use crate::key::FlowId;
use crate::table_name::TableName;
use crate::{ClusterId, DatanodeId, FlownodeId};
#[derive(Eq, Hash, PartialEq, Clone, Debug, Serialize, Deserialize)]

View File

@@ -89,6 +89,9 @@ pub mod flow;
pub mod schema_name;
pub mod table_info;
pub mod table_name;
// TODO(weny): remove it.
#[allow(deprecated)]
pub mod table_region;
pub mod view_info;
// TODO(weny): remove it.
#[allow(deprecated)]
@@ -116,7 +119,6 @@ use serde::{Deserialize, Serialize};
use snafu::{ensure, OptionExt, ResultExt};
use store_api::storage::RegionNumber;
use table::metadata::{RawTableInfo, TableId};
use table::table_name::TableName;
use table_info::{TableInfoKey, TableInfoManager, TableInfoValue};
use table_name::{TableNameKey, TableNameManager, TableNameValue};
use view_info::{ViewInfoKey, ViewInfoManager, ViewInfoValue};
@@ -136,12 +138,14 @@ use crate::kv_backend::txn::{Txn, TxnOp};
use crate::kv_backend::KvBackendRef;
use crate::rpc::router::{region_distribution, RegionRoute, RegionStatus};
use crate::rpc::store::BatchDeleteRequest;
use crate::table_name::TableName;
use crate::DatanodeId;
pub const NAME_PATTERN: &str = r"[a-zA-Z_:-][a-zA-Z0-9_:\-\.]*";
pub const MAINTENANCE_KEY: &str = "maintenance";
const DATANODE_TABLE_KEY_PREFIX: &str = "__dn_table";
const TABLE_REGION_KEY_PREFIX: &str = "__table_region";
pub const TABLE_INFO_KEY_PREFIX: &str = "__table_info";
pub const VIEW_INFO_KEY_PREFIX: &str = "__view_info";
pub const TABLE_NAME_KEY_PREFIX: &str = "__table_name";
@@ -486,8 +490,7 @@ impl TableMetadataManager {
pub async fn create_view_metadata(
&self,
view_info: RawTableInfo,
raw_logical_plan: Vec<u8>,
table_names: HashSet<TableName>,
raw_logical_plan: &Vec<u8>,
) -> Result<()> {
let view_id = view_info.ident.table_id;
@@ -509,7 +512,7 @@ impl TableMetadataManager {
.build_create_txn(view_id, &table_info_value)?;
// Creates view info
let view_info_value = ViewInfoValue::new(raw_logical_plan, table_names);
let view_info_value = ViewInfoValue::new(raw_logical_plan);
let (create_view_info_txn, on_create_view_info_failure) = self
.view_info_manager()
.build_create_txn(view_id, &view_info_value)?;
@@ -801,33 +804,6 @@ impl TableMetadataManager {
Ok(())
}
fn view_info_keys(&self, view_id: TableId, view_name: &TableName) -> Result<Vec<Vec<u8>>> {
let mut keys = Vec::with_capacity(3);
let view_name = TableNameKey::new(
&view_name.catalog_name,
&view_name.schema_name,
&view_name.table_name,
);
let table_info_key = TableInfoKey::new(view_id);
let view_info_key = ViewInfoKey::new(view_id);
keys.push(view_name.to_bytes());
keys.push(table_info_key.to_bytes());
keys.push(view_info_key.to_bytes());
Ok(keys)
}
/// Deletes metadata for view **permanently**.
/// The caller MUST ensure it has exclusive access to `ViewNameKey`.
pub async fn destroy_view_info(&self, view_id: TableId, view_name: &TableName) -> Result<()> {
let keys = self.view_info_keys(view_id, view_name)?;
let _ = self
.kv_backend
.batch_delete(BatchDeleteRequest::new().with_keys(keys))
.await?;
Ok(())
}
/// Renames the table name and returns an error if different metadata exists.
/// The caller MUST ensure it has exclusive access to old and new `TableNameKey`s,
/// and the new `TableNameKey` MUST be empty.
@@ -927,9 +903,8 @@ impl TableMetadataManager {
view_id: TableId,
current_view_info_value: &DeserializedValueWithBytes<ViewInfoValue>,
new_view_info: Vec<u8>,
table_names: HashSet<TableName>,
) -> Result<()> {
let new_view_info_value = current_view_info_value.update(new_view_info, table_names);
let new_view_info_value = current_view_info_value.update(new_view_info);
// Updates view info.
let (update_view_info_txn, on_update_view_info_failure) = self
@@ -1199,7 +1174,7 @@ impl_optional_meta_value! {
#[cfg(test)]
mod tests {
use std::collections::{BTreeMap, HashMap, HashSet};
use std::collections::{BTreeMap, HashMap};
use std::sync::Arc;
use bytes::Bytes;
@@ -1208,7 +1183,6 @@ mod tests {
use futures::TryStreamExt;
use store_api::storage::RegionId;
use table::metadata::{RawTableInfo, TableInfo};
use table::table_name::TableName;
use super::datanode_table::DatanodeTableKey;
use super::test_utils;
@@ -1223,6 +1197,7 @@ mod tests {
use crate::kv_backend::memory::MemoryKvBackend;
use crate::peer::Peer;
use crate::rpc::router::{region_distribution, Region, RegionRoute, RegionStatus};
use crate::table_name::TableName;
#[test]
fn test_deserialized_value_with_bytes() {
@@ -1275,21 +1250,6 @@ mod tests {
test_utils::new_test_table_info(10, region_numbers)
}
fn new_test_table_names() -> HashSet<TableName> {
let mut set = HashSet::new();
set.insert(TableName {
catalog_name: "greptime".to_string(),
schema_name: "public".to_string(),
table_name: "a_table".to_string(),
});
set.insert(TableName {
catalog_name: "greptime".to_string(),
schema_name: "public".to_string(),
table_name: "b_table".to_string(),
});
set
}
async fn create_physical_table_metadata(
table_metadata_manager: &TableMetadataManager,
table_info: RawTableInfo,
@@ -2001,11 +1961,9 @@ mod tests {
let logical_plan: Vec<u8> = vec![1, 2, 3];
let table_names = new_test_table_names();
// Create metadata
table_metadata_manager
.create_view_metadata(view_info.clone(), logical_plan.clone(), table_names.clone())
.create_view_metadata(view_info.clone(), &logical_plan)
.await
.unwrap();
@@ -2019,7 +1977,6 @@ mod tests {
.unwrap()
.into_inner();
assert_eq!(current_view_info.view_info, logical_plan);
assert_eq!(current_view_info.table_names, table_names);
// assert table info
let current_table_info = table_metadata_manager
.table_info_manager()
@@ -2032,43 +1989,16 @@ mod tests {
}
let new_logical_plan: Vec<u8> = vec![4, 5, 6];
let new_table_names = {
let mut set = HashSet::new();
set.insert(TableName {
catalog_name: "greptime".to_string(),
schema_name: "public".to_string(),
table_name: "b_table".to_string(),
});
set.insert(TableName {
catalog_name: "greptime".to_string(),
schema_name: "public".to_string(),
table_name: "c_table".to_string(),
});
set
};
let current_view_info_value = DeserializedValueWithBytes::from_inner(ViewInfoValue::new(
logical_plan.clone(),
table_names,
));
let current_view_info_value =
DeserializedValueWithBytes::from_inner(ViewInfoValue::new(&logical_plan));
// should be ok.
table_metadata_manager
.update_view_info(
view_id,
&current_view_info_value,
new_logical_plan.clone(),
new_table_names.clone(),
)
.update_view_info(view_id, &current_view_info_value, new_logical_plan.clone())
.await
.unwrap();
// if table info was updated, it should be ok.
table_metadata_manager
.update_view_info(
view_id,
&current_view_info_value,
new_logical_plan.clone(),
new_table_names.clone(),
)
.update_view_info(view_id, &current_view_info_value, new_logical_plan.clone())
.await
.unwrap();
@@ -2081,21 +2011,14 @@ mod tests {
.unwrap()
.into_inner();
assert_eq!(updated_view_info.view_info, new_logical_plan);
assert_eq!(updated_view_info.table_names, new_table_names);
let wrong_view_info = logical_plan.clone();
let wrong_view_info_value = DeserializedValueWithBytes::from_inner(
current_view_info_value.update(wrong_view_info, new_table_names.clone()),
);
let wrong_view_info_value =
DeserializedValueWithBytes::from_inner(current_view_info_value.update(wrong_view_info));
// if the current_view_info_value is wrong, it should return an error.
// The ABA problem.
assert!(table_metadata_manager
.update_view_info(
view_id,
&wrong_view_info_value,
new_logical_plan.clone(),
new_table_names.clone(),
)
.update_view_info(view_id, &wrong_view_info_value, new_logical_plan.clone())
.await
.is_err());
@@ -2108,6 +2031,5 @@ mod tests {
.unwrap()
.into_inner();
assert_eq!(current_view_info.view_info, new_logical_plan);
assert_eq!(current_view_info.table_names, new_table_names);
}
}

View File

@@ -72,8 +72,12 @@ impl DatanodeTableKey {
}
}
pub fn prefix(datanode_id: DatanodeId) -> String {
format!("{}/{datanode_id}/", DATANODE_TABLE_KEY_PREFIX)
fn prefix(datanode_id: DatanodeId) -> String {
format!("{}/{datanode_id}", DATANODE_TABLE_KEY_PREFIX)
}
pub fn range_start_key(datanode_id: DatanodeId) -> String {
format!("{}/", Self::prefix(datanode_id))
}
}
@@ -110,7 +114,7 @@ impl<'a> MetaKey<'a, DatanodeTableKey> for DatanodeTableKey {
impl Display for DatanodeTableKey {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}{}", Self::prefix(self.datanode_id), self.table_id)
write!(f, "{}/{}", Self::prefix(self.datanode_id), self.table_id)
}
}
@@ -160,7 +164,7 @@ impl DatanodeTableManager {
&self,
datanode_id: DatanodeId,
) -> BoxStream<'static, Result<DatanodeTableValue>> {
let start_key = DatanodeTableKey::prefix(datanode_id);
let start_key = DatanodeTableKey::range_start_key(datanode_id);
let req = RangeRequest::new().with_prefix(start_key.as_bytes());
let stream = PaginationStream::new(

View File

@@ -262,12 +262,12 @@ mod tests {
use futures::TryStreamExt;
use table::metadata::TableId;
use table::table_name::TableName;
use super::*;
use crate::key::flow::table_flow::TableFlowKey;
use crate::key::FlowPartitionId;
use crate::kv_backend::memory::MemoryKvBackend;
use crate::table_name::TableName;
use crate::FlownodeId;
#[derive(Debug)]

View File

@@ -20,7 +20,6 @@ use regex::Regex;
use serde::{Deserialize, Serialize};
use snafu::OptionExt;
use table::metadata::TableId;
use table::table_name::TableName;
use crate::error::{self, Result};
use crate::key::flow::FlowScoped;
@@ -28,6 +27,7 @@ use crate::key::txn_helper::TxnOpGetResponseSet;
use crate::key::{DeserializedValueWithBytes, FlowId, FlowPartitionId, MetaKey, TableMetaValue};
use crate::kv_backend::txn::Txn;
use crate::kv_backend::KvBackendRef;
use crate::table_name::TableName;
use crate::FlownodeId;
const FLOW_INFO_KEY_PREFIX: &str = "info";

View File

@@ -69,7 +69,8 @@ impl FlownodeFlowKey {
/// The prefix used to retrieve all [FlownodeFlowKey]s with the specified `flownode_id`.
pub fn range_start_key(flownode_id: FlownodeId) -> Vec<u8> {
let inner = BytesAdapter::from(FlownodeFlowKeyInner::prefix(flownode_id).into_bytes());
let inner =
BytesAdapter::from(FlownodeFlowKeyInner::range_start_key(flownode_id).into_bytes());
FlowScoped::new(inner).to_bytes()
}
@@ -107,8 +108,13 @@ impl FlownodeFlowKeyInner {
}
}
pub fn prefix(flownode_id: FlownodeId) -> String {
format!("{}/{flownode_id}/", FLOWNODE_FLOW_KEY_PREFIX)
fn prefix(flownode_id: FlownodeId) -> String {
format!("{}/{flownode_id}", FLOWNODE_FLOW_KEY_PREFIX)
}
/// The prefix used to retrieve all [FlownodeFlowKey]s with the specified `flownode_id`.
fn range_start_key(flownode_id: FlownodeId) -> String {
format!("{}/", Self::prefix(flownode_id))
}
}

View File

@@ -80,7 +80,7 @@ impl TableFlowKey {
/// The prefix used to retrieve all [TableFlowKey]s with the specified `table_id`.
pub fn range_start_key(table_id: TableId) -> Vec<u8> {
let inner = BytesAdapter::from(TableFlowKeyInner::prefix(table_id).into_bytes());
let inner = BytesAdapter::from(TableFlowKeyInner::range_start_key(table_id).into_bytes());
FlowScoped::new(inner).to_bytes()
}
@@ -123,7 +123,12 @@ impl TableFlowKeyInner {
}
fn prefix(table_id: TableId) -> String {
format!("{}/{table_id}/", TABLE_FLOW_KEY_PREFIX)
format!("{}/{table_id}", TABLE_FLOW_KEY_PREFIX)
}
/// The prefix used to retrieve all [TableFlowKey]s with the specified `table_id`.
fn range_start_key(table_id: TableId) -> String {
format!("{}/", Self::prefix(table_id))
}
}

View File

@@ -19,7 +19,6 @@ use std::sync::Arc;
use serde::{Deserialize, Serialize};
use snafu::OptionExt;
use table::metadata::{RawTableInfo, TableId};
use table::table_name::TableName;
use table::table_reference::TableReference;
use super::TABLE_INFO_KEY_PATTERN;
@@ -29,6 +28,7 @@ use crate::key::{DeserializedValueWithBytes, MetaKey, TableMetaValue, TABLE_INFO
use crate::kv_backend::txn::Txn;
use crate::kv_backend::KvBackendRef;
use crate::rpc::store::BatchGetRequest;
use crate::table_name::TableName;
/// The key stores the metadata of the table.
///

View File

@@ -20,7 +20,6 @@ use futures_util::stream::BoxStream;
use serde::{Deserialize, Serialize};
use snafu::OptionExt;
use table::metadata::TableId;
use table::table_name::TableName;
use super::{MetaKey, TableMetaValue, TABLE_NAME_KEY_PATTERN, TABLE_NAME_KEY_PREFIX};
use crate::error::{Error, InvalidTableMetadataSnafu, Result};
@@ -30,6 +29,7 @@ use crate::kv_backend::KvBackendRef;
use crate::range_stream::{PaginationStream, DEFAULT_PAGE_SIZE};
use crate::rpc::store::{BatchGetRequest, RangeRequest};
use crate::rpc::KeyValue;
use crate::table_name::TableName;
#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)]
pub struct TableNameKey<'a> {
@@ -48,7 +48,7 @@ impl<'a> TableNameKey<'a> {
}
pub fn prefix_to_table(catalog: &str, schema: &str) -> String {
format!("{}/{}/{}/", TABLE_NAME_KEY_PREFIX, catalog, schema)
format!("{}/{}/{}", TABLE_NAME_KEY_PREFIX, catalog, schema)
}
}
@@ -56,7 +56,7 @@ impl Display for TableNameKey<'_> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(
f,
"{}{}",
"{}/{}",
Self::prefix_to_table(self.catalog, self.schema),
self.table
)
@@ -268,11 +268,7 @@ impl TableNameManager {
#[cfg(test)]
mod tests {
use futures::StreamExt;
use super::*;
use crate::kv_backend::KvBackend;
use crate::rpc::store::PutRequest;
#[test]
fn test_strip_table_name() {
@@ -328,39 +324,4 @@ mod tests {
assert_eq!(value.try_as_raw_value().unwrap(), literal);
assert_eq!(TableNameValue::try_from_raw_value(literal).unwrap(), value);
}
#[tokio::test]
async fn test_prefix_scan_tables() {
let memory_kv = Arc::new(MemoryKvBackend::<crate::error::Error>::new());
memory_kv
.put(PutRequest {
key: TableNameKey {
catalog: "greptime",
schema: "👉",
table: "t",
}
.to_bytes(),
value: vec![],
prev_kv: false,
})
.await
.unwrap();
memory_kv
.put(PutRequest {
key: TableNameKey {
catalog: "greptime",
schema: "👉👈",
table: "t",
}
.to_bytes(),
value: vec![],
prev_kv: false,
})
.await
.unwrap();
let manager = TableNameManager::new(memory_kv);
let items = manager.tables("greptime", "👉").collect::<Vec<_>>().await;
assert_eq!(items.len(), 1);
}
}
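The key-prefix changes in this and the neighbouring hunks (`DatanodeTableKey`, `FlownodeFlowKey`, `TableFlowKey`, `TableNameKey`) all turn on whether the scan key carries a trailing `/`. Without it, a range scan for schema `👉` also matches `👉👈`, which is exactly what the removed `test_prefix_scan_tables` test above exercises. A self-contained sketch of the boundary issue, using plain strings rather than the real kv-backend:

```rust
// Self-contained sketch: scanning with a bare prefix also matches sibling keys
// that merely share the same leading bytes; the trailing separator bounds the
// scan to exactly one schema, which is what the removed test above checks.
fn scan_with_prefix<'a>(keys: &[&'a str], prefix: &str) -> Vec<&'a str> {
    keys.iter().copied().filter(|key| key.starts_with(prefix)).collect()
}

fn main() {
    let keys = [
        "__table_name/greptime/👉/t",
        "__table_name/greptime/👉👈/t",
    ];
    // Without the trailing '/', both schemas match.
    assert_eq!(scan_with_prefix(&keys, "__table_name/greptime/👉").len(), 2);
    // With it, only the intended schema matches.
    assert_eq!(scan_with_prefix(&keys, "__table_name/greptime/👉/").len(), 1);
}
```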

View File

@@ -0,0 +1,130 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::BTreeMap;
use std::fmt::Display;
use lazy_static::lazy_static;
use regex::Regex;
use serde::{Deserialize, Serialize};
use snafu::{OptionExt, ResultExt};
use store_api::storage::RegionNumber;
use table::metadata::TableId;
use super::{MetaKey, TABLE_REGION_KEY_PREFIX};
use crate::error::{InvalidTableMetadataSnafu, Result, SerdeJsonSnafu};
use crate::{impl_table_meta_value, DatanodeId};
pub type RegionDistribution = BTreeMap<DatanodeId, Vec<RegionNumber>>;
#[deprecated(
since = "0.4.0",
note = "Please use the TableRouteManager's get_region_distribution method instead"
)]
#[derive(Debug, PartialEq)]
pub struct TableRegionKey {
table_id: TableId,
}
lazy_static! {
static ref TABLE_REGION_KEY_PATTERN: Regex =
Regex::new(&format!("^{TABLE_REGION_KEY_PREFIX}/([0-9]+)$")).unwrap();
}
impl TableRegionKey {
pub fn new(table_id: TableId) -> Self {
Self { table_id }
}
}
impl Display for TableRegionKey {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}/{}", TABLE_REGION_KEY_PREFIX, self.table_id)
}
}
impl<'a> MetaKey<'a, TableRegionKey> for TableRegionKey {
fn to_bytes(&self) -> Vec<u8> {
self.to_string().into_bytes()
}
fn from_bytes(bytes: &'a [u8]) -> Result<TableRegionKey> {
let key = std::str::from_utf8(bytes).map_err(|e| {
InvalidTableMetadataSnafu {
err_msg: format!(
"TableRegionKey '{}' is not a valid UTF8 string: {e}",
String::from_utf8_lossy(bytes)
),
}
.build()
})?;
let captures =
TABLE_REGION_KEY_PATTERN
.captures(key)
.context(InvalidTableMetadataSnafu {
err_msg: format!("Invalid TableRegionKey '{key}'"),
})?;
// Safety: passed the regex check above
let table_id = captures[1].parse::<TableId>().unwrap();
Ok(TableRegionKey { table_id })
}
}
#[deprecated(
since = "0.4.0",
note = "Please use the TableRouteManager's get_region_distribution method instead"
)]
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct TableRegionValue {
pub region_distribution: RegionDistribution,
version: u64,
}
impl TableRegionValue {
pub fn new(region_distribution: RegionDistribution) -> Self {
Self {
region_distribution,
version: 0,
}
}
}
impl_table_meta_value! {TableRegionValue}
#[cfg(test)]
mod tests {
use super::*;
use crate::key::TableMetaValue;
#[test]
fn test_serialization() {
let key = TableRegionKey::new(24);
let raw_key = key.to_bytes();
assert_eq!(raw_key, b"__table_region/24");
let deserialized = TableRegionKey::from_bytes(b"__table_region/24").unwrap();
assert_eq!(key, deserialized);
let value = TableRegionValue {
region_distribution: RegionDistribution::from([(1, vec![1, 2, 3]), (2, vec![4, 5, 6])]),
version: 0,
};
let literal = br#"{"region_distribution":{"1":[1,2,3],"2":[4,5,6]},"version":0}"#;
assert_eq!(value.try_as_raw_value().unwrap(), literal);
assert_eq!(
TableRegionValue::try_from_raw_value(literal).unwrap(),
value,
);
}
}

View File

@@ -12,14 +12,12 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::{HashMap, HashSet};
use std::collections::HashMap;
use std::fmt::Display;
use std::sync::Arc;
use serde::{Deserialize, Serialize};
use snafu::OptionExt;
use table::metadata::TableId;
use table::table_name::TableName;
use super::VIEW_INFO_KEY_PATTERN;
use crate::error::{InvalidViewInfoSnafu, Result};
@@ -82,30 +80,21 @@ impl<'a> MetaKey<'a, ViewInfoKey> for ViewInfoKey {
/// The VIEW info value that keeps the metadata.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct ViewInfoValue {
/// The encoded logical plan
pub view_info: RawViewLogicalPlan,
/// The resolved fully table names in logical plan
pub table_names: HashSet<TableName>,
version: u64,
}
impl ViewInfoValue {
pub fn new(view_info: RawViewLogicalPlan, table_names: HashSet<TableName>) -> Self {
pub fn new(view_info: &RawViewLogicalPlan) -> Self {
Self {
view_info,
table_names,
view_info: view_info.clone(),
version: 0,
}
}
pub(crate) fn update(
&self,
new_view_info: RawViewLogicalPlan,
table_names: HashSet<TableName>,
) -> Self {
pub(crate) fn update(&self, new_view_info: RawViewLogicalPlan) -> Self {
Self {
view_info: new_view_info,
table_names,
version: self.version + 1,
}
}
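`update` above bumps the internal `version`, and `update_view_info` only applies it against the previously read value; the "ABA problem" assertion in the `key` tests earlier in this diff exercises exactly that guard. Below is a simplified, storage-free sketch of the compare-then-update idea, not the real transaction-based implementation.

```rust
// Storage-free sketch of the compare-then-update idea (the real manager runs a
// transaction against the previously read value); a stale snapshot is rejected
// instead of silently overwriting a newer version.
#[derive(Clone, Debug, PartialEq)]
struct Versioned {
    payload: Vec<u8>,
    version: u64,
}

fn compare_and_update(
    stored: &mut Versioned,
    expected: &Versioned,
    new_payload: Vec<u8>,
) -> Result<(), String> {
    if stored != expected {
        return Err(format!(
            "stale snapshot: stored version {} differs from expected version {}",
            stored.version, expected.version
        ));
    }
    stored.payload = new_payload;
    stored.version += 1;
    Ok(())
}

fn main() {
    let mut stored = Versioned { payload: vec![1, 2, 3], version: 0 };
    let snapshot = stored.clone();
    compare_and_update(&mut stored, &snapshot, vec![4, 5, 6]).unwrap();
    // Re-using the old snapshot fails: the stored value has moved on.
    assert!(compare_and_update(&mut stored, &snapshot, vec![7, 8, 9]).is_err());
}
```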
@@ -116,8 +105,6 @@ pub struct ViewInfoManager {
kv_backend: KvBackendRef,
}
pub type ViewInfoManagerRef = Arc<ViewInfoManager>;
impl ViewInfoManager {
pub fn new(kv_backend: KvBackendRef) -> Self {
Self { kv_backend }
@@ -267,25 +254,9 @@ mod tests {
#[test]
fn test_value_serialization() {
let table_names = {
let mut set = HashSet::new();
set.insert(TableName {
catalog_name: "greptime".to_string(),
schema_name: "public".to_string(),
table_name: "a_table".to_string(),
});
set.insert(TableName {
catalog_name: "greptime".to_string(),
schema_name: "public".to_string(),
table_name: "b_table".to_string(),
});
set
};
let value = ViewInfoValue {
view_info: vec![1, 2, 3],
version: 1,
table_names,
};
let serialized = value.try_as_raw_value().unwrap();
let deserialized = ViewInfoValue::try_from_raw_value(&serialized).unwrap();

View File

@@ -40,6 +40,7 @@ pub mod region_keeper;
pub mod rpc;
pub mod sequence;
pub mod state_store;
pub mod table_name;
#[cfg(any(test, feature = "testing"))]
pub mod test_util;
pub mod util;

View File

@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::{HashMap, HashSet};
use std::collections::HashMap;
use std::result;
use api::v1::meta::ddl_task_request::Task;
@@ -39,11 +39,11 @@ use serde_with::{serde_as, DefaultOnNull};
use session::context::QueryContextRef;
use snafu::{OptionExt, ResultExt};
use table::metadata::{RawTableInfo, TableId};
use table::table_name::TableName;
use table::table_reference::TableReference;
use crate::error::{self, Result};
use crate::key::FlowId;
use crate::table_name::TableName;
/// DDL tasks
#[derive(Debug, Clone)]
@@ -274,7 +274,10 @@ impl TryFrom<SubmitDdlTaskRequest> for PbDdlTaskRequest {
#[derive(Debug, Default)]
pub struct SubmitDdlTaskResponse {
pub key: Vec<u8>,
// `table_id`s for `CREATE TABLE` or `CREATE LOGICAL TABLES` task.
// For create physical table
// TODO(jeremy): remove it?
pub table_id: Option<TableId>,
// For create multi logical tables
pub table_ids: Vec<TableId>,
}
@@ -282,9 +285,11 @@ impl TryFrom<PbDdlTaskResponse> for SubmitDdlTaskResponse {
type Error = error::Error;
fn try_from(resp: PbDdlTaskResponse) -> Result<Self> {
let table_id = resp.table_id.map(|t| t.id);
let table_ids = resp.table_ids.into_iter().map(|t| t.id).collect();
Ok(Self {
key: resp.pid.map(|pid| pid.key).unwrap_or_default(),
table_id,
table_ids,
})
}
@@ -294,6 +299,9 @@ impl From<SubmitDdlTaskResponse> for PbDdlTaskResponse {
fn from(val: SubmitDdlTaskResponse) -> Self {
Self {
pid: Some(ProcedureId { key: val.key }),
table_id: val
.table_id
.map(|table_id| api::v1::TableId { id: table_id }),
table_ids: val
.table_ids
.into_iter()
@@ -324,14 +332,6 @@ impl CreateViewTask {
pub fn raw_logical_plan(&self) -> &Vec<u8> {
&self.create_view.logical_plan
}
pub fn table_names(&self) -> HashSet<TableName> {
self.create_view
.table_names
.iter()
.map(|t| t.clone().into())
.collect()
}
}
impl TryFrom<PbCreateViewTask> for CreateViewTask {

View File

@@ -25,11 +25,11 @@ use serde::{Deserialize, Deserializer, Serialize, Serializer};
use snafu::OptionExt;
use store_api::storage::{RegionId, RegionNumber};
use strum::AsRefStr;
use table::table_name::TableName;
use crate::error::{self, Result};
use crate::key::RegionDistribution;
use crate::peer::Peer;
use crate::table_name::TableName;
use crate::DatanodeId;
pub fn region_distribution(region_routes: &[RegionRoute]) -> RegionDistribution {

View File

@@ -16,8 +16,7 @@ use std::fmt::{Display, Formatter};
use api::v1::TableName as PbTableName;
use serde::{Deserialize, Serialize};
use crate::table_reference::TableReference;
use table::table_reference::TableReference;
#[derive(Debug, Clone, Hash, Eq, PartialEq, Deserialize, Serialize)]
pub struct TableName {

View File

@@ -179,7 +179,7 @@ impl StateStore for ObjectStateStore {
))
})
.context(ListStateSnafu { path: key })?;
yield (key.into(), value.to_vec());
yield (key.into(), value);
}
}
});

View File

@@ -4,16 +4,12 @@ version.workspace = true
edition.workspace = true
license.workspace = true
[features]
testing = []
[lints]
workspace = true
[dependencies]
api.workspace = true
async-trait.workspace = true
bytes.workspace = true
common-error.workspace = true
common-macro.workspace = true
common-recordbatch.workspace = true

View File

@@ -206,13 +206,6 @@ pub enum Error {
location: Location,
},
#[snafu(display("Failed to decode logical plan: {source}"))]
DecodePlan {
#[snafu(implicit)]
location: Location,
source: BoxedError,
},
#[snafu(display("Failed to do table mutation"))]
TableMutation {
source: BoxedError,
@@ -289,12 +282,11 @@ impl ErrorExt for Error {
| Error::InvalidFuncArgs { .. } => StatusCode::InvalidArguments,
Error::ConvertDfRecordBatchStream { source, .. } => source.status_code(),
Error::DecodePlan { source, .. }
| Error::Execute { source, .. }
| Error::ExecutePhysicalPlan { source, .. }
| Error::ProcedureService { source, .. }
| Error::TableMutation { source, .. } => source.status_code(),
Error::ExecutePhysicalPlan { source, .. } => source.status_code(),
Error::Execute { source, .. } => source.status_code(),
Error::ProcedureService { source, .. } | Error::TableMutation { source, .. } => {
source.status_code()
}
Error::PermissionDenied { .. } => StatusCode::PermissionDenied,
}

View File

@@ -18,8 +18,7 @@ mod function;
pub mod logical_plan;
pub mod prelude;
mod signature;
#[cfg(any(test, feature = "testing"))]
pub mod test_util;
use std::fmt::{Debug, Display, Formatter};
use std::sync::Arc;

View File

@@ -19,15 +19,12 @@ mod udf;
use std::sync::Arc;
use datafusion::catalog::CatalogProviderList;
use datafusion::logical_expr::LogicalPlan;
use datatypes::prelude::ConcreteDataType;
pub use expr::build_filter_from_timestamp;
pub use self::accumulator::{Accumulator, AggregateFunctionCreator, AggregateFunctionCreatorRef};
pub use self::udaf::AggregateFunction;
pub use self::udf::ScalarUdf;
use crate::error::Result;
use crate::function::{ReturnTypeFunction, ScalarFunctionImplementation};
use crate::logical_plan::accumulator::*;
use crate::signature::{Signature, Volatility};
@@ -71,25 +68,6 @@ pub fn create_aggregate_function(
)
}
/// The datafusion `[LogicalPlan]` decoder.
#[async_trait::async_trait]
pub trait SubstraitPlanDecoder {
/// Decode the [`LogicalPlan`] from bytes with the [`CatalogProviderList`].
/// When `optimize` is true, the decoded plan is optimized as well.
///
/// TODO(dennis): It's not a good design for an API to do many things.
/// The `optimize` flag was introduced because of the cyclic dependency between `query` and `catalog`;
/// I am happy to refactor it if we have a better solution.
async fn decode(
&self,
message: bytes::Bytes,
catalog_list: Arc<dyn CatalogProviderList>,
optimize: bool,
) -> Result<LogicalPlan>;
}
pub type SubstraitPlanDecoderRef = Arc<dyn SubstraitPlanDecoder + Send + Sync>;
#[cfg(test)]
mod tests {
use std::sync::Arc;

View File

@@ -22,7 +22,7 @@ use std::sync::Arc;
use datafusion::arrow::datatypes::Field;
use datafusion_common::Result;
use datafusion_expr::function::{AccumulatorArgs, StateFieldsArgs};
use datafusion_expr::function::AccumulatorArgs;
use datafusion_expr::{
Accumulator, AccumulatorFactoryFunction, AggregateUDF as DfAggregateUdf, AggregateUDFImpl,
};
@@ -129,13 +129,13 @@ impl AggregateUDFImpl for DfUdafAdapter {
(self.accumulator)(acc_args)
}
fn state_fields(&self, args: StateFieldsArgs) -> Result<Vec<Field>> {
fn state_fields(&self, name: &str, _: ArrowDataType, _: Vec<Field>) -> Result<Vec<Field>> {
let state_types = self.creator.state_types()?;
let fields = state_types
.into_iter()
.enumerate()
.map(|(i, t)| {
let name = format!("{}_{i}", args.name);
let name = format!("{name}_{i}");
Field::new(name, t.as_arrow_type(), true)
})
.collect::<Vec<_>>();

View File

@@ -108,10 +108,6 @@ impl ScalarUDFImpl for DfUdfAdapter {
fn invoke(&self, args: &[DfColumnarValue]) -> datafusion_common::Result<DfColumnarValue> {
(self.fun)(args)
}
fn invoke_no_args(&self, number_rows: usize) -> datafusion_common::Result<DfColumnarValue> {
Ok((self.fun)(&[])?.into_array(number_rows)?.into())
}
}
impl From<ScalarUdf> for DfScalarUDF {

View File

@@ -27,6 +27,10 @@ pub enum TypeSignature {
/// arbitrary number of arguments of a common type out of a list of valid types
// A function such as `concat` is `Variadic(vec![ConcreteDataType::String, ConcreteDataType::String])`
Variadic(Vec<ConcreteDataType>),
/// arbitrary number of arguments of an arbitrary but equal type
// A function such as `array` is `VariadicEqual`
// The first argument decides the type used for coercion
VariadicEqual,
/// One or more arguments with arbitrary types
VariadicAny,
/// fixed number of arguments of an arbitrary but equal type out of a list of valid types
@@ -63,7 +67,6 @@ impl Signature {
volatility,
}
}
/// variadic - Creates a variadic signature that represents an arbitrary number of arguments all from a type in common_types.
pub fn variadic(common_types: Vec<ConcreteDataType>, volatility: Volatility) -> Self {
Self {
@@ -71,6 +74,13 @@ impl Signature {
volatility,
}
}
/// variadic_equal - Creates a variadic signature that represents an arbitrary number of arguments of the same type.
pub fn variadic_equal(volatility: Volatility) -> Self {
Self {
type_signature: TypeSignature::VariadicEqual,
volatility,
}
}
/// variadic_any - Creates a variadic signature that represents an arbitrary number of arguments of any type.
pub fn variadic_any(volatility: Volatility) -> Self {
@@ -121,6 +131,7 @@ impl From<TypeSignature> for DfTypeSignature {
TypeSignature::Variadic(types) => {
DfTypeSignature::Variadic(concrete_types_to_arrow_types(types))
}
TypeSignature::VariadicEqual => DfTypeSignature::VariadicEqual,
TypeSignature::Uniform(n, types) => {
DfTypeSignature::Uniform(n, concrete_types_to_arrow_types(types))
}

View File

@@ -1,42 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
use datafusion::catalog::CatalogProviderList;
use datafusion::logical_expr::LogicalPlan;
use crate::error::Result;
use crate::logical_plan::SubstraitPlanDecoder;
/// Dummy `[SubstraitPlanDecoder]` for test.
pub struct DummyDecoder;
impl DummyDecoder {
pub fn arc() -> Arc<Self> {
Arc::new(DummyDecoder)
}
}
#[async_trait::async_trait]
impl SubstraitPlanDecoder for DummyDecoder {
async fn decode(
&self,
_message: bytes::Bytes,
_catalog_list: Arc<dyn CatalogProviderList>,
_optimize: bool,
) -> Result<LogicalPlan> {
unreachable!()
}
}

View File

@@ -292,7 +292,7 @@ impl ExecutionPlanVisitor for MetricCollector {
// skip if no metric available
let Some(metric) = plan.metrics() else {
self.record_batch_metrics.plan_metrics.push(PlanMetrics {
plan: std::any::type_name::<Self>().to_string(),
plan: plan.name().to_string(),
level: self.current_level,
metrics: vec![],
});

View File

@@ -13,15 +13,13 @@ common-error.workspace = true
common-macro.workspace = true
common-telemetry.workspace = true
lazy_static.workspace = true
num_cpus.workspace = true
once_cell.workspace = true
paste.workspace = true
prometheus.workspace = true
serde.workspace = true
snafu.workspace = true
tokio.workspace = true
tokio-metrics = "0.3"
tokio-metrics-collector = { git = "https://github.com/MichaelScofield/tokio-metrics-collector.git", rev = "89d692d5753d28564a7aac73c6ac5aba22243ba0" }
tokio-metrics-collector = "0.2"
tokio-util.workspace = true
[dev-dependencies]

View File

@@ -19,7 +19,6 @@ use std::sync::{Mutex, Once};
use common_telemetry::info;
use once_cell::sync::Lazy;
use paste::paste;
use serde::{Deserialize, Serialize};
use crate::{Builder, JoinHandle, Runtime};
@@ -27,28 +26,6 @@ const READ_WORKERS: usize = 8;
const WRITE_WORKERS: usize = 8;
const BG_WORKERS: usize = 8;
/// The options for the global runtimes.
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
pub struct RuntimeOptions {
/// The number of threads to execute the runtime for global read operations.
pub read_rt_size: usize,
/// The number of threads to execute the runtime for global write operations.
pub write_rt_size: usize,
/// The number of threads to execute the runtime for global background operations.
pub bg_rt_size: usize,
}
impl Default for RuntimeOptions {
fn default() -> Self {
let cpus = num_cpus::get();
Self {
read_rt_size: cpus,
write_rt_size: cpus,
bg_rt_size: cpus,
}
}
}
pub fn create_runtime(runtime_name: &str, thread_name: &str, worker_threads: usize) -> Runtime {
info!("Creating runtime with runtime_name: {runtime_name}, thread_name: {thread_name}, work_threads: {worker_threads}.");
Builder::default()
@@ -135,26 +112,18 @@ static CONFIG_RUNTIMES: Lazy<Mutex<ConfigRuntimes>> =
/// # Panics
/// Panics when the global runtimes are already initialized.
/// You should call this function before using any runtime functions.
pub fn init_global_runtimes(options: &RuntimeOptions) {
pub fn init_global_runtimes(
read: Option<Runtime>,
write: Option<Runtime>,
background: Option<Runtime>,
) {
static START: Once = Once::new();
START.call_once(move || {
let mut c = CONFIG_RUNTIMES.lock().unwrap();
assert!(!c.already_init, "Global runtimes already initialized");
c.read_runtime = Some(create_runtime(
"global-read",
"global-read-worker",
options.read_rt_size,
));
c.write_runtime = Some(create_runtime(
"global-write",
"global-write-worker",
options.write_rt_size,
));
c.bg_runtime = Some(create_runtime(
"global-bg",
"global-bg-worker",
options.bg_rt_size,
));
c.read_runtime = read;
c.write_runtime = write;
c.bg_runtime = background;
});
}
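The removed `RuntimeOptions` sized the three global runtimes (read, write, background), each defaulting to the CPU count, while `create_runtime` above still builds one dedicated multi-thread runtime per role. A minimal sketch of such a sized runtime using plain tokio follows; the thread name and size here are illustrative.

```rust
use tokio::runtime::{Builder, Runtime};

// Builds a dedicated multi-thread runtime for one role, as create_runtime does.
fn build_runtime(thread_name: &str, worker_threads: usize) -> Runtime {
    Builder::new_multi_thread()
        .worker_threads(worker_threads)
        .thread_name(thread_name)
        .enable_all()
        .build()
        .expect("failed to build runtime")
}

fn main() {
    // Illustrative size; the removed RuntimeOptions defaulted this to num_cpus.
    let read_rt = build_runtime("global-read-worker", 8);
    let answer = read_rt.block_on(async { 21 * 2 });
    assert_eq!(answer, 42);
}
```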

View File

@@ -13,7 +13,7 @@
// limitations under the License.
pub mod error;
pub mod global;
mod global;
mod metrics;
mod repeated_task;
pub mod runtime;

Some files were not shown because too many files have changed in this diff.