Mirror of https://github.com/GreptimeTeam/greptimedb.git (synced 2025-12-24 15:09:59 +00:00)

Compare commits: 11 commits, v0.5.0...script_wra
| Author | SHA1 | Date |
|---|---|---|
|  | 24f5e56196 |  |
|  | c85d569797 |  |
|  | e95a8e070c |  |
|  | b71bf11772 |  |
|  | ee0a3972fc |  |
|  | 8fb40c66a4 |  |
|  | e855f6370e |  |
|  | fb5dcbc40c |  |
|  | 0d109436b8 |  |
|  | cbae03af07 |  |
|  | 902e6ead60 |  |
@@ -40,11 +40,9 @@ runs:
    - name: Upload artifacts
      uses: ./.github/actions/upload-artifacts
      if: ${{ inputs.build-android-artifacts == 'false' }}
      env:
        PROFILE_TARGET: ${{ inputs.cargo-profile == 'dev' && 'debug' || inputs.cargo-profile }}
      with:
        artifacts-dir: ${{ inputs.artifacts-dir }}
        target-file: ./target/$PROFILE_TARGET/greptime
        target-file: ./target/${{ inputs.cargo-profile }}/greptime
        version: ${{ inputs.version }}
        working-dir: ${{ inputs.working-dir }}
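The `PROFILE_TARGET` expression in this hunk encodes cargo's convention that the `dev` profile writes its output to `target/debug`, while every other profile writes to `target/<profile-name>`. A minimal Rust sketch of that mapping (a hypothetical helper for illustration, not code from this repository):

```rust
// Mirrors the cargo convention referenced by the workflow expression:
// the "dev" profile builds into target/debug, other profiles into target/<profile>.
fn profile_target_dir(cargo_profile: &str) -> &str {
    if cargo_profile == "dev" {
        "debug"
    } else {
        cargo_profile
    }
}

fn main() {
    assert_eq!(profile_target_dir("dev"), "debug");
    assert_eq!(profile_target_dir("nightly"), "nightly");
    println!("./target/{}/greptime", profile_target_dir("dev"));
}
```

Using `target-file: ./target/${{ inputs.cargo-profile }}/greptime` directly only resolves correctly for profiles whose name matches their output directory (such as `release` or `nightly`), not for `dev`.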
.github/actions/upload-artifacts/action.yml (2 changes, vendored)

@@ -22,7 +22,7 @@ runs:
      shell: bash
      run: |
        mkdir -p ${{ inputs.artifacts-dir }} && \
        cp ${{ inputs.target-file }} ${{ inputs.artifacts-dir }}
        mv ${{ inputs.target-file }} ${{ inputs.artifacts-dir }}

    # The compressed artifacts will use the following layout:
    # greptime-linux-amd64-pyo3-v0.3.0sha256sum
.github/doc-label-config.yml (4 changes, vendored)

@@ -1,4 +0,0 @@
Doc not needed:
  - '- \[x\] This PR does not require documentation updates.'
Doc update required:
  - '- \[ \] This PR does not require documentation updates.'
.github/pull_request_template.md (1 change, vendored)

@@ -15,6 +15,5 @@ Please explain IN DETAIL what the changes are in this PR and why they are needed

- [ ] I have written the necessary rustdoc comments.
- [ ] I have added the necessary unit tests and integration tests.
- [ ] This PR does not require documentation updates.

## Refer to a related PR or issue link (optional)
.github/scripts/deploy-greptimedb.sh (7 changes, vendored)

@@ -107,9 +107,12 @@ function deploy_greptimedb_cluster_with_s3_storage() {
    --set storage.s3.bucket="$AWS_CI_TEST_BUCKET" \
    --set storage.s3.region="$AWS_REGION" \
    --set storage.s3.root="$DATA_ROOT" \
    --set storage.s3.secretName=s3-credentials \
    --set storage.credentials.secretName=s3-credentials \
    --set storage.credentials.accessKeyId="$AWS_ACCESS_KEY_ID" \
    --set storage.credentials.secretAccessKey="$AWS_SECRET_ACCESS_KEY"
    --set storage.credentials.secretCreation.enabled=true \
    --set storage.credentials.secretCreation.enableEncryption=false \
    --set storage.credentials.secretCreation.data.access-key-id="$AWS_ACCESS_KEY_ID" \
    --set storage.credentials.secretCreation.data.secret-access-key="$AWS_SECRET_ACCESS_KEY"

  # Wait for greptimedb cluster to be ready.
  while true; do
.github/workflows/apidoc.yml (2 changes, vendored)

@@ -13,7 +13,7 @@ on:
name: Build API docs

env:
  RUST_TOOLCHAIN: nightly-2023-12-19
  RUST_TOOLCHAIN: nightly-2023-10-21

jobs:
  apidoc:
.github/workflows/dev-build.yml (10 changes, vendored)

@@ -55,18 +55,10 @@ on:
        description: Build and push images to DockerHub and ACR
        required: false
        default: true
      cargo_profile:
        type: choice
        description: The cargo profile to use in building GreptimeDB.
        default: nightly
        options:
          - dev
          - release
          - nightly

# Use env variables to control all the release process.
env:
  CARGO_PROFILE: ${{ inputs.cargo_profile }}
  CARGO_PROFILE: nightly

  # Controls whether to run tests, include unit-test, integration-test and sqlness.
  DISABLE_RUN_TESTS: ${{ inputs.skip_test || vars.DEFAULT_SKIP_TEST }}
.github/workflows/develop.yml (6 changes, vendored)

@@ -29,7 +29,7 @@ concurrency:
  cancel-in-progress: true

env:
  RUST_TOOLCHAIN: nightly-2023-12-19
  RUST_TOOLCHAIN: nightly-2023-10-21

jobs:
  typos:

@@ -175,9 +175,6 @@ jobs:
      - name: Setup etcd server
        working-directory: tests-integration/fixtures/etcd
        run: docker compose -f docker-compose-standalone.yml up -d --wait
      - name: Setup kafka server
        working-directory: tests-integration/fixtures/kafka
        run: docker compose -f docker-compose-standalone.yml up -d --wait
      - name: Run nextest cases
        run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F pyo3_backend -F dashboard
        env:

@@ -189,7 +186,6 @@ jobs:
          GT_S3_ACCESS_KEY: ${{ secrets.S3_ACCESS_KEY }}
          GT_S3_REGION: ${{ secrets.S3_REGION }}
          GT_ETCD_ENDPOINTS: http://127.0.0.1:2379
          GT_KAFKA_ENDPOINTS: 127.0.0.1:9092
          UNITTEST_LOG_DIR: "__unittest_logs"
      - name: Codecov upload
        uses: codecov/codecov-action@v2
.github/workflows/doc-label.yml (20 changes, vendored)

@@ -1,20 +0,0 @@
name: "PR Doc Labeler"
on:
  pull_request:
    types: [opened, edited, synchronize, ready_for_review, auto_merge_enabled, labeled, unlabeled]

permissions:
  pull-requests: write
  contents: read

jobs:
  triage:
    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
    runs-on: ubuntu-latest
    steps:
      - uses: github/issue-labeler@v3.3
        with:
          configuration-path: .github/doc-label-config.yml
          enable-versioned-regex: false
          repo-token: ${{ secrets.GITHUB_TOKEN }}
          sync-labels: 1
.github/workflows/nightly-ci.yml (4 changes, vendored)

@@ -12,12 +12,11 @@ concurrency:
  cancel-in-progress: true

env:
  RUST_TOOLCHAIN: nightly-2023-12-19
  RUST_TOOLCHAIN: nightly-2023-10-21

jobs:
  sqlness:
    name: Sqlness Test
    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:

@@ -52,7 +51,6 @@ jobs:
        retention-days: 3

  test-on-windows:
    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
    runs-on: windows-latest-8-cores
    timeout-minutes: 60
    steps:

@@ -9,7 +9,6 @@ on:
jobs:
  sqlness-test:
    name: Run sqlness test
    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
    runs-on: ubuntu-22.04
    steps:
      - name: Checkout
.github/workflows/release.yml (4 changes, vendored)

@@ -82,7 +82,7 @@ on:
# Use env variables to control all the release process.
env:
  # The arguments of building greptime.
  RUST_TOOLCHAIN: nightly-2023-12-19
  RUST_TOOLCHAIN: nightly-2023-10-21
  CARGO_PROFILE: nightly

  # Controls whether to run tests, include unit-test, integration-test and sqlness.

@@ -91,7 +91,7 @@ env:
  # The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nigthly-20230313;
  NIGHTLY_RELEASE_PREFIX: nightly
  # Note: The NEXT_RELEASE_VERSION should be modified manually by every formal release.
  NEXT_RELEASE_VERSION: v0.6.0
  NEXT_RELEASE_VERSION: v0.5.0

jobs:
  allocate-runners:
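The comment in this hunk spells out the nightly tag scheme: the scheduled version is `NEXT_RELEASE_VERSION` plus a `-nightly-YYYYMMDD` suffix. A tiny Rust sketch of that naming rule (illustrative only, not part of the workflow):

```rust
// Builds the scheduled nightly tag described in the workflow comment,
// e.g. "v0.5.0-nightly-20230313" for NEXT_RELEASE_VERSION = "v0.5.0".
fn nightly_tag(next_release_version: &str, yyyymmdd: &str) -> String {
    format!("{next_release_version}-nightly-{yyyymmdd}")
}

fn main() {
    assert_eq!(nightly_tag("v0.5.0", "20230313"), "v0.5.0-nightly-20230313");
}
```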
.github/workflows/user-doc-label-checker.yml (19 changes, vendored)

@@ -1,19 +0,0 @@
name: Check user doc labels
on:
  pull_request:
    types:
      - opened
      - reopened
      - labeled
      - unlabeled

jobs:

  check_labels:
    name: Check doc labels
    runs-on: ubuntu-latest
    steps:
      - uses: docker://agilepathway/pull-request-label-checker:latest
        with:
          one_of: Doc update required,Doc not needed
          repo_token: ${{ secrets.GITHUB_TOKEN }}
Cargo.lock (522 changes, generated): file diff suppressed because it is too large.

Cargo.toml (14 changes)
@@ -58,7 +58,7 @@ members = [
resolver = "2"

[workspace.package]
version = "0.5.0"
version = "0.4.4"
edition = "2021"
license = "Apache-2.0"

@@ -75,9 +75,7 @@ base64 = "0.21"
bigdecimal = "0.4.2"
bitflags = "2.4.1"
bytemuck = "1.12"
bytes = { version = "1.5", features = ["serde"] }
chrono = { version = "0.4", features = ["serde"] }
dashmap = "5.4"
datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
datafusion-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }

@@ -90,7 +88,7 @@ etcd-client = "0.12"
fst = "0.4.7"
futures = "0.3"
futures-util = "0.3"
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "a31ea166fc015ea7ff111ac94e26c3a5d64364d2" }
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "b1d403088f02136bcebde53d604f491c260ca8e2" }
humantime-serde = "1.1"
itertools = "0.10"
lazy_static = "1.4"

@@ -117,7 +115,6 @@ reqwest = { version = "0.11", default-features = false, features = [
  "rustls-tls-native-roots",
  "stream",
] }
rskafka = "0.5"
rust_decimal = "1.33"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"

@@ -130,9 +127,8 @@ sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "6
strum = { version = "0.25", features = ["derive"] }
tempfile = "3"
tokio = { version = "1.28", features = ["full"] }
tokio-stream = { version = "0.1" }
tokio-util = { version = "0.7", features = ["io-util", "compat"] }
toml = "0.8.8"
toml = "0.7"
tonic = { version = "0.10", features = ["tls"] }
uuid = { version = "1", features = ["serde", "v4", "fast-rng"] }

@@ -172,7 +168,7 @@ frontend = { path = "src/frontend" }
log-store = { path = "src/log-store" }
meta-client = { path = "src/meta-client" }
meta-srv = { path = "src/meta-srv" }
metric-engine = { path = "src/metric-engine" }
mito = { path = "src/mito" }
mito2 = { path = "src/mito2" }
object-store = { path = "src/object-store" }
operator = { path = "src/operator" }

@@ -193,7 +189,7 @@ git = "https://github.com/GreptimeTeam/greptime-meter.git"
rev = "abbd357c1e193cd270ea65ee7652334a150b628f"

[profile.release]
debug = 1
debug = true

[profile.nightly]
inherits = "release"

@@ -12,10 +12,6 @@ rpc_runtime_size = 8
|
||||
# It will block the datanode start if it can't receive leases in the heartbeat from metasrv.
|
||||
require_lease_before_startup = false
|
||||
|
||||
# Initialize all regions in the background during the startup.
|
||||
# By default, it provides services after all regions have been initialized.
|
||||
initialize_region_in_background = false
|
||||
|
||||
[heartbeat]
|
||||
# Interval for sending heartbeat messages to the Metasrv, 3 seconds by default.
|
||||
interval = "3s"
|
||||
@@ -33,15 +29,9 @@ connect_timeout = "1s"
|
||||
# `TCP_NODELAY` option for accepted connections, true by default.
|
||||
tcp_nodelay = true
|
||||
|
||||
# WAL options.
|
||||
# Currently, users are expected to choose the wal through the provider field.
|
||||
# When a wal provider is chose, the user should comment out all other wal config
|
||||
# except those corresponding to the chosen one.
|
||||
# WAL options, see `standalone.example.toml`.
|
||||
[wal]
|
||||
# WAL data directory
|
||||
provider = "raft_engine"
|
||||
|
||||
# Raft-engine wal options, see `standalone.example.toml`.
|
||||
# dir = "/tmp/greptimedb/wal"
|
||||
file_size = "256MB"
|
||||
purge_threshold = "4GB"
|
||||
@@ -49,21 +39,10 @@ purge_interval = "10m"
|
||||
read_batch_size = 128
|
||||
sync_write = false
|
||||
|
||||
# Kafka wal options, see `standalone.example.toml`.
|
||||
# broker_endpoints = ["127.0.0.1:9092"]
|
||||
# max_batch_size = "4MB"
|
||||
# linger = "200ms"
|
||||
# produce_record_timeout = "100ms"
|
||||
# backoff_init = "500ms"
|
||||
# backoff_max = "10s"
|
||||
# backoff_base = 2
|
||||
# backoff_deadline = "5mins"
|
||||
|
||||
# Storage options, see `standalone.example.toml`.
|
||||
[storage]
|
||||
# The working home directory.
|
||||
data_home = "/tmp/greptimedb/"
|
||||
# Storage type.
|
||||
type = "File"
|
||||
# TTL for all tables. Disabled by default.
|
||||
# global_ttl = "7d"
|
||||
@@ -74,12 +53,6 @@ type = "File"
|
||||
# The local file cache capacity in bytes.
|
||||
# cache_capacity = "256MB"
|
||||
|
||||
# Custom storage options
|
||||
#[[storage.providers]]
|
||||
#type = "S3"
|
||||
#[[storage.providers]]
|
||||
#type = "Gcs"
|
||||
|
||||
# Mito engine options
|
||||
[[region_engine]]
|
||||
[region_engine.mito]
|
||||
@@ -109,31 +82,8 @@ vector_cache_size = "512MB"
|
||||
page_cache_size = "512MB"
|
||||
# Buffer size for SST writing.
|
||||
sst_write_buffer_size = "8MB"
|
||||
# Parallelism to scan a region (default: 1/4 of cpu cores).
|
||||
# - 0: using the default value (1/4 of cpu cores).
|
||||
# - 1: scan in current thread.
|
||||
# - n: scan in parallelism n.
|
||||
scan_parallelism = 0
|
||||
# Capacity of the channel to send data from parallel scan tasks to the main task (default 32).
|
||||
parallel_scan_channel_size = 32
|
||||
|
||||
# Log options, see `standalone.example.toml`
|
||||
# [logging]
|
||||
# dir = "/tmp/greptimedb/logs"
|
||||
# level = "info"
|
||||
|
||||
# Datanode export the metrics generated by itself
|
||||
# encoded to Prometheus remote-write format
|
||||
# and send to Prometheus remote-write compatible receiver (e.g. send to `greptimedb` itself)
|
||||
# This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
|
||||
# [export_metrics]
|
||||
# whether enable export metrics, default is false
|
||||
# enable = false
|
||||
# The url of metrics export endpoint, default is `frontend` default HTTP endpoint.
|
||||
# endpoint = "127.0.0.1:4000"
|
||||
# The database name of exported metrics stores, user needs to specify a valid database
|
||||
# db = ""
|
||||
# The interval of export metrics
|
||||
# write_interval = "30s"
|
||||
# HTTP headers of Prometheus remote-write carry
|
||||
# headers = {}
|
||||
|
||||
@@ -77,19 +77,3 @@ tcp_nodelay = true
|
||||
timeout = "10s"
|
||||
connect_timeout = "10s"
|
||||
tcp_nodelay = true
|
||||
|
||||
# Frontend export the metrics generated by itself
|
||||
# encoded to Prometheus remote-write format
|
||||
# and send to Prometheus remote-write compatible receiver (e.g. send to `greptimedb` itself)
|
||||
# This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
|
||||
# [export_metrics]
|
||||
# whether enable export metrics, default is false
|
||||
# enable = false
|
||||
# The url of metrics export endpoint, default is `frontend` default HTTP endpoint.
|
||||
# endpoint = "127.0.0.1:4000"
|
||||
# The database name of exported metrics stores, user needs to specify a valid database
|
||||
# db = ""
|
||||
# The interval of export metrics
|
||||
# write_interval = "30s"
|
||||
# HTTP headers of Prometheus remote-write carry
|
||||
# headers = {}
|
||||
|
||||
@@ -7,10 +7,10 @@ server_addr = "127.0.0.1:3002"
|
||||
# Etcd server address, "127.0.0.1:2379" by default.
|
||||
store_addr = "127.0.0.1:2379"
|
||||
# Datanode selector type.
|
||||
# - "lease_based" (default value).
|
||||
# - "load_based"
|
||||
# For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector".
|
||||
selector = "lease_based"
|
||||
# - "LeaseBased" (default value).
|
||||
# - "LoadBased"
|
||||
# For details, please see "https://docs.greptime.com/developer-guide/meta/selector".
|
||||
selector = "LeaseBased"
|
||||
# Store data in memory, false by default.
|
||||
use_memory_store = false
|
||||
# Whether to enable greptimedb telemetry, true by default.
|
||||
@@ -42,53 +42,3 @@ first_heartbeat_estimate = "1000ms"
|
||||
# timeout = "10s"
|
||||
# connect_timeout = "10s"
|
||||
# tcp_nodelay = true
|
||||
|
||||
[wal]
|
||||
# Available wal providers:
|
||||
# - "raft_engine" (default)
|
||||
# - "kafka"
|
||||
provider = "raft_engine"
|
||||
|
||||
# There're none raft-engine wal config since meta srv only involves in remote wal currently.
|
||||
|
||||
# Kafka wal config.
|
||||
# The broker endpoints of the Kafka cluster. ["127.0.0.1:9092"] by default.
|
||||
# broker_endpoints = ["127.0.0.1:9092"]
|
||||
# Number of topics to be created upon start.
|
||||
# num_topics = 64
|
||||
# Topic selector type.
|
||||
# Available selector types:
|
||||
# - "round_robin" (default)
|
||||
# selector_type = "round_robin"
|
||||
# A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.
|
||||
# topic_name_prefix = "greptimedb_wal_topic"
|
||||
# Number of partitions per topic.
|
||||
# num_partitions = 1
|
||||
# Expected number of replicas of each partition.
|
||||
# replication_factor = 1
|
||||
# Above which a topic creation operation will be cancelled.
|
||||
# create_topic_timeout = "30s"
|
||||
# The initial backoff for kafka clients.
|
||||
# backoff_init = "500ms"
|
||||
# The maximum backoff for kafka clients.
|
||||
# backoff_max = "10s"
|
||||
# Exponential backoff rate, i.e. next backoff = base * current backoff.
|
||||
# backoff_base = 2
|
||||
# Stop reconnecting if the total wait time reaches the deadline. If this config is missing, the reconnecting won't terminate.
|
||||
# backoff_deadline = "5mins"
|
||||
|
||||
# Metasrv export the metrics generated by itself
|
||||
# encoded to Prometheus remote-write format
|
||||
# and send to Prometheus remote-write compatible receiver (e.g. send to `greptimedb` itself)
|
||||
# This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
|
||||
# [export_metrics]
|
||||
# whether enable export metrics, default is false
|
||||
# enable = false
|
||||
# The url of metrics export endpoint, default is `frontend` default HTTP endpoint.
|
||||
# endpoint = "127.0.0.1:4000"
|
||||
# The database name of exported metrics stores, user needs to specify a valid database
|
||||
# db = ""
|
||||
# The interval of export metrics
|
||||
# write_interval = "30s"
|
||||
# HTTP headers of Prometheus remote-write carry
|
||||
# headers = {}
|
||||
|
||||
@@ -80,49 +80,8 @@ enable = true
|
||||
# Whether to enable Prometheus remote write and read in HTTP API, true by default.
|
||||
enable = true
|
||||
|
||||
# WAL options.
|
||||
[wal]
|
||||
# Available wal providers:
|
||||
# - "raft_engine" (default)
|
||||
# - "kafka"
|
||||
provider = "raft_engine"
|
||||
|
||||
# There're none raft-engine wal config since meta srv only involves in remote wal currently.
|
||||
|
||||
# Kafka wal options.
|
||||
# The broker endpoints of the Kafka cluster. ["127.0.0.1:9092"] by default.
|
||||
# broker_endpoints = ["127.0.0.1:9092"]
|
||||
|
||||
# Number of topics to be created upon start.
|
||||
# num_topics = 64
|
||||
# Topic selector type.
|
||||
# Available selector types:
|
||||
# - "round_robin" (default)
|
||||
# selector_type = "round_robin"
|
||||
# A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.
|
||||
# topic_name_prefix = "greptimedb_wal_topic"
|
||||
# Number of partitions per topic.
|
||||
# num_partitions = 1
|
||||
# Expected number of replicas of each partition.
|
||||
# replication_factor = 1
|
||||
|
||||
# The maximum log size a kafka batch producer could buffer.
|
||||
# max_batch_size = "4MB"
|
||||
# The linger duration of a kafka batch producer.
|
||||
# linger = "200ms"
|
||||
# The maximum amount of time (in milliseconds) to wait for Kafka records to be returned.
|
||||
# produce_record_timeout = "100ms"
|
||||
# Above which a topic creation operation will be cancelled.
|
||||
# create_topic_timeout = "30s"
|
||||
|
||||
# The initial backoff for kafka clients.
|
||||
# backoff_init = "500ms"
|
||||
# The maximum backoff for kafka clients.
|
||||
# backoff_max = "10s"
|
||||
# Exponential backoff rate, i.e. next backoff = base * current backoff.
|
||||
# backoff_base = 2
|
||||
# Stop reconnecting if the total wait time reaches the deadline. If this config is missing, the reconnecting won't terminate.
|
||||
# backoff_deadline = "5mins"
|
||||
|
||||
# WAL data directory
|
||||
# dir = "/tmp/greptimedb/wal"
|
||||
# WAL file size in bytes.
|
||||
@@ -163,12 +122,6 @@ type = "File"
|
||||
# The local file cache capacity in bytes.
|
||||
# cache_capacity = "256MB"
|
||||
|
||||
# Custom storage options
|
||||
#[[storage.providers]]
|
||||
#type = "S3"
|
||||
#[[storage.providers]]
|
||||
#type = "Gcs"
|
||||
|
||||
# Mito engine options
|
||||
[[region_engine]]
|
||||
[region_engine.mito]
|
||||
@@ -198,13 +151,6 @@ vector_cache_size = "512MB"
|
||||
page_cache_size = "512MB"
|
||||
# Buffer size for SST writing.
|
||||
sst_write_buffer_size = "8MB"
|
||||
# Parallelism to scan a region (default: 1/4 of cpu cores).
|
||||
# - 0: using the default value (1/4 of cpu cores).
|
||||
# - 1: scan in current thread.
|
||||
# - n: scan in parallelism n.
|
||||
scan_parallelism = 0
|
||||
# Capacity of the channel to send data from parallel scan tasks to the main task (default 32).
|
||||
parallel_scan_channel_size = 32
|
||||
|
||||
# Log options
|
||||
# [logging]
|
||||
@@ -218,21 +164,3 @@ parallel_scan_channel_size = 32
|
||||
# otlp_endpoint = "localhost:4317"
|
||||
# The percentage of tracing will be sampled and exported. Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1. ratio > 1 are treated as 1. Fractions < 0 are treated as 0
|
||||
# tracing_sample_ratio = 1.0
|
||||
# Whether to append logs to stdout. Defaults to true.
|
||||
# append_stdout = true
|
||||
|
||||
# Standalone export the metrics generated by itself
|
||||
# encoded to Prometheus remote-write format
|
||||
# and send to Prometheus remote-write compatible receiver (e.g. send to `greptimedb` itself)
|
||||
# This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
|
||||
# [export_metrics]
|
||||
# whether enable export metrics, default is false
|
||||
# enable = false
|
||||
# The url of metrics export endpoint, default is `frontend` default HTTP endpoint.
|
||||
# endpoint = "127.0.0.1:4000"
|
||||
# The database name of exported metrics stores, user needs to specify a valid database
|
||||
# db = ""
|
||||
# The interval of export metrics
|
||||
# write_interval = "30s"
|
||||
# HTTP headers of Prometheus remote-write carry
|
||||
# headers = {}
|
||||
|
||||
@@ -26,5 +26,4 @@ ARG RUST_TOOLCHAIN
RUN rustup toolchain install ${RUST_TOOLCHAIN}

# Install nextest.
RUN cargo install cargo-binstall --locked
RUN cargo binstall cargo-nextest --no-confirm
RUN cargo install cargo-nextest --locked

@@ -43,5 +43,4 @@ ARG RUST_TOOLCHAIN
RUN rustup toolchain install ${RUST_TOOLCHAIN}

# Install nextest.
RUN cargo install cargo-binstall --locked
RUN cargo binstall cargo-nextest --no-confirm
RUN cargo install cargo-nextest --locked

@@ -44,5 +44,4 @@ ARG RUST_TOOLCHAIN
RUN rustup toolchain install ${RUST_TOOLCHAIN}

# Install nextest.
RUN cargo install cargo-binstall --locked
RUN cargo binstall cargo-nextest --no-confirm
RUN cargo install cargo-nextest --locked

@@ -1,2 +1,2 @@
[toolchain]
channel = "nightly-2023-12-19"
channel = "nightly-2023-10-21"

scripts/run-pyo3-greptime.sh (157 lines, new executable file)

@@ -0,0 +1,157 @@
|
||||
#!/bin/bash
|
||||
|
||||
# This script configures the environment to run 'greptime' with the required Python version
|
||||
|
||||
# This script should be compatible both in Linux and macOS
|
||||
OS_TYPE="$(uname)"
|
||||
readonly OS_TYPE
|
||||
|
||||
check_command_existence() {
|
||||
command -v "$1" &> /dev/null
|
||||
}
|
||||
|
||||
get_python_version() {
|
||||
case "$OS_TYPE" in
|
||||
Darwin)
|
||||
otool -L $GREPTIME_BIN_PATH | grep -o 'Python.framework/Versions/3.[0-9]\+/Python' | grep -o '3.[0-9]\+'
|
||||
;;
|
||||
Linux)
|
||||
ldd $GREPTIME_BIN_PATH | grep -o 'libpython3\.[0-9]\+' | grep -o '3\.[0-9]\+'
|
||||
;;
|
||||
*)
|
||||
echo "Unsupported OS type: $OS_TYPE"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
setup_virtualenv() {
|
||||
local req_py_version="$1"
|
||||
local env_name="GreptimeTmpVenv$req_py_version"
|
||||
virtualenv --python=python"$req_py_version" "$env_name"
|
||||
source "$env_name/bin/activate"
|
||||
}
|
||||
|
||||
setup_conda_env() {
|
||||
local req_py_version="$1"
|
||||
local conda_base
|
||||
conda_base=$(conda info --base) || { echo "Error obtaining conda base directory"; exit 1; }
|
||||
. "$conda_base/etc/profile.d/conda.sh"
|
||||
|
||||
if ! conda list --name "GreptimeTmpPyO3Env$req_py_version" &> /dev/null; then
|
||||
conda create --yes --name "GreptimeTmpPyO3Env$req_py_version" python="$req_py_version"
|
||||
fi
|
||||
|
||||
conda activate "GreptimeTmpPyO3Env$req_py_version"
|
||||
}
|
||||
|
||||
GREPTIME_BIN_PATH="./greptime"
|
||||
YES="false"
|
||||
|
||||
usage() {
|
||||
echo "Usage:"
|
||||
echo " $0 -f <greptime-bin-path> [-y] <args-pass-to-greptime>"
|
||||
echo "Set $PY_ENV_MAN to 1 to use virtualenv, 2 to use conda"
|
||||
exit 1
|
||||
}
|
||||
|
||||
function parse_args() {
|
||||
while getopts ":f:y" opt; do
|
||||
case $opt in
|
||||
f)
|
||||
GREPTIME_BIN_PATH=$OPTARG
|
||||
;;
|
||||
y)
|
||||
YES="yes"
|
||||
;;
|
||||
\?)
|
||||
echo "Invalid option: -$OPTARG" >&2
|
||||
exit 1
|
||||
;;
|
||||
:)
|
||||
echo "Option -$OPTARG requires an argument." >&2
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
shift $((OPTIND -1))
|
||||
|
||||
REST_ARGS=$*
|
||||
|
||||
if [ -z "$GREPTIME_BIN_PATH" ]; then
|
||||
usage
|
||||
fi
|
||||
|
||||
echo "Run greptime binary at '$GREPTIME_BIN_PATH' (yes=$YES)..."
|
||||
echo "The args pass to greptime: '$REST_ARGS'"
|
||||
}
|
||||
|
||||
# Set library path and pass all arguments to greptime to run it
|
||||
execute_greptime() {
|
||||
if [[ "$OS_TYPE" == "Darwin" ]]; then
|
||||
DYLD_LIBRARY_PATH="${CONDA_PREFIX:-$PREFIX}/lib:${LD_LIBRARY_PATH:-}" $GREPTIME_BIN_PATH $@
|
||||
elif [[ "$OS_TYPE" == "Linux" ]]; then
|
||||
LD_LIBRARY_PATH="${CONDA_PREFIX:-$PREFIX}/lib:${LD_LIBRARY_PATH:-}" $GREPTIME_BIN_PATH $@
|
||||
fi
|
||||
}
|
||||
|
||||
main() {
|
||||
parse_args $@
|
||||
|
||||
local req_py_version
|
||||
req_py_version=$(get_python_version)
|
||||
readonly req_py_version
|
||||
|
||||
if [[ -z "$req_py_version" ]]; then
|
||||
if $GREPTIME_BIN_PATH --version &> /dev/null; then
|
||||
$GREPTIME_BIN_PATH $REST_ARGS
|
||||
else
|
||||
echo "The 'greptime' binary is not valid or encountered an error."
|
||||
$GREPTIME_BIN_PATH --version
|
||||
exit 1
|
||||
fi
|
||||
return
|
||||
fi
|
||||
|
||||
echo "The required version of Python shared library is $req_py_version"
|
||||
|
||||
# if YES exist, assign it to yn, else read from stdin
|
||||
if [[ -z "$YES" ]]; then
|
||||
echo "Now this script will try to install or find correct Python Version"
|
||||
echo "Do you want to continue? (yes/no): "
|
||||
read -r yn
|
||||
else
|
||||
yn="$YES"
|
||||
fi
|
||||
case $yn in
|
||||
[Yy]* ) ;;
|
||||
[Nn]* ) exit;;
|
||||
* ) echo "Please answer yes or no.";;
|
||||
esac
|
||||
|
||||
# if USE_ENV exist, assign it to option
|
||||
# else read from stdin
|
||||
if [[ -z "$PY_ENV_MAN" ]]; then
|
||||
echo "Do you want to use virtualenv or conda? (virtualenv(1)/conda(2)): "
|
||||
read -r option
|
||||
else
|
||||
option="$PY_ENV_MAN"
|
||||
fi
|
||||
|
||||
case $option in
|
||||
1)
|
||||
setup_virtualenv "$req_py_version"
|
||||
;;
|
||||
2)
|
||||
setup_conda_env "$req_py_version"
|
||||
;;
|
||||
*)
|
||||
echo "Please input 1 or 2"; exit 1
|
||||
;;
|
||||
esac
|
||||
|
||||
execute_greptime $REST_ARGS
|
||||
}
|
||||
|
||||
main "$@"
|
||||
@@ -535,8 +535,11 @@ pub fn convert_i128_to_interval(v: i128) -> v1::IntervalMonthDayNano {

/// Convert common decimal128 to grpc decimal128 without precision and scale.
pub fn convert_to_pb_decimal128(v: Decimal128) -> v1::Decimal128 {
    let (hi, lo) = v.split_value();
    v1::Decimal128 { hi, lo }
    let value = v.val();
    v1::Decimal128 {
        hi: (value >> 64) as i64,
        lo: value as i64,
    }
}

pub fn pb_value_to_value_ref<'a>(
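The shift-and-cast pair visible in this hunk splits a 128-bit decimal value into two signed 64-bit halves for the protobuf `Decimal128 { hi, lo }` message; `split_value()` presumably performs the same split inside the `Decimal128` type. A self-contained Rust sketch of the split and its inverse:

```rust
// Standalone illustration of the hi/lo split shown above; not code from the repository.
fn split_i128(value: i128) -> (i64, i64) {
    // High 64 bits keep the sign; the low 64 bits are reinterpreted as i64.
    ((value >> 64) as i64, value as i64)
}

fn combine_i128(hi: i64, lo: i64) -> i128 {
    // Reassemble: widen `hi`, shift it back up, and OR in the low half as unsigned bits.
    ((hi as i128) << 64) | (lo as u64 as i128)
}

fn main() {
    let v: i128 = -1_234_567_890_123_456_789_012_345_678;
    let (hi, lo) = split_i128(v);
    assert_eq!(v, combine_i128(hi, lo));
    println!("hi = {hi}, lo = {lo}");
}
```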
@@ -577,9 +580,9 @@ pub fn pb_value_to_value_ref<'a>(
        ValueData::TimeMillisecondValue(t) => ValueRef::Time(Time::new_millisecond(*t)),
        ValueData::TimeMicrosecondValue(t) => ValueRef::Time(Time::new_microsecond(*t)),
        ValueData::TimeNanosecondValue(t) => ValueRef::Time(Time::new_nanosecond(*t)),
        ValueData::IntervalYearMonthValue(v) => ValueRef::Interval(Interval::from_i32(*v)),
        ValueData::IntervalDayTimeValue(v) => ValueRef::Interval(Interval::from_i64(*v)),
        ValueData::IntervalMonthDayNanoValue(v) => {
        ValueData::IntervalYearMonthValues(v) => ValueRef::Interval(Interval::from_i32(*v)),
        ValueData::IntervalDayTimeValues(v) => ValueRef::Interval(Interval::from_i64(*v)),
        ValueData::IntervalMonthDayNanoValues(v) => {
            let interval = Interval::from_month_day_nano(v.months, v.days, v.nanoseconds);
            ValueRef::Interval(interval)
        }
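For reference, the `IntervalMonthDayNanoValue(s)` payload carries separate `months`, `days`, and `nanoseconds` fields, and the `convert_i128_to_interval` helper named in the previous hunk unpacks them from a single `i128`. The sketch below assumes Arrow's `IntervalMonthDayNano` packing (months in the top 32 bits, days in the next 32, nanoseconds in the low 64); the layout used by the repository's own `Interval::to_i128` is an assumption here and may differ:

```rust
// Illustration only: packs and unpacks a month-day-nano interval into an i128,
// assuming the Arrow bit layout described in the lead-in.
struct MonthDayNano {
    months: i32,
    days: i32,
    nanoseconds: i64,
}

fn pack(v: MonthDayNano) -> i128 {
    (((v.months as u32 as u128) << 96)
        | ((v.days as u32 as u128) << 64)
        | (v.nanoseconds as u64 as u128)) as i128
}

fn unpack(v: i128) -> MonthDayNano {
    MonthDayNano {
        months: (v >> 96) as i32,
        days: (v >> 64) as i32,
        nanoseconds: v as i64,
    }
}

fn main() {
    let packed = pack(MonthDayNano { months: 14, days: -3, nanoseconds: 1_500_000_000 });
    let round_trip = unpack(packed);
    assert_eq!(round_trip.months, 14);
    assert_eq!(round_trip.days, -3);
    assert_eq!(round_trip.nanoseconds, 1_500_000_000);
    println!("packed = {packed}");
}
```

Note that `Interval::from_month_day_nano(v.months, v.days, v.nanoseconds)` in the hunk takes the three fields directly, so no packing is needed on that code path.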
@@ -983,13 +986,13 @@ pub fn to_proto_value(value: Value) -> Option<v1::Value> {
|
||||
},
|
||||
Value::Interval(v) => match v.unit() {
|
||||
IntervalUnit::YearMonth => v1::Value {
|
||||
value_data: Some(ValueData::IntervalYearMonthValue(v.to_i32())),
|
||||
value_data: Some(ValueData::IntervalYearMonthValues(v.to_i32())),
|
||||
},
|
||||
IntervalUnit::DayTime => v1::Value {
|
||||
value_data: Some(ValueData::IntervalDayTimeValue(v.to_i64())),
|
||||
value_data: Some(ValueData::IntervalDayTimeValues(v.to_i64())),
|
||||
},
|
||||
IntervalUnit::MonthDayNano => v1::Value {
|
||||
value_data: Some(ValueData::IntervalMonthDayNanoValue(
|
||||
value_data: Some(ValueData::IntervalMonthDayNanoValues(
|
||||
convert_i128_to_interval(v.to_i128()),
|
||||
)),
|
||||
},
|
||||
@@ -1008,9 +1011,12 @@ pub fn to_proto_value(value: Value) -> Option<v1::Value> {
|
||||
value_data: Some(ValueData::DurationNanosecondValue(v.value())),
|
||||
},
|
||||
},
|
||||
Value::Decimal128(v) => v1::Value {
|
||||
value_data: Some(ValueData::Decimal128Value(convert_to_pb_decimal128(v))),
|
||||
},
|
||||
Value::Decimal128(v) => {
|
||||
let (hi, lo) = v.split_value();
|
||||
v1::Value {
|
||||
value_data: Some(ValueData::Decimal128Value(v1::Decimal128 { hi, lo })),
|
||||
}
|
||||
}
|
||||
Value::List(_) => return None,
|
||||
};
|
||||
|
||||
@@ -1045,9 +1051,9 @@ pub fn proto_value_type(value: &v1::Value) -> Option<ColumnDataType> {
|
||||
ValueData::TimeMillisecondValue(_) => ColumnDataType::TimeMillisecond,
|
||||
ValueData::TimeMicrosecondValue(_) => ColumnDataType::TimeMicrosecond,
|
||||
ValueData::TimeNanosecondValue(_) => ColumnDataType::TimeNanosecond,
|
||||
ValueData::IntervalYearMonthValue(_) => ColumnDataType::IntervalYearMonth,
|
||||
ValueData::IntervalDayTimeValue(_) => ColumnDataType::IntervalDayTime,
|
||||
ValueData::IntervalMonthDayNanoValue(_) => ColumnDataType::IntervalMonthDayNano,
|
||||
ValueData::IntervalYearMonthValues(_) => ColumnDataType::IntervalYearMonth,
|
||||
ValueData::IntervalDayTimeValues(_) => ColumnDataType::IntervalDayTime,
|
||||
ValueData::IntervalMonthDayNanoValues(_) => ColumnDataType::IntervalMonthDayNano,
|
||||
ValueData::DurationSecondValue(_) => ColumnDataType::DurationSecond,
|
||||
ValueData::DurationMillisecondValue(_) => ColumnDataType::DurationMillisecond,
|
||||
ValueData::DurationMicrosecondValue(_) => ColumnDataType::DurationMicrosecond,
|
||||
@@ -1103,10 +1109,10 @@ pub fn value_to_grpc_value(value: Value) -> GrpcValue {
|
||||
TimeUnit::Nanosecond => ValueData::TimeNanosecondValue(v.value()),
|
||||
}),
|
||||
Value::Interval(v) => Some(match v.unit() {
|
||||
IntervalUnit::YearMonth => ValueData::IntervalYearMonthValue(v.to_i32()),
|
||||
IntervalUnit::DayTime => ValueData::IntervalDayTimeValue(v.to_i64()),
|
||||
IntervalUnit::YearMonth => ValueData::IntervalYearMonthValues(v.to_i32()),
|
||||
IntervalUnit::DayTime => ValueData::IntervalDayTimeValues(v.to_i64()),
|
||||
IntervalUnit::MonthDayNano => {
|
||||
ValueData::IntervalMonthDayNanoValue(convert_i128_to_interval(v.to_i128()))
|
||||
ValueData::IntervalMonthDayNanoValues(convert_i128_to_interval(v.to_i128()))
|
||||
}
|
||||
}),
|
||||
Value::Duration(v) => Some(match v.unit() {
|
||||
@@ -1115,7 +1121,10 @@ pub fn value_to_grpc_value(value: Value) -> GrpcValue {
|
||||
TimeUnit::Microsecond => ValueData::DurationMicrosecondValue(v.value()),
|
||||
TimeUnit::Nanosecond => ValueData::DurationNanosecondValue(v.value()),
|
||||
}),
|
||||
Value::Decimal128(v) => Some(ValueData::Decimal128Value(convert_to_pb_decimal128(v))),
|
||||
Value::Decimal128(v) => {
|
||||
let (hi, lo) = v.split_value();
|
||||
Some(ValueData::Decimal128Value(v1::Decimal128 { hi, lo }))
|
||||
}
|
||||
Value::List(_) => unreachable!(),
|
||||
},
|
||||
}
|
||||
|
||||
@@ -13,7 +13,6 @@ arc-swap = "1.0"
|
||||
arrow-schema.workspace = true
|
||||
async-stream.workspace = true
|
||||
async-trait = "0.1"
|
||||
build-data = "0.1"
|
||||
common-catalog.workspace = true
|
||||
common-error.workspace = true
|
||||
common-grpc.workspace = true
|
||||
@@ -24,7 +23,7 @@ common-recordbatch.workspace = true
|
||||
common-runtime.workspace = true
|
||||
common-telemetry.workspace = true
|
||||
common-time.workspace = true
|
||||
dashmap.workspace = true
|
||||
dashmap = "5.4"
|
||||
datafusion.workspace = true
|
||||
datatypes.workspace = true
|
||||
futures = "0.3"
|
||||
@@ -34,11 +33,10 @@ meta-client.workspace = true
|
||||
moka = { workspace = true, features = ["future"] }
|
||||
parking_lot = "0.12"
|
||||
partition.workspace = true
|
||||
paste = "1.0"
|
||||
prometheus.workspace = true
|
||||
regex.workspace = true
|
||||
serde.workspace = true
|
||||
serde_json.workspace = true
|
||||
serde_json = "1.0"
|
||||
session.workspace = true
|
||||
snafu.workspace = true
|
||||
store-api.workspace = true
|
||||
|
||||
@@ -13,20 +13,16 @@
|
||||
// limitations under the License.
|
||||
|
||||
mod columns;
|
||||
mod memory_table;
|
||||
mod table_names;
|
||||
mod tables;
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::sync::{Arc, Weak};
|
||||
|
||||
use common_catalog::consts::{self, INFORMATION_SCHEMA_NAME};
|
||||
use common_catalog::consts::INFORMATION_SCHEMA_NAME;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_recordbatch::{RecordBatchStreamWrapper, SendableRecordBatchStream};
|
||||
use common_recordbatch::{RecordBatchStreamAdaptor, SendableRecordBatchStream};
|
||||
use datatypes::schema::SchemaRef;
|
||||
use futures_util::StreamExt;
|
||||
use lazy_static::lazy_static;
|
||||
use paste::paste;
|
||||
use snafu::ResultExt;
|
||||
use store_api::data_source::DataSource;
|
||||
use store_api::storage::{ScanRequest, TableId};
|
||||
@@ -36,102 +32,43 @@ use table::metadata::{
|
||||
};
|
||||
use table::thin_table::{ThinTable, ThinTableAdapter};
|
||||
use table::TableRef;
|
||||
pub use table_names::*;
|
||||
|
||||
use self::columns::InformationSchemaColumns;
|
||||
use crate::error::Result;
|
||||
use crate::information_schema::memory_table::{get_schema_columns, MemoryTable};
|
||||
use crate::information_schema::tables::InformationSchemaTables;
|
||||
use crate::CatalogManager;
|
||||
|
||||
lazy_static! {
|
||||
// Memory tables in `information_schema`.
|
||||
static ref MEMORY_TABLES: &'static [&'static str] = &[
|
||||
ENGINES,
|
||||
COLUMN_PRIVILEGES,
|
||||
COLUMN_STATISTICS,
|
||||
BUILD_INFO,
|
||||
];
|
||||
}
|
||||
pub const TABLES: &str = "tables";
|
||||
pub const COLUMNS: &str = "columns";
|
||||
|
||||
macro_rules! setup_memory_table {
|
||||
($name: expr) => {
|
||||
paste! {
|
||||
{
|
||||
let (schema, columns) = get_schema_columns($name);
|
||||
Some(Arc::new(MemoryTable::new(
|
||||
consts::[<INFORMATION_SCHEMA_ $name _TABLE_ID>],
|
||||
$name,
|
||||
schema,
|
||||
columns
|
||||
)) as _)
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
/// The `information_schema` tables info provider.
|
||||
pub struct InformationSchemaProvider {
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
tables: HashMap<String, TableRef>,
|
||||
}
|
||||
|
||||
impl InformationSchemaProvider {
|
||||
pub fn new(catalog_name: String, catalog_manager: Weak<dyn CatalogManager>) -> Self {
|
||||
let mut provider = Self {
|
||||
Self {
|
||||
catalog_name,
|
||||
catalog_manager,
|
||||
tables: HashMap::new(),
|
||||
};
|
||||
|
||||
provider.build_tables();
|
||||
|
||||
provider
|
||||
}
|
||||
|
||||
/// Returns table names in the order of table id.
|
||||
pub fn table_names(&self) -> Vec<String> {
|
||||
let mut tables = self.tables.values().clone().collect::<Vec<_>>();
|
||||
|
||||
tables.sort_by(|t1, t2| {
|
||||
t1.table_info()
|
||||
.table_id()
|
||||
.partial_cmp(&t2.table_info().table_id())
|
||||
.unwrap()
|
||||
});
|
||||
tables
|
||||
.into_iter()
|
||||
.map(|t| t.table_info().name.clone())
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// Returns a map of [TableRef] in information schema.
|
||||
pub fn tables(&self) -> &HashMap<String, TableRef> {
|
||||
assert!(!self.tables.is_empty());
|
||||
|
||||
&self.tables
|
||||
}
|
||||
|
||||
/// Returns the [TableRef] by table name.
|
||||
pub fn table(&self, name: &str) -> Option<TableRef> {
|
||||
self.tables.get(name).cloned()
|
||||
}
|
||||
|
||||
fn build_tables(&mut self) {
|
||||
let mut tables = HashMap::new();
|
||||
tables.insert(TABLES.to_string(), self.build_table(TABLES).unwrap());
|
||||
tables.insert(COLUMNS.to_string(), self.build_table(COLUMNS).unwrap());
|
||||
|
||||
// Add memory tables
|
||||
for name in MEMORY_TABLES.iter() {
|
||||
tables.insert((*name).to_string(), self.build_table(name).unwrap());
|
||||
}
|
||||
|
||||
self.tables = tables;
|
||||
}
|
||||
|
||||
fn build_table(&self, name: &str) -> Option<TableRef> {
|
||||
/// Build a map of [TableRef] in information schema.
|
||||
/// Including `tables` and `columns`.
|
||||
pub fn build(
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
) -> HashMap<String, TableRef> {
|
||||
let provider = Self::new(catalog_name, catalog_manager);
|
||||
|
||||
let mut schema = HashMap::new();
|
||||
schema.insert(TABLES.to_owned(), provider.table(TABLES).unwrap());
|
||||
schema.insert(COLUMNS.to_owned(), provider.table(COLUMNS).unwrap());
|
||||
schema
|
||||
}
|
||||
|
||||
pub fn table(&self, name: &str) -> Option<TableRef> {
|
||||
self.information_table(name).map(|table| {
|
||||
let table_info = Self::table_info(self.catalog_name.clone(), &table);
|
||||
let filter_pushdown = FilterPushDownType::Unsupported;
|
||||
@@ -152,10 +89,6 @@ impl InformationSchemaProvider {
|
||||
self.catalog_name.clone(),
|
||||
self.catalog_manager.clone(),
|
||||
)) as _),
|
||||
ENGINES => setup_memory_table!(ENGINES),
|
||||
COLUMN_PRIVILEGES => setup_memory_table!(COLUMN_PRIVILEGES),
|
||||
COLUMN_STATISTICS => setup_memory_table!(COLUMN_STATISTICS),
|
||||
BUILD_INFO => setup_memory_table!(BUILD_INFO),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
@@ -169,9 +102,9 @@ impl InformationSchemaProvider {
|
||||
.unwrap();
|
||||
let table_info = TableInfoBuilder::default()
|
||||
.table_id(table.table_id())
|
||||
.name(table.table_name().to_string())
|
||||
.name(table.table_name().to_owned())
|
||||
.catalog_name(catalog_name)
|
||||
.schema_name(INFORMATION_SCHEMA_NAME.to_string())
|
||||
.schema_name(INFORMATION_SCHEMA_NAME.to_owned())
|
||||
.meta(table_meta)
|
||||
.table_type(table.table_type())
|
||||
.build()
|
||||
@@ -238,12 +171,11 @@ impl DataSource for InformationTableDataSource {
|
||||
None => batch,
|
||||
});
|
||||
|
||||
let stream = RecordBatchStreamWrapper {
|
||||
let stream = RecordBatchStreamAdaptor {
|
||||
schema: projected_schema,
|
||||
stream: Box::pin(stream),
|
||||
output_ordering: None,
|
||||
};
|
||||
|
||||
Ok(Box::pin(stream))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -16,8 +16,8 @@ use std::sync::{Arc, Weak};
|
||||
|
||||
use arrow_schema::SchemaRef as ArrowSchemaRef;
|
||||
use common_catalog::consts::{
|
||||
INFORMATION_SCHEMA_COLUMNS_TABLE_ID, SEMANTIC_TYPE_FIELD, SEMANTIC_TYPE_PRIMARY_KEY,
|
||||
SEMANTIC_TYPE_TIME_INDEX,
|
||||
INFORMATION_SCHEMA_COLUMNS_TABLE_ID, INFORMATION_SCHEMA_NAME, SEMANTIC_TYPE_FIELD,
|
||||
SEMANTIC_TYPE_PRIMARY_KEY, SEMANTIC_TYPE_TIME_INDEX,
|
||||
};
|
||||
use common_error::ext::BoxedError;
|
||||
use common_query::physical_plan::TaskContext;
|
||||
@@ -33,7 +33,8 @@ use datatypes::vectors::{StringVectorBuilder, VectorRef};
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use store_api::storage::TableId;
|
||||
|
||||
use super::{InformationTable, COLUMNS};
|
||||
use super::tables::InformationSchemaTables;
|
||||
use super::{InformationTable, COLUMNS, TABLES};
|
||||
use crate::error::{
|
||||
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
|
||||
};
|
||||
@@ -101,7 +102,7 @@ impl InformationTable for InformationSchemaColumns {
|
||||
schema,
|
||||
futures::stream::once(async move {
|
||||
builder
|
||||
.make_columns()
|
||||
.make_tables()
|
||||
.await
|
||||
.map(|x| x.into_df_record_batch())
|
||||
.map_err(Into::into)
|
||||
@@ -147,8 +148,8 @@ impl InformationSchemaColumnsBuilder {
|
||||
}
|
||||
}
|
||||
|
||||
/// Construct the `information_schema.columns` virtual table
|
||||
async fn make_columns(&mut self) -> Result<RecordBatch> {
|
||||
/// Construct the `information_schema.tables` virtual table
|
||||
async fn make_tables(&mut self) -> Result<RecordBatch> {
|
||||
let catalog_name = self.catalog_name.clone();
|
||||
let catalog_manager = self
|
||||
.catalog_manager
|
||||
@@ -162,38 +163,48 @@ impl InformationSchemaColumnsBuilder {
|
||||
{
|
||||
continue;
|
||||
}
|
||||
|
||||
for table_name in catalog_manager
|
||||
.table_names(&catalog_name, &schema_name)
|
||||
.await?
|
||||
{
|
||||
if let Some(table) = catalog_manager
|
||||
let (keys, schema) = if let Some(table) = catalog_manager
|
||||
.table(&catalog_name, &schema_name, &table_name)
|
||||
.await?
|
||||
{
|
||||
let keys = &table.table_info().meta.primary_key_indices;
|
||||
let schema = table.schema();
|
||||
|
||||
for (idx, column) in schema.column_schemas().iter().enumerate() {
|
||||
let semantic_type = if column.is_time_index() {
|
||||
SEMANTIC_TYPE_TIME_INDEX
|
||||
} else if keys.contains(&idx) {
|
||||
SEMANTIC_TYPE_PRIMARY_KEY
|
||||
} else {
|
||||
SEMANTIC_TYPE_FIELD
|
||||
};
|
||||
|
||||
self.add_column(
|
||||
&catalog_name,
|
||||
&schema_name,
|
||||
&table_name,
|
||||
&column.name,
|
||||
&column.data_type.name(),
|
||||
semantic_type,
|
||||
);
|
||||
}
|
||||
(keys.clone(), schema)
|
||||
} else {
|
||||
unreachable!();
|
||||
// TODO: this specific branch is only a workaround for FrontendCatalogManager.
|
||||
if schema_name == INFORMATION_SCHEMA_NAME {
|
||||
if table_name == COLUMNS {
|
||||
(vec![], InformationSchemaColumns::schema())
|
||||
} else if table_name == TABLES {
|
||||
(vec![], InformationSchemaTables::schema())
|
||||
} else {
|
||||
continue;
|
||||
}
|
||||
} else {
|
||||
continue;
|
||||
}
|
||||
};
|
||||
|
||||
for (idx, column) in schema.column_schemas().iter().enumerate() {
|
||||
let semantic_type = if column.is_time_index() {
|
||||
SEMANTIC_TYPE_TIME_INDEX
|
||||
} else if keys.contains(&idx) {
|
||||
SEMANTIC_TYPE_PRIMARY_KEY
|
||||
} else {
|
||||
SEMANTIC_TYPE_FIELD
|
||||
};
|
||||
self.add_column(
|
||||
&catalog_name,
|
||||
&schema_name,
|
||||
&table_name,
|
||||
&column.name,
|
||||
&column.data_type.name(),
|
||||
semantic_type,
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -227,7 +238,6 @@ impl InformationSchemaColumnsBuilder {
|
||||
Arc::new(self.data_types.finish()),
|
||||
Arc::new(self.semantic_types.finish()),
|
||||
];
|
||||
|
||||
RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
|
||||
}
|
||||
}
|
||||
@@ -244,7 +254,7 @@ impl DfPartitionStream for InformationSchemaColumns {
|
||||
schema,
|
||||
futures::stream::once(async move {
|
||||
builder
|
||||
.make_columns()
|
||||
.make_tables()
|
||||
.await
|
||||
.map(|x| x.into_df_record_batch())
|
||||
.map_err(Into::into)
|
||||
|
||||
@@ -1,214 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
mod tables;
|
||||
use std::sync::Arc;
|
||||
|
||||
use arrow_schema::SchemaRef as ArrowSchemaRef;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_query::physical_plan::TaskContext;
|
||||
use common_recordbatch::adapter::RecordBatchStreamAdapter;
|
||||
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
|
||||
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
|
||||
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
|
||||
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
|
||||
use datatypes::schema::SchemaRef;
|
||||
use datatypes::vectors::VectorRef;
|
||||
use snafu::ResultExt;
|
||||
use store_api::storage::TableId;
|
||||
pub use tables::get_schema_columns;
|
||||
|
||||
use crate::error::{CreateRecordBatchSnafu, InternalSnafu, Result};
|
||||
use crate::information_schema::InformationTable;
|
||||
|
||||
/// A memory table with specified schema and columns.
|
||||
pub(super) struct MemoryTable {
|
||||
table_id: TableId,
|
||||
table_name: &'static str,
|
||||
schema: SchemaRef,
|
||||
columns: Vec<VectorRef>,
|
||||
}
|
||||
|
||||
impl MemoryTable {
|
||||
/// Creates a memory table with table id, name, schema and columns.
|
||||
pub(super) fn new(
|
||||
table_id: TableId,
|
||||
table_name: &'static str,
|
||||
schema: SchemaRef,
|
||||
columns: Vec<VectorRef>,
|
||||
) -> Self {
|
||||
Self {
|
||||
table_id,
|
||||
table_name,
|
||||
schema,
|
||||
columns,
|
||||
}
|
||||
}
|
||||
|
||||
fn builder(&self) -> MemoryTableBuilder {
|
||||
MemoryTableBuilder::new(self.schema.clone(), self.columns.clone())
|
||||
}
|
||||
}
|
||||
|
||||
impl InformationTable for MemoryTable {
|
||||
fn table_id(&self) -> TableId {
|
||||
self.table_id
|
||||
}
|
||||
|
||||
fn table_name(&self) -> &'static str {
|
||||
self.table_name
|
||||
}
|
||||
|
||||
fn schema(&self) -> SchemaRef {
|
||||
self.schema.clone()
|
||||
}
|
||||
|
||||
fn to_stream(&self) -> Result<SendableRecordBatchStream> {
|
||||
let schema = self.schema.arrow_schema().clone();
|
||||
let mut builder = self.builder();
|
||||
let stream = Box::pin(DfRecordBatchStreamAdapter::new(
|
||||
schema,
|
||||
futures::stream::once(async move {
|
||||
builder
|
||||
.memory_records()
|
||||
.await
|
||||
.map(|x| x.into_df_record_batch())
|
||||
.map_err(Into::into)
|
||||
}),
|
||||
));
|
||||
Ok(Box::pin(
|
||||
RecordBatchStreamAdapter::try_new(stream)
|
||||
.map_err(BoxedError::new)
|
||||
.context(InternalSnafu)?,
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
struct MemoryTableBuilder {
|
||||
schema: SchemaRef,
|
||||
columns: Vec<VectorRef>,
|
||||
}
|
||||
|
||||
impl MemoryTableBuilder {
|
||||
fn new(schema: SchemaRef, columns: Vec<VectorRef>) -> Self {
|
||||
Self { schema, columns }
|
||||
}
|
||||
|
||||
/// Construct the `information_schema.{table_name}` virtual table
|
||||
async fn memory_records(&mut self) -> Result<RecordBatch> {
|
||||
if self.columns.is_empty() {
|
||||
RecordBatch::new_empty(self.schema.clone()).context(CreateRecordBatchSnafu)
|
||||
} else {
|
||||
RecordBatch::new(self.schema.clone(), std::mem::take(&mut self.columns))
|
||||
.context(CreateRecordBatchSnafu)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl DfPartitionStream for MemoryTable {
|
||||
fn schema(&self) -> &ArrowSchemaRef {
|
||||
self.schema.arrow_schema()
|
||||
}
|
||||
|
||||
fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
|
||||
let schema = self.schema.arrow_schema().clone();
|
||||
let mut builder = self.builder();
|
||||
Box::pin(DfRecordBatchStreamAdapter::new(
|
||||
schema,
|
||||
futures::stream::once(async move {
|
||||
builder
|
||||
.memory_records()
|
||||
.await
|
||||
.map(|x| x.into_df_record_batch())
|
||||
.map_err(Into::into)
|
||||
}),
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_recordbatch::RecordBatches;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::schema::{ColumnSchema, Schema};
|
||||
use datatypes::vectors::StringVector;
|
||||
|
||||
use super::*;
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_memory_table() {
|
||||
let schema = Arc::new(Schema::new(vec![
|
||||
ColumnSchema::new("a", ConcreteDataType::string_datatype(), false),
|
||||
ColumnSchema::new("b", ConcreteDataType::string_datatype(), false),
|
||||
]));
|
||||
|
||||
let table = MemoryTable::new(
|
||||
42,
|
||||
"test",
|
||||
schema.clone(),
|
||||
vec![
|
||||
Arc::new(StringVector::from(vec!["a1", "a2"])),
|
||||
Arc::new(StringVector::from(vec!["b1", "b2"])),
|
||||
],
|
||||
);
|
||||
|
||||
assert_eq!(42, table.table_id());
|
||||
assert_eq!("test", table.table_name());
|
||||
assert_eq!(schema, InformationTable::schema(&table));
|
||||
|
||||
let stream = table.to_stream().unwrap();
|
||||
|
||||
let batches = RecordBatches::try_collect(stream).await.unwrap();
|
||||
|
||||
assert_eq!(
|
||||
"\
|
||||
+----+----+
|
||||
| a | b |
|
||||
+----+----+
|
||||
| a1 | b1 |
|
||||
| a2 | b2 |
|
||||
+----+----+",
|
||||
batches.pretty_print().unwrap()
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_empty_memory_table() {
|
||||
let schema = Arc::new(Schema::new(vec![
|
||||
ColumnSchema::new("a", ConcreteDataType::string_datatype(), false),
|
||||
ColumnSchema::new("b", ConcreteDataType::string_datatype(), false),
|
||||
]));
|
||||
|
||||
let table = MemoryTable::new(42, "test", schema.clone(), vec![]);
|
||||
|
||||
assert_eq!(42, table.table_id());
|
||||
assert_eq!("test", table.table_name());
|
||||
assert_eq!(schema, InformationTable::schema(&table));
|
||||
|
||||
let stream = table.to_stream().unwrap();
|
||||
|
||||
let batches = RecordBatches::try_collect(stream).await.unwrap();
|
||||
|
||||
assert_eq!(
|
||||
"\
|
||||
+---+---+
|
||||
| a | b |
|
||||
+---+---+
|
||||
+---+---+",
|
||||
batches.pretty_print().unwrap()
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -1,135 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_catalog::consts::MITO_ENGINE;
|
||||
use datatypes::prelude::{ConcreteDataType, VectorRef};
|
||||
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
|
||||
use datatypes::vectors::StringVector;
|
||||
|
||||
use crate::information_schema::table_names::*;
|
||||
|
||||
const UNKNOWN: &str = "unknown";
|
||||
|
||||
/// Find the schema and columns by the table_name, only valid for memory tables.
|
||||
/// Safety: the user MUST ensure the table schema exists, panic otherwise.
|
||||
pub fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>) {
|
||||
let (column_schemas, columns): (_, Vec<VectorRef>) = match table_name {
|
||||
COLUMN_PRIVILEGES => (
|
||||
string_columns(&[
|
||||
"GRANTEE",
|
||||
"TABLE_CATALOG",
|
||||
"TABLE_SCHEMA",
|
||||
"TABLE_NAME",
|
||||
"COLUMN_NAME",
|
||||
"PRIVILEGE_TYPE",
|
||||
"IS_GRANTABLE",
|
||||
]),
|
||||
vec![],
|
||||
),
|
||||
|
||||
COLUMN_STATISTICS => (
|
||||
string_columns(&[
|
||||
"SCHEMA_NAME",
|
||||
"TABLE_NAME",
|
||||
"COLUMN_NAME",
|
||||
// TODO(dennis): It must be a JSON type, but we don't support it yet
|
||||
"HISTOGRAM",
|
||||
]),
|
||||
vec![],
|
||||
),
|
||||
|
||||
ENGINES => (
|
||||
string_columns(&[
|
||||
"ENGINE",
|
||||
"SUPPORT",
|
||||
"COMMENT",
|
||||
"TRANSACTIONS",
|
||||
"XA",
|
||||
"SAVEPOINTS",
|
||||
]),
|
||||
vec![
|
||||
Arc::new(StringVector::from(vec![MITO_ENGINE])),
|
||||
Arc::new(StringVector::from(vec!["DEFAULT"])),
|
||||
Arc::new(StringVector::from(vec![
|
||||
"Storage engine for time-series data",
|
||||
])),
|
||||
Arc::new(StringVector::from(vec!["NO"])),
|
||||
Arc::new(StringVector::from(vec!["NO"])),
|
||||
Arc::new(StringVector::from(vec!["NO"])),
|
||||
],
|
||||
),
|
||||
|
||||
BUILD_INFO => (
|
||||
string_columns(&[
|
||||
"GIT_BRANCH",
|
||||
"GIT_COMMIT",
|
||||
"GIT_COMMIT_SHORT",
|
||||
"GIT_DIRTY",
|
||||
"PKG_VERSION",
|
||||
]),
|
||||
vec![
|
||||
Arc::new(StringVector::from(vec![
|
||||
build_data::get_git_branch().unwrap_or_else(|_| UNKNOWN.to_string())
|
||||
])),
|
||||
Arc::new(StringVector::from(vec![
|
||||
build_data::get_git_commit().unwrap_or_else(|_| UNKNOWN.to_string())
|
||||
])),
|
||||
Arc::new(StringVector::from(vec![
|
||||
build_data::get_git_commit_short().unwrap_or_else(|_| UNKNOWN.to_string())
|
||||
])),
|
||||
Arc::new(StringVector::from(vec![
|
||||
build_data::get_git_dirty().map_or(UNKNOWN.to_string(), |v| v.to_string())
|
||||
])),
|
||||
Arc::new(StringVector::from(vec![option_env!("CARGO_PKG_VERSION")])),
|
||||
],
|
||||
),
|
||||
|
||||
_ => unreachable!("Unknown table in information_schema: {}", table_name),
|
||||
};
|
||||
|
||||
(Arc::new(Schema::new(column_schemas)), columns)
|
||||
}
|
||||
|
||||
fn string_columns(names: &[&'static str]) -> Vec<ColumnSchema> {
|
||||
names.iter().map(|name| string_column(name)).collect()
|
||||
}
|
||||
|
||||
fn string_column(name: &str) -> ColumnSchema {
|
||||
ColumnSchema::new(
|
||||
str::to_lowercase(name),
|
||||
ConcreteDataType::string_datatype(),
|
||||
false,
|
||||
)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_string_columns() {
|
||||
let columns = ["a", "b", "c"];
|
||||
let column_schemas = string_columns(&columns);
|
||||
|
||||
assert_eq!(3, column_schemas.len());
|
||||
for (i, name) in columns.iter().enumerate() {
|
||||
let cs = column_schemas.get(i).unwrap();
|
||||
|
||||
assert_eq!(*name, cs.name);
|
||||
assert_eq!(ConcreteDataType::string_datatype(), cs.data_type);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,22 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

/// All table names in `information_schema`.

pub const TABLES: &str = "tables";
pub const COLUMNS: &str = "columns";
pub const ENGINES: &str = "engines";
pub const COLUMN_PRIVILEGES: &str = "column_privileges";
pub const COLUMN_STATISTICS: &str = "column_statistics";
pub const BUILD_INFO: &str = "build_info";
@@ -15,7 +15,10 @@
use std::sync::{Arc, Weak};

use arrow_schema::SchemaRef as ArrowSchemaRef;
use common_catalog::consts::INFORMATION_SCHEMA_TABLES_TABLE_ID;
use common_catalog::consts::{
    INFORMATION_SCHEMA_COLUMNS_TABLE_ID, INFORMATION_SCHEMA_NAME,
    INFORMATION_SCHEMA_TABLES_TABLE_ID,
};
use common_error::ext::BoxedError;
use common_query::physical_plan::TaskContext;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
@@ -30,7 +33,7 @@ use snafu::{OptionExt, ResultExt};
use store_api::storage::TableId;
use table::metadata::TableType;

use super::TABLES;
use super::{COLUMNS, TABLES};
use crate::error::{
    CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
};
@@ -175,8 +178,29 @@ impl InformationSchemaTablesBuilder {
                    Some(&table_info.meta.engine),
                );
            } else {
                unreachable!();
            }
            // TODO: this specific branch is only a workaround for FrontendCatalogManager.
            if schema_name == INFORMATION_SCHEMA_NAME {
                if table_name == COLUMNS {
                    self.add_table(
                        &catalog_name,
                        &schema_name,
                        &table_name,
                        TableType::Temporary,
                        Some(INFORMATION_SCHEMA_COLUMNS_TABLE_ID),
                        None,
                    );
                } else if table_name == TABLES {
                    self.add_table(
                        &catalog_name,
                        &schema_name,
                        &table_name,
                        TableType::Temporary,
                        Some(INFORMATION_SCHEMA_TABLES_TABLE_ID),
                        None,
                    );
                }
            }
        };
    }
}

@@ -38,7 +38,7 @@ use crate::error::{
    self as catalog_err, ListCatalogsSnafu, ListSchemasSnafu, Result as CatalogResult,
    TableMetadataManagerSnafu,
};
use crate::information_schema::InformationSchemaProvider;
use crate::information_schema::{InformationSchemaProvider, COLUMNS, TABLES};
use crate::CatalogManager;

/// Access all existing catalog, schema and tables.
@@ -81,11 +81,6 @@ impl KvBackendCatalogManager {
            cache_invalidator,
            system_catalog: SystemCatalog {
                catalog_manager: me.clone(),
                information_schema_provider: Arc::new(InformationSchemaProvider::new(
                    // The catalog name is not used in system_catalog, so let it empty
                    "".to_string(),
                    me.clone(),
                )),
            },
        })
    }
@@ -127,11 +122,13 @@ impl CatalogManager for KvBackendCatalogManager {
            .try_collect::<BTreeSet<_>>()
            .await
            .map_err(BoxedError::new)
            .context(ListSchemasSnafu { catalog })?;
            .context(ListSchemasSnafu { catalog })?
            .into_iter()
            .collect::<Vec<_>>();

        keys.extend(self.system_catalog.schema_names());
        keys.extend_from_slice(&self.system_catalog.schema_names());

        Ok(keys.into_iter().collect())
        Ok(keys)
    }

    async fn table_names(&self, catalog: &str, schema: &str) -> CatalogResult<Vec<String>> {
@@ -234,11 +231,11 @@ impl CatalogManager for KvBackendCatalogManager {
// a new catalog is created.
/// Existing system tables:
/// - public.numbers
/// - information_schema.{tables}
/// - information_schema.tables
/// - information_schema.columns
#[derive(Clone)]
struct SystemCatalog {
    catalog_manager: Weak<KvBackendCatalogManager>,
    information_schema_provider: Arc<InformationSchemaProvider>,
}

impl SystemCatalog {
@@ -248,7 +245,7 @@ impl SystemCatalog {

    fn table_names(&self, schema: &str) -> Vec<String> {
        if schema == INFORMATION_SCHEMA_NAME {
            self.information_schema_provider.table_names()
            vec![TABLES.to_string(), COLUMNS.to_string()]
        } else if schema == DEFAULT_SCHEMA_NAME {
            vec![NUMBERS_TABLE_NAME.to_string()]
        } else {
@@ -262,7 +259,7 @@ impl SystemCatalog {

    fn table_exist(&self, schema: &str, table: &str) -> bool {
        if schema == INFORMATION_SCHEMA_NAME {
            self.information_schema_provider.table(table).is_some()
            table == TABLES || table == COLUMNS
        } else if schema == DEFAULT_SCHEMA_NAME {
            table == NUMBERS_TABLE_NAME
        } else {

@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

#![feature(trait_upcasting)]
#![feature(assert_matches)]
#![feature(try_blocks)]

@@ -18,9 +18,7 @@ use std::collections::HashMap;
use std::sync::{Arc, RwLock, Weak};

use common_catalog::build_db_string;
use common_catalog::consts::{
    DEFAULT_CATALOG_NAME, DEFAULT_PRIVATE_SCHEMA_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME,
};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME};
use snafu::OptionExt;
use table::TableRef;

@@ -137,18 +135,6 @@ impl MemoryCatalogManager {
                schema: DEFAULT_SCHEMA_NAME.to_string(),
            })
            .unwrap();
        manager
            .register_schema_sync(RegisterSchemaRequest {
                catalog: DEFAULT_CATALOG_NAME.to_string(),
                schema: DEFAULT_PRIVATE_SCHEMA_NAME.to_string(),
            })
            .unwrap();
        manager
            .register_schema_sync(RegisterSchemaRequest {
                catalog: DEFAULT_CATALOG_NAME.to_string(),
                schema: INFORMATION_SCHEMA_NAME.to_string(),
            })
            .unwrap();

        manager
    }
@@ -257,12 +243,10 @@ impl MemoryCatalogManager {
    }

    fn create_catalog_entry(self: &Arc<Self>, catalog: String) -> SchemaEntries {
        let information_schema_provider = InformationSchemaProvider::new(
        let information_schema = InformationSchemaProvider::build(
            catalog,
            Arc::downgrade(self) as Weak<dyn CatalogManager>,
        );
        let information_schema = information_schema_provider.tables().clone();

        let mut catalog = HashMap::new();
        catalog.insert(INFORMATION_SCHEMA_NAME.to_string(), information_schema);
        catalog

@@ -35,7 +35,7 @@ prost.workspace = true
rand.workspace = true
session.workspace = true
snafu.workspace = true
tokio-stream = { workspace = true, features = ["net"] }
tokio-stream = { version = "0.1", features = ["net"] }
tokio.workspace = true
tonic.workspace = true

@@ -27,7 +27,7 @@ use common_error::ext::{BoxedError, ErrorExt};
use common_grpc::flight::{FlightDecoder, FlightMessage};
use common_query::Output;
use common_recordbatch::error::ExternalSnafu;
use common_recordbatch::RecordBatchStreamWrapper;
use common_recordbatch::RecordBatchStreamAdaptor;
use common_telemetry::logging;
use common_telemetry::tracing_context::W3cTrace;
use futures_util::StreamExt;
@@ -315,7 +315,7 @@ impl Database {
            yield Ok(record_batch);
        }
    }));
    let record_batch_stream = RecordBatchStreamWrapper {
    let record_batch_stream = RecordBatchStreamAdaptor {
        schema,
        stream,
        output_ordering: None,

@@ -131,15 +131,3 @@ impl From<Status> for Error {
        Self::Server { code, msg }
    }
}

impl Error {
    pub fn should_retry(&self) -> bool {
        !matches!(
            self,
            Self::RegionServer {
                code: Code::InvalidArgument,
                ..
            }
        )
    }
}

@@ -23,12 +23,13 @@ use common_grpc::flight::{FlightDecoder, FlightMessage};
use common_meta::datanode_manager::{AffectedRows, Datanode};
use common_meta::error::{self as meta_error, Result as MetaResult};
use common_recordbatch::error::ExternalSnafu;
use common_recordbatch::{RecordBatchStreamWrapper, SendableRecordBatchStream};
use common_recordbatch::{RecordBatchStreamAdaptor, SendableRecordBatchStream};
use common_telemetry::error;
use prost::Message;
use snafu::{location, Location, OptionExt, ResultExt};
use tokio_stream::StreamExt;

use crate::error::Error::RegionServer;
use crate::error::{
    self, ConvertFlightDataSnafu, IllegalDatabaseResponseSnafu, IllegalFlightMessagesSnafu,
    MissingFieldSnafu, Result, ServerSnafu,
@@ -44,7 +45,7 @@ pub struct RegionRequester {
impl Datanode for RegionRequester {
    async fn handle(&self, request: RegionRequest) -> MetaResult<AffectedRows> {
        self.handle_inner(request).await.map_err(|err| {
            if err.should_retry() {
            if matches!(err, RegionServer { .. }) {
                meta_error::Error::RetryLater {
                    source: BoxedError::new(err),
                }
@@ -136,7 +137,7 @@ impl RegionRequester {
            yield Ok(record_batch);
        }
    }));
    let record_batch_stream = RecordBatchStreamWrapper {
    let record_batch_stream = RecordBatchStreamAdaptor {
        schema,
        stream,
        output_ordering: None,

@@ -40,7 +40,6 @@ etcd-client.workspace = true
file-engine.workspace = true
frontend.workspace = true
futures.workspace = true
human-panic = "1.2.2"
lazy_static.workspace = true
meta-client.workspace = true
meta-srv.workspace = true
@@ -59,7 +58,6 @@ serde_json.workspace = true
servers.workspace = true
session.workspace = true
snafu.workspace = true
store-api.workspace = true
substrait.workspace = true
table.workspace = true
tokio.workspace = true

@@ -16,12 +16,79 @@

use std::fmt;

use clap::{FromArgMatches, Parser, Subcommand};
use clap::Parser;
use cmd::error::Result;
use cmd::options::{CliOptions, Options};
use cmd::{
    cli, datanode, frontend, greptimedb_cli, log_versions, metasrv, standalone, start_app, App,
};
use cmd::options::{Options, TopLevelOptions};
use cmd::{cli, datanode, frontend, metasrv, standalone};
use common_telemetry::logging::{error, info, TracingOptions};

lazy_static::lazy_static! {
    static ref APP_VERSION: prometheus::IntGaugeVec =
        prometheus::register_int_gauge_vec!("app_version", "app version", &["short_version", "version"]).unwrap();
}

#[derive(Parser)]
#[clap(name = "greptimedb", version = print_version())]
struct Command {
    #[clap(long)]
    log_dir: Option<String>,
    #[clap(long)]
    log_level: Option<String>,
    #[clap(subcommand)]
    subcmd: SubCommand,

    #[cfg(feature = "tokio-console")]
    #[clap(long)]
    tokio_console_addr: Option<String>,
}

pub enum Application {
    Datanode(datanode::Instance),
    Frontend(frontend::Instance),
    Metasrv(metasrv::Instance),
    Standalone(standalone::Instance),
    Cli(cli::Instance),
}

impl Application {
    async fn start(&mut self) -> Result<()> {
        match self {
            Application::Datanode(instance) => instance.start().await,
            Application::Frontend(instance) => instance.start().await,
            Application::Metasrv(instance) => instance.start().await,
            Application::Standalone(instance) => instance.start().await,
            Application::Cli(instance) => instance.start().await,
        }
    }

    async fn stop(&self) -> Result<()> {
        match self {
            Application::Datanode(instance) => instance.stop().await,
            Application::Frontend(instance) => instance.stop().await,
            Application::Metasrv(instance) => instance.stop().await,
            Application::Standalone(instance) => instance.stop().await,
            Application::Cli(instance) => instance.stop().await,
        }
    }
}

impl Command {
    async fn build(self, opts: Options) -> Result<Application> {
        self.subcmd.build(opts).await
    }

    fn load_options(&self) -> Result<Options> {
        let top_level_opts = self.top_level_options();
        self.subcmd.load_options(top_level_opts)
    }

    fn top_level_options(&self) -> TopLevelOptions {
        TopLevelOptions {
            log_dir: self.log_dir.clone(),
            log_level: self.log_level.clone(),
        }
    }
}

#[derive(Parser)]
enum SubCommand {
@@ -38,41 +105,40 @@ enum SubCommand {
}

impl SubCommand {
    async fn build(self, opts: Options) -> Result<Box<dyn App>> {
        let app: Box<dyn App> = match (self, opts) {
    async fn build(self, opts: Options) -> Result<Application> {
        match (self, opts) {
            (SubCommand::Datanode(cmd), Options::Datanode(dn_opts)) => {
                let app = cmd.build(*dn_opts).await?;
                Box::new(app) as _
                Ok(Application::Datanode(app))
            }
            (SubCommand::Frontend(cmd), Options::Frontend(fe_opts)) => {
                let app = cmd.build(*fe_opts).await?;
                Box::new(app) as _
                Ok(Application::Frontend(app))
            }
            (SubCommand::Metasrv(cmd), Options::Metasrv(meta_opts)) => {
                let app = cmd.build(*meta_opts).await?;
                Box::new(app) as _
                Ok(Application::Metasrv(app))
            }
            (SubCommand::Standalone(cmd), Options::Standalone(opts)) => {
                let app = cmd.build(*opts).await?;
                Box::new(app) as _
                Ok(Application::Standalone(app))
            }
            (SubCommand::Cli(cmd), Options::Cli(_)) => {
                let app = cmd.build().await?;
                Box::new(app) as _
                Ok(Application::Cli(app))
            }

            _ => unreachable!(),
        };
        Ok(app)
    }
}

    fn load_options(&self, cli_options: &CliOptions) -> Result<Options> {
    fn load_options(&self, top_level_opts: TopLevelOptions) -> Result<Options> {
        match self {
            SubCommand::Datanode(cmd) => cmd.load_options(cli_options),
            SubCommand::Frontend(cmd) => cmd.load_options(cli_options),
            SubCommand::Metasrv(cmd) => cmd.load_options(cli_options),
            SubCommand::Standalone(cmd) => cmd.load_options(cli_options),
            SubCommand::Cli(cmd) => cmd.load_options(cli_options),
            SubCommand::Datanode(cmd) => cmd.load_options(top_level_opts),
            SubCommand::Frontend(cmd) => cmd.load_options(top_level_opts),
            SubCommand::Metasrv(cmd) => cmd.load_options(top_level_opts),
            SubCommand::Standalone(cmd) => cmd.load_options(top_level_opts),
            SubCommand::Cli(cmd) => cmd.load_options(top_level_opts),
        }
    }
}
@@ -89,49 +155,90 @@ impl fmt::Display for SubCommand {
    }
}

fn print_version() -> &'static str {
    concat!(
        "\nbranch: ",
        env!("GIT_BRANCH"),
        "\ncommit: ",
        env!("GIT_COMMIT"),
        "\ndirty: ",
        env!("GIT_DIRTY"),
        "\nversion: ",
        env!("CARGO_PKG_VERSION")
    )
}

fn short_version() -> &'static str {
    env!("CARGO_PKG_VERSION")
}

// {app_name}-{branch_name}-{commit_short}
// The branch name (tag) of a release build should already contain the short
// version so the full version doesn't concat the short version explicitly.
fn full_version() -> &'static str {
    concat!(
        "greptimedb-",
        env!("GIT_BRANCH"),
        "-",
        env!("GIT_COMMIT_SHORT")
    )
}

fn log_env_flags() {
    info!("command line arguments");
    for argument in std::env::args() {
        info!("argument: {}", argument);
    }
}

#[cfg(not(windows))]
#[global_allocator]
static ALLOC: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc;

#[tokio::main]
async fn main() -> Result<()> {
    let metadata = human_panic::Metadata {
        version: env!("CARGO_PKG_VERSION").into(),
        name: "GreptimeDB".into(),
        authors: Default::default(),
        homepage: "https://github.com/GreptimeTeam/greptimedb/discussions".into(),
    let cmd = Command::parse();
    let app_name = &cmd.subcmd.to_string();

    let opts = cmd.load_options()?;
    let logging_opts = opts.logging_options();
    let tracing_opts = TracingOptions {
        #[cfg(feature = "tokio-console")]
        tokio_console_addr: cmd.tokio_console_addr.clone(),
    };
    human_panic::setup_panic!(metadata);

    common_telemetry::set_panic_hook();
    let _guard =
        common_telemetry::init_global_logging(app_name, logging_opts, tracing_opts, opts.node_id());

    let cli = greptimedb_cli();
    // Report app version as gauge.
    APP_VERSION
        .with_label_values(&[short_version(), full_version()])
        .inc();

    let cli = SubCommand::augment_subcommands(cli);

    let args = cli.get_matches();

    let subcmd = match SubCommand::from_arg_matches(&args) {
        Ok(subcmd) => subcmd,
        Err(e) => e.exit(),
    };

    let app_name = subcmd.to_string();

    let cli_options = CliOptions::new(&args);

    let opts = subcmd.load_options(&cli_options)?;

    let _guard = common_telemetry::init_global_logging(
        &app_name,
        opts.logging_options(),
        cli_options.tracing_options(),
        opts.node_id(),
    // Log version and argument flags.
    info!(
        "short_version: {}, full_version: {}",
        short_version(),
        full_version()
    );
    log_env_flags();

    log_versions();
    let mut app = cmd.build(opts).await?;

    let app = subcmd.build(opts).await?;
    tokio::select! {
        result = app.start() => {
            if let Err(err) = result {
                error!(err; "Fatal error occurs!");
            }
        }
        _ = tokio::signal::ctrl_c() => {
            if let Err(err) = app.stop().await {
                error!(err; "Fatal error occurs!");
            }
            info!("Goodbye!");
        }
    }

    start_app(app).await
    Ok(())
}

@@ -13,15 +13,9 @@
// limitations under the License.

mod bench;

// Wait for https://github.com/GreptimeTeam/greptimedb/issues/2373
#[allow(unused)]
mod cmd;
mod export;
mod helper;

// Wait for https://github.com/GreptimeTeam/greptimedb/issues/2373
#[allow(unused)]
mod repl;
// TODO(weny): Removes it
#[allow(deprecated)]
@@ -36,35 +30,27 @@ use upgrade::UpgradeCommand;

use self::export::ExportCommand;
use crate::error::Result;
use crate::options::{CliOptions, Options};
use crate::App;
use crate::options::{Options, TopLevelOptions};

#[async_trait]
pub trait Tool: Send + Sync {
pub trait Tool {
    async fn do_work(&self) -> Result<()>;
}

pub struct Instance {
    tool: Box<dyn Tool>,
pub enum Instance {
    Repl(Repl),
    Tool(Box<dyn Tool>),
}

impl Instance {
    fn new(tool: Box<dyn Tool>) -> Self {
        Self { tool }
    }
}

#[async_trait]
impl App for Instance {
    fn name(&self) -> &str {
        "greptime-cli"
    pub async fn start(&mut self) -> Result<()> {
        match self {
            Instance::Repl(repl) => repl.run().await,
            Instance::Tool(tool) => tool.do_work().await,
        }
    }

    async fn start(&mut self) -> Result<()> {
        self.tool.do_work().await
    }

    async fn stop(&self) -> Result<()> {
    pub async fn stop(&self) -> Result<()> {
        Ok(())
    }
}
@@ -80,15 +66,14 @@ impl Command {
        self.cmd.build().await
    }

    pub fn load_options(&self, cli_options: &CliOptions) -> Result<Options> {
    pub fn load_options(&self, top_level_opts: TopLevelOptions) -> Result<Options> {
        let mut logging_opts = LoggingOptions::default();

        if let Some(dir) = &cli_options.log_dir {
            logging_opts.dir = dir.clone();
        if let Some(dir) = top_level_opts.log_dir {
            logging_opts.dir = dir;
        }
        if top_level_opts.log_level.is_some() {
            logging_opts.level = top_level_opts.log_level;
        }

        logging_opts.level = cli_options.log_level.clone();

        Ok(Options::Cli(Box::new(logging_opts)))
    }
}
@@ -125,6 +110,7 @@ pub(crate) struct AttachCommand {
impl AttachCommand {
    #[allow(dead_code)]
    async fn build(self) -> Result<Instance> {
        unimplemented!("Wait for https://github.com/GreptimeTeam/greptimedb/issues/2373")
        let repl = Repl::try_new(&self).await?;
        Ok(Instance::Repl(repl))
    }
}

@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::collections::{BTreeMap, HashMap};
use std::collections::BTreeMap;
use std::future::Future;
use std::sync::Arc;
use std::time::Duration;
@@ -28,7 +28,6 @@ use common_telemetry::info;
use datatypes::data_type::ConcreteDataType;
use datatypes::schema::{ColumnSchema, RawSchema};
use rand::Rng;
use store_api::storage::RegionNumber;
use table::metadata::{RawTableInfo, RawTableMeta, TableId, TableIdent, TableType};

use self::metadata::TableMetadataBencher;
@@ -70,7 +69,7 @@ impl BenchTableMetadataCommand {
            table_metadata_manager,
            count: self.count,
        };
        Ok(Instance::new(Box::new(tool)))
        Ok(Instance::Tool(Box::new(tool)))
    }
}

@@ -138,12 +137,12 @@ fn create_table_info(table_id: TableId, table_name: TableName) -> RawTableInfo {
    }
}

fn create_region_routes(regions: Vec<RegionNumber>) -> Vec<RegionRoute> {
    let mut region_routes = Vec::with_capacity(100);
fn create_region_routes() -> Vec<RegionRoute> {
    let mut regions = Vec::with_capacity(100);
    let mut rng = rand::thread_rng();

    for region_id in regions.into_iter().map(u64::from) {
        region_routes.push(RegionRoute {
    for region_id in 0..64u64 {
        regions.push(RegionRoute {
            region: Region {
                id: region_id.into(),
                name: String::new(),
@@ -159,11 +158,5 @@ fn create_region_routes(regions: Vec<RegionNumber>) -> Vec<RegionRoute> {
        });
    }

    region_routes
}

fn create_region_wal_options(regions: Vec<RegionNumber>) -> HashMap<RegionNumber, String> {
    // TODO(niebayes): construct region wal options for benchmark.
    let _ = regions;
    HashMap::default()
    regions
}

@@ -17,9 +17,7 @@ use std::time::Instant;
use common_meta::key::TableMetadataManagerRef;
use common_meta::table_name::TableName;

use crate::cli::bench::{
    bench_self_recorded, create_region_routes, create_region_wal_options, create_table_info,
};
use super::{bench_self_recorded, create_region_routes, create_table_info};

pub struct TableMetadataBencher {
    table_metadata_manager: TableMetadataManagerRef,
@@ -45,15 +43,12 @@ impl TableMetadataBencher {
        let table_name = format!("bench_table_name_{}", i);
        let table_name = TableName::new("bench_catalog", "bench_schema", table_name);
        let table_info = create_table_info(i, table_name);

        let regions: Vec<_> = (0..64).collect();
        let region_routes = create_region_routes(regions.clone());
        let region_wal_options = create_region_wal_options(regions);
        let region_routes = create_region_routes();

        let start = Instant::now();

        self.table_metadata_manager
            .create_table_metadata(table_info, region_routes, region_wal_options)
            .create_table_metadata(table_info, region_routes)
            .await
            .unwrap();

@@ -105,7 +105,7 @@ impl ExportCommand {
            }));
        }

        Ok(Instance::new(Box::new(Export {
        Ok(Instance::Tool(Box::new(Export {
            client: database_client,
            catalog,
            schema,

@@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::collections::HashMap;
use std::sync::Arc;

use async_trait::async_trait;
@@ -77,7 +76,7 @@ impl UpgradeCommand {
            skip_schema_keys: self.skip_schema_keys,
            skip_table_route_keys: self.skip_table_route_keys,
        };
        Ok(Instance::new(Box::new(tool)))
        Ok(Instance::Tool(Box::new(tool)))
    }
}

@@ -396,9 +395,6 @@ impl MigrateTableMetadata {
        let region_distribution: RegionDistribution =
            value.regions_id_map.clone().into_iter().collect();

        // TODO(niebayes): properly fetch or construct wal options.
        let region_wal_options = HashMap::default();

        let datanode_table_kvs = region_distribution
            .into_iter()
            .map(|(datanode_id, regions)| {
@@ -413,7 +409,6 @@ impl MigrateTableMetadata {
                engine: engine.to_string(),
                region_storage_path: region_storage_path.clone(),
                region_options: (&value.table_info.meta.options).into(),
                region_wal_options: region_wal_options.clone(),
            },
        ),
    )

@@ -15,11 +15,9 @@
use std::sync::Arc;
use std::time::Duration;

use async_trait::async_trait;
use catalog::kvbackend::MetaKvBackend;
use clap::Parser;
use common_config::WalConfig;
use common_telemetry::{info, logging};
use common_telemetry::logging;
use datanode::config::DatanodeOptions;
use datanode::datanode::{Datanode, DatanodeBuilder};
use meta_client::MetaClientOptions;
@@ -27,26 +25,14 @@ use servers::Mode;
use snafu::{OptionExt, ResultExt};

use crate::error::{MissingConfigSnafu, Result, ShutdownDatanodeSnafu, StartDatanodeSnafu};
use crate::options::{CliOptions, Options};
use crate::App;
use crate::options::{Options, TopLevelOptions};

pub struct Instance {
    datanode: Datanode,
}

impl Instance {
    fn new(datanode: Datanode) -> Self {
        Self { datanode }
    }
}

#[async_trait]
impl App for Instance {
    fn name(&self) -> &str {
        "greptime-datanode"
    }

    async fn start(&mut self) -> Result<()> {
    pub async fn start(&mut self) -> Result<()> {
        plugins::start_datanode_plugins(self.datanode.plugins())
            .await
            .context(StartDatanodeSnafu)?;
@@ -54,7 +40,7 @@ impl App for Instance {
        self.datanode.start().await.context(StartDatanodeSnafu)
    }

    async fn stop(&self) -> Result<()> {
    pub async fn stop(&self) -> Result<()> {
        self.datanode
            .shutdown()
            .await
@@ -73,8 +59,8 @@ impl Command {
        self.subcmd.build(opts).await
    }

    pub fn load_options(&self, cli_options: &CliOptions) -> Result<Options> {
        self.subcmd.load_options(cli_options)
    pub fn load_options(&self, top_level_opts: TopLevelOptions) -> Result<Options> {
        self.subcmd.load_options(top_level_opts)
    }
}

@@ -90,9 +76,9 @@ impl SubCommand {
        }
    }

    fn load_options(&self, cli_options: &CliOptions) -> Result<Options> {
    fn load_options(&self, top_level_opts: TopLevelOptions) -> Result<Options> {
        match self {
            SubCommand::Start(cmd) => cmd.load_options(cli_options),
            SubCommand::Start(cmd) => cmd.load_options(top_level_opts),
        }
    }
}
@@ -122,19 +108,19 @@ struct StartCommand {
}

impl StartCommand {
    fn load_options(&self, cli_options: &CliOptions) -> Result<Options> {
    fn load_options(&self, top_level_opts: TopLevelOptions) -> Result<Options> {
        let mut opts: DatanodeOptions = Options::load_layered_options(
            self.config_file.as_deref(),
            self.env_prefix.as_ref(),
            DatanodeOptions::env_list_keys(),
        )?;

        if let Some(dir) = &cli_options.log_dir {
            opts.logging.dir = dir.clone();
        if let Some(dir) = top_level_opts.log_dir {
            opts.logging.dir = dir;
        }

        if cli_options.log_level.is_some() {
            opts.logging.level = cli_options.log_level.clone();
        if top_level_opts.log_level.is_some() {
            opts.logging.level = top_level_opts.log_level;
        }

        if let Some(addr) = &self.rpc_addr {
@@ -167,18 +153,8 @@ impl StartCommand {
            opts.storage.data_home = data_home.clone();
        }

        // `wal_dir` only affects raft-engine config.
        if let Some(wal_dir) = &self.wal_dir
            && let WalConfig::RaftEngine(raft_engine_config) = &mut opts.wal
        {
            if raft_engine_config
                .dir
                .as_ref()
                .is_some_and(|original_dir| original_dir != wal_dir)
            {
                info!("The wal dir of raft-engine is altered to {wal_dir}");
            }
            raft_engine_config.dir.replace(wal_dir.clone());
        if let Some(wal_dir) = &self.wal_dir {
            opts.wal.dir = Some(wal_dir.clone());
        }

        if let Some(http_addr) = &self.http_addr {
@@ -228,7 +204,7 @@ impl StartCommand {
            .await
            .context(StartDatanodeSnafu)?;

        Ok(Instance::new(datanode))
        Ok(Instance { datanode })
    }
}

@@ -238,12 +214,12 @@ mod tests {
    use std::time::Duration;

    use common_test_util::temp_dir::create_named_temp_file;
    use datanode::config::{FileConfig, GcsConfig, ObjectStoreConfig, S3Config};
    use datanode::config::{FileConfig, ObjectStoreConfig};
    use servers::heartbeat_options::HeartbeatOptions;
    use servers::Mode;

    use super::*;
    use crate::options::{CliOptions, ENV_VAR_SEP};
    use crate::options::ENV_VAR_SEP;

    #[test]
    fn test_read_from_config_file() {
@@ -267,7 +243,6 @@ mod tests {
        tcp_nodelay = true

        [wal]
        provider = "raft_engine"
        dir = "/other/wal"
        file_size = "1GB"
        purge_threshold = "50GB"
@@ -276,17 +251,8 @@ mod tests {
        sync_write = false

        [storage]
        data_home = "/tmp/greptimedb/"
        type = "File"

        [[storage.providers]]
        type = "Gcs"
        bucket = "foo"
        endpoint = "bar"

        [[storage.providers]]
        type = "S3"
        bucket = "foo"
        data_home = "/tmp/greptimedb/"

        [logging]
        level = "debug"
@@ -299,24 +265,19 @@ mod tests {
            ..Default::default()
        };

        let Options::Datanode(options) = cmd.load_options(&CliOptions::default()).unwrap() else {
        let Options::Datanode(options) = cmd.load_options(TopLevelOptions::default()).unwrap()
        else {
            unreachable!()
        };

        assert_eq!("127.0.0.1:3001".to_string(), options.rpc_addr);
        assert_eq!(Some(42), options.node_id);
        assert_eq!("/other/wal", options.wal.dir.unwrap());

        let WalConfig::RaftEngine(raft_engine_config) = options.wal else {
            unreachable!()
        };
        assert_eq!("/other/wal", raft_engine_config.dir.unwrap());
        assert_eq!(Duration::from_secs(600), raft_engine_config.purge_interval);
        assert_eq!(1024 * 1024 * 1024, raft_engine_config.file_size.0);
        assert_eq!(
            1024 * 1024 * 1024 * 50,
            raft_engine_config.purge_threshold.0
        );
        assert!(!raft_engine_config.sync_write);
        assert_eq!(Duration::from_secs(600), options.wal.purge_interval);
        assert_eq!(1024 * 1024 * 1024, options.wal.file_size.0);
        assert_eq!(1024 * 1024 * 1024 * 50, options.wal.purge_threshold.0);
        assert!(!options.wal.sync_write);

        let HeartbeatOptions {
            interval: heart_beat_interval,
@@ -344,15 +305,6 @@ mod tests {
            &options.storage.store,
            ObjectStoreConfig::File(FileConfig { .. })
        ));
        assert_eq!(options.storage.providers.len(), 2);
        assert!(matches!(
            options.storage.providers[0],
            ObjectStoreConfig::Gcs(GcsConfig { .. })
        ));
        assert!(matches!(
            options.storage.providers[1],
            ObjectStoreConfig::S3(S3Config { .. })
        ));

        assert_eq!("debug", options.logging.level.unwrap());
        assert_eq!("/tmp/greptimedb/test/logs".to_string(), options.logging.dir);
@@ -361,7 +313,7 @@ mod tests {
    #[test]
    fn test_try_from_cmd() {
        if let Options::Datanode(opt) = StartCommand::default()
            .load_options(&CliOptions::default())
            .load_options(TopLevelOptions::default())
            .unwrap()
        {
            assert_eq!(Mode::Standalone, opt.mode)
@@ -372,7 +324,7 @@ mod tests {
            metasrv_addr: Some(vec!["127.0.0.1:3002".to_string()]),
            ..Default::default()
        })
        .load_options(&CliOptions::default())
        .load_options(TopLevelOptions::default())
        .unwrap()
        {
            assert_eq!(Mode::Distributed, opt.mode)
@@ -382,7 +334,7 @@ mod tests {
            metasrv_addr: Some(vec!["127.0.0.1:3002".to_string()]),
            ..Default::default()
        })
        .load_options(&CliOptions::default())
        .load_options(TopLevelOptions::default())
        .is_err());

        // Providing node_id but leave metasrv_addr absent is ok since metasrv_addr has default value
@@ -390,21 +342,18 @@ mod tests {
            node_id: Some(42),
            ..Default::default()
        })
        .load_options(&CliOptions::default())
        .load_options(TopLevelOptions::default())
        .is_ok());
    }

    #[test]
    fn test_load_log_options_from_cli() {
    fn test_top_level_options() {
        let cmd = StartCommand::default();

        let options = cmd
            .load_options(&CliOptions {
            .load_options(TopLevelOptions {
                log_dir: Some("/tmp/greptimedb/test/logs".to_string()),
                log_level: Some("debug".to_string()),

                #[cfg(feature = "tokio-console")]
                tokio_console_addr: None,
            })
            .unwrap();

@@ -430,10 +379,9 @@ mod tests {
        tcp_nodelay = true

        [wal]
        provider = "raft_engine"
        file_size = "1GB"
        purge_threshold = "50GB"
        purge_interval = "5m"
        purge_interval = "10m"
        sync_write = false

        [storage]
@@ -488,16 +436,14 @@ mod tests {
            ..Default::default()
        };

        let Options::Datanode(opts) = command.load_options(&CliOptions::default()).unwrap()
        let Options::Datanode(opts) =
            command.load_options(TopLevelOptions::default()).unwrap()
        else {
            unreachable!()
        };

        // Should be read from env, env > default values.
        let WalConfig::RaftEngine(raft_engine_config) = opts.wal else {
            unreachable!()
        };
        assert_eq!(raft_engine_config.read_batch_size, 100);
        assert_eq!(opts.wal.read_batch_size, 100,);
        assert_eq!(
            opts.meta_client.unwrap().metasrv_addrs,
            vec![
@@ -508,13 +454,10 @@ mod tests {
        );

        // Should be read from config file, config file > env > default values.
        assert_eq!(
            raft_engine_config.purge_interval,
            Duration::from_secs(60 * 5)
        );
        assert_eq!(opts.wal.purge_interval, Duration::from_secs(60 * 10));

        // Should be read from cli, cli > config file > env > default values.
        assert_eq!(raft_engine_config.dir.unwrap(), "/other/wal/dir");
        assert_eq!(opts.wal.dir.unwrap(), "/other/wal/dir");

        // Should be default value.
        assert_eq!(opts.http.addr, DatanodeOptions::default().http.addr);

@@ -14,7 +14,7 @@

use std::any::Any;

use common_error::ext::{BoxedError, ErrorExt};
use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
use config::ConfigError;
@@ -55,12 +55,6 @@ pub enum Error {
        source: common_procedure::error::Error,
    },

    #[snafu(display("Failed to start wal options allocator"))]
    StartWalOptionsAllocator {
        location: Location,
        source: common_meta::error::Error,
    },

    #[snafu(display("Failed to start datanode"))]
    StartDatanode {
        location: Location,
@@ -237,12 +231,6 @@ pub enum Error {
        #[snafu(source)]
        error: std::io::Error,
    },

    #[snafu(display("Other error"))]
    Other {
        source: BoxedError,
        location: Location,
    },
}

pub type Result<T> = std::result::Result<T, Error>;
@@ -276,7 +264,6 @@ impl ErrorExt for Error {

            Error::StartProcedureManager { source, .. }
            | Error::StopProcedureManager { source, .. } => source.status_code(),
            Error::StartWalOptionsAllocator { source, .. } => source.status_code(),
            Error::ReplCreation { .. } | Error::Readline { .. } => StatusCode::Internal,
            Error::RequestDatabase { source, .. } => source.status_code(),
            Error::CollectRecordBatches { source, .. }
@@ -289,8 +276,6 @@ impl ErrorExt for Error {
            Error::StartCatalogManager { source, .. } => source.status_code(),

            Error::SerdeJson { .. } | Error::FileIo { .. } => StatusCode::Unexpected,

            Error::Other { source, .. } => source.status_code(),
        }
    }

@@ -15,7 +15,6 @@
use std::sync::Arc;
use std::time::Duration;

use async_trait::async_trait;
use catalog::kvbackend::CachedMetaKvBackend;
use clap::Parser;
use client::client_manager::DatanodeClients;
@@ -33,26 +32,14 @@ use servers::Mode;
use snafu::{OptionExt, ResultExt};

use crate::error::{self, MissingConfigSnafu, Result, StartFrontendSnafu};
use crate::options::{CliOptions, Options};
use crate::App;
use crate::options::{Options, TopLevelOptions};

pub struct Instance {
    frontend: FeInstance,
}

impl Instance {
    fn new(frontend: FeInstance) -> Self {
        Self { frontend }
    }
}

#[async_trait]
impl App for Instance {
    fn name(&self) -> &str {
        "greptime-frontend"
    }

    async fn start(&mut self) -> Result<()> {
    pub async fn start(&mut self) -> Result<()> {
        plugins::start_frontend_plugins(self.frontend.plugins().clone())
            .await
            .context(StartFrontendSnafu)?;
@@ -60,7 +47,7 @@ impl App for Instance {
        self.frontend.start().await.context(StartFrontendSnafu)
    }

    async fn stop(&self) -> Result<()> {
    pub async fn stop(&self) -> Result<()> {
        self.frontend
            .shutdown()
            .await
@@ -79,8 +66,8 @@ impl Command {
        self.subcmd.build(opts).await
    }

    pub fn load_options(&self, cli_options: &CliOptions) -> Result<Options> {
        self.subcmd.load_options(cli_options)
    pub fn load_options(&self, top_level_opts: TopLevelOptions) -> Result<Options> {
        self.subcmd.load_options(top_level_opts)
    }
}

@@ -96,9 +83,9 @@ impl SubCommand {
        }
    }

    fn load_options(&self, cli_options: &CliOptions) -> Result<Options> {
    fn load_options(&self, top_level_opts: TopLevelOptions) -> Result<Options> {
        match self {
            SubCommand::Start(cmd) => cmd.load_options(cli_options),
            SubCommand::Start(cmd) => cmd.load_options(top_level_opts),
        }
    }
}
@@ -138,19 +125,19 @@ pub struct StartCommand {
}

impl StartCommand {
    fn load_options(&self, cli_options: &CliOptions) -> Result<Options> {
    fn load_options(&self, top_level_opts: TopLevelOptions) -> Result<Options> {
        let mut opts: FrontendOptions = Options::load_layered_options(
            self.config_file.as_deref(),
            self.env_prefix.as_ref(),
            FrontendOptions::env_list_keys(),
        )?;

        if let Some(dir) = &cli_options.log_dir {
            opts.logging.dir = dir.clone();
        if let Some(dir) = top_level_opts.log_dir {
            opts.logging.dir = dir;
        }

        if cli_options.log_level.is_some() {
            opts.logging.level = cli_options.log_level.clone();
        if top_level_opts.log_level.is_some() {
            opts.logging.level = top_level_opts.log_level;
        }

        let tls_opts = TlsOption::new(
@@ -249,16 +236,12 @@ impl StartCommand {
            .await
            .context(StartFrontendSnafu)?;

        instance
            .build_export_metrics_task(&opts.export_metrics)
            .context(StartFrontendSnafu)?;

        instance
            .build_servers(opts)
            .await
            .context(StartFrontendSnafu)?;

        Ok(Instance::new(instance))
        Ok(Instance { frontend: instance })
    }
}

@@ -274,7 +257,7 @@ mod tests {
    use servers::http::HttpOptions;

    use super::*;
    use crate::options::{CliOptions, ENV_VAR_SEP};
    use crate::options::ENV_VAR_SEP;

    #[test]
    fn test_try_from_start_command() {
@@ -288,7 +271,8 @@ mod tests {
            ..Default::default()
        };

        let Options::Frontend(opts) = command.load_options(&CliOptions::default()).unwrap() else {
        let Options::Frontend(opts) = command.load_options(TopLevelOptions::default()).unwrap()
        else {
            unreachable!()
        };

@@ -340,7 +324,7 @@ mod tests {
            ..Default::default()
        };

        let Options::Frontend(fe_opts) = command.load_options(&CliOptions::default()).unwrap()
        let Options::Frontend(fe_opts) = command.load_options(TopLevelOptions::default()).unwrap()
        else {
            unreachable!()
        };
@@ -379,19 +363,16 @@ mod tests {
    }

    #[test]
    fn test_load_log_options_from_cli() {
    fn test_top_level_options() {
        let cmd = StartCommand {
            disable_dashboard: Some(false),
            ..Default::default()
        };

        let options = cmd
            .load_options(&CliOptions {
            .load_options(TopLevelOptions {
                log_dir: Some("/tmp/greptimedb/test/logs".to_string()),
                log_level: Some("debug".to_string()),

                #[cfg(feature = "tokio-console")]
                tokio_console_addr: None,
            })
            .unwrap();

@@ -471,8 +452,11 @@ mod tests {
            ..Default::default()
        };

        let Options::Frontend(fe_opts) =
            command.load_options(&CliOptions::default()).unwrap()
        let top_level_opts = TopLevelOptions {
            log_dir: None,
            log_level: Some("error".to_string()),
        };
        let Options::Frontend(fe_opts) = command.load_options(top_level_opts).unwrap()
        else {
            unreachable!()
        };

@@ -12,11 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

#![feature(assert_matches, let_chains)]

use async_trait::async_trait;
use clap::arg;
use common_telemetry::{error, info};
#![feature(assert_matches)]

pub mod cli;
pub mod datanode;
@@ -25,100 +21,3 @@ pub mod frontend;
pub mod metasrv;
pub mod options;
pub mod standalone;

lazy_static::lazy_static! {
    static ref APP_VERSION: prometheus::IntGaugeVec =
        prometheus::register_int_gauge_vec!("app_version", "app version", &["short_version", "version"]).unwrap();
}

#[async_trait]
pub trait App {
    fn name(&self) -> &str;

    async fn start(&mut self) -> error::Result<()>;

    async fn stop(&self) -> error::Result<()>;
}

pub async fn start_app(mut app: Box<dyn App>) -> error::Result<()> {
    let name = app.name().to_string();

    tokio::select! {
        result = app.start() => {
            if let Err(err) = result {
                error!(err; "Failed to start app {name}!");
            }
        }
        _ = tokio::signal::ctrl_c() => {
            if let Err(err) = app.stop().await {
                error!(err; "Failed to stop app {name}!");
            }
            info!("Goodbye!");
        }
    }

    Ok(())
}

pub fn log_versions() {
    // Report app version as gauge.
    APP_VERSION
        .with_label_values(&[short_version(), full_version()])
        .inc();

    // Log version and argument flags.
    info!(
        "short_version: {}, full_version: {}",
        short_version(),
        full_version()
    );

    log_env_flags();
}

pub fn greptimedb_cli() -> clap::Command {
    let cmd = clap::Command::new("greptimedb")
        .version(print_version())
        .subcommand_required(true);

    #[cfg(feature = "tokio-console")]
    let cmd = cmd.arg(arg!(--"tokio-console-addr"[TOKIO_CONSOLE_ADDR]));

    cmd.args([arg!(--"log-dir"[LOG_DIR]), arg!(--"log-level"[LOG_LEVEL])])
}

fn print_version() -> &'static str {
    concat!(
        "\nbranch: ",
        env!("GIT_BRANCH"),
        "\ncommit: ",
        env!("GIT_COMMIT"),
        "\ndirty: ",
        env!("GIT_DIRTY"),
        "\nversion: ",
        env!("CARGO_PKG_VERSION")
    )
}

fn short_version() -> &'static str {
    env!("CARGO_PKG_VERSION")
}

// {app_name}-{branch_name}-{commit_short}
// The branch name (tag) of a release build should already contain the short
// version so the full version doesn't concat the short version explicitly.
fn full_version() -> &'static str {
    concat!(
        "greptimedb-",
        env!("GIT_BRANCH"),
        "-",
        env!("GIT_COMMIT_SHORT")
    )
}

fn log_env_flags() {
    info!("command line arguments");
    for argument in std::env::args() {
        info!("argument: {}", argument);
    }
}

@@ -14,7 +14,6 @@

use std::time::Duration;

use async_trait::async_trait;
use clap::Parser;
use common_telemetry::logging;
use meta_srv::bootstrap::MetaSrvInstance;
@@ -22,34 +21,21 @@ use meta_srv::metasrv::MetaSrvOptions;
use snafu::ResultExt;

use crate::error::{self, Result, StartMetaServerSnafu};
use crate::options::{CliOptions, Options};
use crate::App;
use crate::options::{Options, TopLevelOptions};

pub struct Instance {
    instance: MetaSrvInstance,
}

impl Instance {
    fn new(instance: MetaSrvInstance) -> Self {
        Self { instance }
    }
}

#[async_trait]
impl App for Instance {
    fn name(&self) -> &str {
        "greptime-metasrv"
    }

    async fn start(&mut self) -> Result<()> {
    pub async fn start(&mut self) -> Result<()> {
        plugins::start_meta_srv_plugins(self.instance.plugins())
            .await
            .context(StartMetaServerSnafu)?;

        self.instance.start().await.context(StartMetaServerSnafu)
    }

    async fn stop(&self) -> Result<()> {
    pub async fn stop(&self) -> Result<()> {
        self.instance
            .shutdown()
            .await
@@ -68,8 +54,8 @@ impl Command {
        self.subcmd.build(opts).await
    }

    pub fn load_options(&self, cli_options: &CliOptions) -> Result<Options> {
        self.subcmd.load_options(cli_options)
    pub fn load_options(&self, top_level_opts: TopLevelOptions) -> Result<Options> {
        self.subcmd.load_options(top_level_opts)
    }
}

@@ -85,9 +71,9 @@ impl SubCommand {
        }
    }

    fn load_options(&self, cli_options: &CliOptions) -> Result<Options> {
    fn load_options(&self, top_level_opts: TopLevelOptions) -> Result<Options> {
        match self {
            SubCommand::Start(cmd) => cmd.load_options(cli_options),
            SubCommand::Start(cmd) => cmd.load_options(top_level_opts),
        }
    }
}
@@ -117,26 +103,22 @@ struct StartCommand {
    /// The working home directory of this metasrv instance.
    #[clap(long)]
    data_home: Option<String>,

    /// If it's not empty, the metasrv will store all data with this key prefix.
    #[clap(long)]
    store_key_prefix: Option<String>,
}

impl StartCommand {
    fn load_options(&self, cli_options: &CliOptions) -> Result<Options> {
    fn load_options(&self, top_level_opts: TopLevelOptions) -> Result<Options> {
        let mut opts: MetaSrvOptions = Options::load_layered_options(
            self.config_file.as_deref(),
            self.env_prefix.as_ref(),
            None,
        )?;

        if let Some(dir) = &cli_options.log_dir {
            opts.logging.dir = dir.clone();
        if let Some(dir) = top_level_opts.log_dir {
            opts.logging.dir = dir;
        }

        if cli_options.log_level.is_some() {
            opts.logging.level = cli_options.log_level.clone();
        if top_level_opts.log_level.is_some() {
            opts.logging.level = top_level_opts.log_level;
        }

        if let Some(addr) = &self.bind_addr {
@@ -177,8 +159,6 @@ impl StartCommand {
            opts.data_home = data_home.clone();
        }

        opts.store_key_prefix = self.store_key_prefix.clone();

        // Disable dashboard in metasrv.
        opts.http.disable_dashboard = true;

@@ -202,7 +182,7 @@ impl StartCommand {
            .await
            .context(error::BuildMetaServerSnafu)?;

        Ok(Instance::new(instance))
        Ok(Instance { instance })
    }
}

@@ -226,7 +206,8 @@ mod tests {
            ..Default::default()
        };

        let Options::Metasrv(options) = cmd.load_options(&CliOptions::default()).unwrap() else {
        let Options::Metasrv(options) = cmd.load_options(TopLevelOptions::default()).unwrap()
        else {
            unreachable!()
        };
        assert_eq!("127.0.0.1:3002".to_string(), options.bind_addr);
@@ -261,7 +242,8 @@ mod tests {
            ..Default::default()
        };

        let Options::Metasrv(options) = cmd.load_options(&CliOptions::default()).unwrap() else {
        let Options::Metasrv(options) = cmd.load_options(TopLevelOptions::default()).unwrap()
        else {
            unreachable!()
        };
        assert_eq!("127.0.0.1:3002".to_string(), options.bind_addr);
@@ -292,7 +274,7 @@ mod tests {
    }

    #[test]
    fn test_load_log_options_from_cli() {
    fn test_top_level_options() {
        let cmd = StartCommand {
            bind_addr: Some("127.0.0.1:3002".to_string()),
            server_addr: Some("127.0.0.1:3002".to_string()),
@@ -302,12 +284,9 @@ mod tests {
        };

        let options = cmd
            .load_options(&CliOptions {
            .load_options(TopLevelOptions {
                log_dir: Some("/tmp/greptimedb/test/logs".to_string()),
                log_level: Some("debug".to_string()),

                #[cfg(feature = "tokio-console")]
                tokio_console_addr: None,
            })
            .unwrap();

@@ -366,7 +345,8 @@ mod tests {
            ..Default::default()
        };

        let Options::Metasrv(opts) = command.load_options(&CliOptions::default()).unwrap()
        let Options::Metasrv(opts) =
            command.load_options(TopLevelOptions::default()).unwrap()
        else {
            unreachable!()
        };

@@ -12,10 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use clap::ArgMatches;
use common_config::KvBackendConfig;
use common_meta::wal::WalConfig as MetaSrvWalConfig;
use common_telemetry::logging::{LoggingOptions, TracingOptions};
use common_telemetry::logging::LoggingOptions;
use config::{Config, Environment, File, FileFormat};
use datanode::config::{DatanodeOptions, ProcedureConfig};
use frontend::error::{Result as FeResult, TomlFormatSnafu};
@@ -30,7 +28,7 @@ pub const ENV_VAR_SEP: &str = "__";
pub const ENV_LIST_SEP: &str = ",";

/// Options mixed up from datanode, frontend and metasrv.
#[derive(Serialize, Debug, Clone)]
#[derive(Serialize, Debug)]
pub struct MixOptions {
    pub data_home: String,
    pub procedure: ProcedureConfig,
@@ -38,7 +36,6 @@ pub struct MixOptions {
    pub frontend: FrontendOptions,
    pub datanode: DatanodeOptions,
    pub logging: LoggingOptions,
    pub wal_meta: MetaSrvWalConfig,
}

impl From<MixOptions> for FrontendOptions {
@@ -61,32 +58,10 @@ pub enum Options {
    Cli(Box<LoggingOptions>),
}

#[derive(Default)]
pub struct CliOptions {
#[derive(Clone, Debug, Default)]
pub struct TopLevelOptions {
    pub log_dir: Option<String>,
    pub log_level: Option<String>,

    #[cfg(feature = "tokio-console")]
    pub tokio_console_addr: Option<String>,
}

impl CliOptions {
    pub fn new(args: &ArgMatches) -> Self {
        Self {
            log_dir: args.get_one::<String>("log-dir").cloned(),
            log_level: args.get_one::<String>("log-level").cloned(),

            #[cfg(feature = "tokio-console")]
            tokio_console_addr: args.get_one::<String>("tokio-console-addr").cloned(),
        }
    }

    pub fn tracing_options(&self) -> TracingOptions {
        TracingOptions {
            #[cfg(feature = "tokio-console")]
            tokio_console_addr: self.tokio_console_addr.clone(),
        }
    }
}

impl Options {
@@ -173,7 +148,6 @@ impl Options {
mod tests {
    use std::io::Write;

    use common_config::WalConfig;
    use common_test_util::temp_dir::create_named_temp_file;
    use datanode::config::{DatanodeOptions, ObjectStoreConfig};

@@ -197,7 +171,6 @@ mod tests {
        tcp_nodelay = true

        [wal]
        provider = "raft_engine"
        dir = "/tmp/greptimedb/wal"
        file_size = "1GB"
        purge_threshold = "50GB"
@@ -265,7 +238,7 @@ mod tests {
        .unwrap();

        // Check the configs from environment variables.
        match &opts.storage.store {
        match opts.storage.store {
            ObjectStoreConfig::S3(s3_config) => {
                assert_eq!(s3_config.bucket, "mybucket".to_string());
            }
@@ -281,10 +254,7 @@ mod tests {
        );

        // Should be the values from config file, not environment variables.
        let WalConfig::RaftEngine(raft_engine_config) = opts.wal else {
            unreachable!()
        };
        assert_eq!(raft_engine_config.dir.unwrap(), "/tmp/greptimedb/wal");
        assert_eq!(opts.wal.dir.unwrap(), "/tmp/greptimedb/wal");

        // Should be default values.
        assert_eq!(opts.node_id, None);

@@ -15,20 +15,14 @@
use std::sync::Arc;
use std::{fs, path};

use async_trait::async_trait;
use clap::Parser;
use common_catalog::consts::MIN_USER_TABLE_ID;
use common_config::wal::StandaloneWalConfig;
use common_config::{metadata_store_dir, KvBackendConfig};
use common_config::{metadata_store_dir, KvBackendConfig, WalConfig};
use common_meta::cache_invalidator::DummyCacheInvalidator;
use common_meta::datanode_manager::DatanodeManagerRef;
use common_meta::ddl::{DdlTaskExecutorRef, TableMetadataAllocatorRef};
use common_meta::ddl::DdlTaskExecutorRef;
use common_meta::ddl_manager::DdlManager;
use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
use common_meta::kv_backend::KvBackendRef;
use common_meta::region_keeper::MemoryRegionKeeper;
use common_meta::sequence::SequenceBuilder;
use common_meta::wal::{WalOptionsAllocator, WalOptionsAllocatorRef};
use common_procedure::ProcedureManagerRef;
use common_telemetry::info;
use common_telemetry::logging::LoggingOptions;
@@ -37,14 +31,13 @@ use datanode::datanode::{Datanode, DatanodeBuilder};
use file_engine::config::EngineConfig as FileEngineConfig;
use frontend::frontend::FrontendOptions;
use frontend::instance::builder::FrontendBuilder;
use frontend::instance::standalone::StandaloneTableMetadataAllocator;
use frontend::instance::standalone::StandaloneTableMetadataCreator;
use frontend::instance::{FrontendInstance, Instance as FeInstance, StandaloneDatanodeManager};
use frontend::service_config::{
    GrpcOptions, InfluxdbOptions, MysqlOptions, OpentsdbOptions, PostgresOptions, PromStoreOptions,
};
use mito2::config::MitoConfig;
use serde::{Deserialize, Serialize};
use servers::export_metrics::ExportMetricsOption;
use servers::http::HttpOptions;
use servers::tls::{TlsMode, TlsOption};
use servers::Mode;
@@ -53,10 +46,9 @@ use snafu::ResultExt;
use crate::error::{
    CreateDirSnafu, IllegalConfigSnafu, InitDdlManagerSnafu, InitMetadataSnafu, Result,
    ShutdownDatanodeSnafu, ShutdownFrontendSnafu, StartDatanodeSnafu, StartFrontendSnafu,
    StartProcedureManagerSnafu, StartWalOptionsAllocatorSnafu, StopProcedureManagerSnafu,
    StartProcedureManagerSnafu, StopProcedureManagerSnafu,
};
use crate::options::{CliOptions, MixOptions, Options};
use crate::App;
use crate::options::{MixOptions, Options, TopLevelOptions};

#[derive(Parser)]
pub struct Command {
@@ -69,8 +61,8 @@ impl Command {
        self.subcmd.build(opts).await
    }

    pub fn load_options(&self, cli_options: &CliOptions) -> Result<Options> {
        self.subcmd.load_options(cli_options)
    pub fn load_options(&self, top_level_options: TopLevelOptions) -> Result<Options> {
        self.subcmd.load_options(top_level_options)
    }
}

@@ -86,9 +78,9 @@ impl SubCommand {
        }
    }

    fn load_options(&self, cli_options: &CliOptions) -> Result<Options> {
    fn load_options(&self, top_level_options: TopLevelOptions) -> Result<Options> {
        match self {
            SubCommand::Start(cmd) => cmd.load_options(cli_options),
            SubCommand::Start(cmd) => cmd.load_options(top_level_options),
        }
    }
}
@@ -105,7 +97,7 @@ pub struct StandaloneOptions {
    pub opentsdb: OpentsdbOptions,
    pub influxdb: InfluxdbOptions,
    pub prom_store: PromStoreOptions,
    pub wal: StandaloneWalConfig,
    pub wal: WalConfig,
    pub storage: StorageConfig,
    pub metadata_store: KvBackendConfig,
|
||||
pub procedure: ProcedureConfig,
|
||||
@@ -113,7 +105,6 @@ pub struct StandaloneOptions {
|
||||
pub user_provider: Option<String>,
|
||||
/// Options for different store engines.
|
||||
pub region_engine: Vec<RegionEngineConfig>,
|
||||
pub export_metrics: ExportMetricsOption,
|
||||
}
|
||||
|
||||
impl Default for StandaloneOptions {
|
||||
@@ -128,12 +119,11 @@ impl Default for StandaloneOptions {
|
||||
opentsdb: OpentsdbOptions::default(),
|
||||
influxdb: InfluxdbOptions::default(),
|
||||
prom_store: PromStoreOptions::default(),
|
||||
wal: StandaloneWalConfig::default(),
|
||||
wal: WalConfig::default(),
|
||||
storage: StorageConfig::default(),
|
||||
metadata_store: KvBackendConfig::default(),
|
||||
procedure: ProcedureConfig::default(),
|
||||
logging: LoggingOptions::default(),
|
||||
export_metrics: ExportMetricsOption::default(),
|
||||
user_provider: None,
|
||||
region_engine: vec![
|
||||
RegionEngineConfig::Mito(MitoConfig::default()),
|
||||
@@ -157,8 +147,6 @@ impl StandaloneOptions {
|
||||
meta_client: None,
|
||||
logging: self.logging,
|
||||
user_provider: self.user_provider,
|
||||
// Hand the export metrics task of standalone over to the frontend for execution
|
||||
export_metrics: self.export_metrics,
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
@@ -167,7 +155,7 @@ impl StandaloneOptions {
|
||||
DatanodeOptions {
|
||||
node_id: Some(0),
|
||||
enable_telemetry: self.enable_telemetry,
|
||||
wal: self.wal.into(),
|
||||
wal: self.wal,
|
||||
storage: self.storage,
|
||||
region_engine: self.region_engine,
|
||||
rpc_addr: self.grpc.addr,
|
||||
@@ -180,16 +168,10 @@ pub struct Instance {
|
||||
datanode: Datanode,
|
||||
frontend: FeInstance,
|
||||
procedure_manager: ProcedureManagerRef,
|
||||
wal_options_allocator: WalOptionsAllocatorRef,
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl App for Instance {
|
||||
fn name(&self) -> &str {
|
||||
"greptime-standalone"
|
||||
}
|
||||
|
||||
async fn start(&mut self) -> Result<()> {
|
||||
impl Instance {
|
||||
pub async fn start(&mut self) -> Result<()> {
|
||||
self.datanode.start_telemetry();
|
||||
|
||||
self.procedure_manager
|
||||
@@ -197,16 +179,11 @@ impl App for Instance {
|
||||
.await
|
||||
.context(StartProcedureManagerSnafu)?;
|
||||
|
||||
self.wal_options_allocator
|
||||
.start()
|
||||
.await
|
||||
.context(StartWalOptionsAllocatorSnafu)?;
|
||||
|
||||
self.frontend.start().await.context(StartFrontendSnafu)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn stop(&self) -> Result<()> {
|
||||
pub async fn stop(&self) -> Result<()> {
|
||||
self.frontend
|
||||
.shutdown()
|
||||
.await
|
||||
@@ -228,7 +205,7 @@ impl App for Instance {
|
||||
}
|
||||
|
||||
#[derive(Debug, Default, Parser)]
|
||||
pub struct StartCommand {
|
||||
struct StartCommand {
|
||||
#[clap(long)]
|
||||
http_addr: Option<String>,
|
||||
#[clap(long)]
|
||||
@@ -242,7 +219,7 @@ pub struct StartCommand {
|
||||
#[clap(short, long)]
|
||||
influxdb_enable: bool,
|
||||
#[clap(short, long)]
|
||||
pub config_file: Option<String>,
|
||||
config_file: Option<String>,
|
||||
#[clap(long)]
|
||||
tls_mode: Option<TlsMode>,
|
||||
#[clap(long)]
|
||||
@@ -252,36 +229,28 @@ pub struct StartCommand {
|
||||
#[clap(long)]
|
||||
user_provider: Option<String>,
|
||||
#[clap(long, default_value = "GREPTIMEDB_STANDALONE")]
|
||||
pub env_prefix: String,
|
||||
env_prefix: String,
|
||||
/// The working home directory of this standalone instance.
|
||||
#[clap(long)]
|
||||
data_home: Option<String>,
|
||||
}
|
||||
|
||||
impl StartCommand {
|
||||
fn load_options(&self, cli_options: &CliOptions) -> Result<Options> {
|
||||
let opts: StandaloneOptions = Options::load_layered_options(
|
||||
fn load_options(&self, top_level_options: TopLevelOptions) -> Result<Options> {
|
||||
let mut opts: StandaloneOptions = Options::load_layered_options(
|
||||
self.config_file.as_deref(),
|
||||
self.env_prefix.as_ref(),
|
||||
None,
|
||||
)?;
|
||||
|
||||
self.convert_options(cli_options, opts)
|
||||
}
|
||||
|
||||
pub fn convert_options(
|
||||
&self,
|
||||
cli_options: &CliOptions,
|
||||
mut opts: StandaloneOptions,
|
||||
) -> Result<Options> {
|
||||
opts.mode = Mode::Standalone;
|
||||
|
||||
if let Some(dir) = &cli_options.log_dir {
|
||||
opts.logging.dir = dir.clone();
|
||||
if let Some(dir) = top_level_options.log_dir {
|
||||
opts.logging.dir = dir;
|
||||
}
|
||||
|
||||
if cli_options.log_level.is_some() {
|
||||
opts.logging.level = cli_options.log_level.clone();
|
||||
if top_level_options.log_level.is_some() {
|
||||
opts.logging.level = top_level_options.log_level;
|
||||
}
|
||||
|
||||
let tls_opts = TlsOption::new(
|
||||
@@ -339,8 +308,7 @@ impl StartCommand {
|
||||
let procedure = opts.procedure.clone();
|
||||
let frontend = opts.clone().frontend_options();
|
||||
let logging = opts.logging.clone();
|
||||
let wal_meta = opts.wal.clone().into();
|
||||
let datanode = opts.datanode_options().clone();
|
||||
let datanode = opts.datanode_options();
|
||||
|
||||
Ok(Options::Standalone(Box::new(MixOptions {
|
||||
procedure,
|
||||
@@ -349,7 +317,6 @@ impl StartCommand {
|
||||
frontend,
|
||||
datanode,
|
||||
logging,
|
||||
wal_meta,
|
||||
})))
|
||||
}
|
||||
|
||||
@@ -389,26 +356,10 @@ impl StartCommand {
|
||||
|
||||
let datanode_manager = Arc::new(StandaloneDatanodeManager(datanode.region_server()));
|
||||
|
||||
let table_id_sequence = Arc::new(
|
||||
SequenceBuilder::new("table_id", kv_backend.clone())
|
||||
.initial(MIN_USER_TABLE_ID as u64)
|
||||
.step(10)
|
||||
.build(),
|
||||
);
|
||||
let wal_options_allocator = Arc::new(WalOptionsAllocator::new(
|
||||
opts.wal_meta.clone(),
|
||||
kv_backend.clone(),
|
||||
));
|
||||
let table_meta_allocator = Arc::new(StandaloneTableMetadataAllocator::new(
|
||||
table_id_sequence,
|
||||
wal_options_allocator.clone(),
|
||||
));
|
||||
|
||||
let ddl_task_executor = Self::create_ddl_task_executor(
|
||||
kv_backend.clone(),
|
||||
procedure_manager.clone(),
|
||||
datanode_manager.clone(),
|
||||
table_meta_allocator,
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -418,10 +369,6 @@ impl StartCommand {
|
||||
.await
|
||||
.context(StartFrontendSnafu)?;
|
||||
|
||||
frontend
|
||||
.build_export_metrics_task(&opts.frontend.export_metrics)
|
||||
.context(StartFrontendSnafu)?;
|
||||
|
||||
frontend
|
||||
.build_servers(opts)
|
||||
.await
|
||||
@@ -431,15 +378,13 @@ impl StartCommand {
|
||||
datanode,
|
||||
frontend,
|
||||
procedure_manager,
|
||||
wal_options_allocator,
|
||||
})
|
||||
}
|
||||
|
||||
pub async fn create_ddl_task_executor(
|
||||
async fn create_ddl_task_executor(
|
||||
kv_backend: KvBackendRef,
|
||||
procedure_manager: ProcedureManagerRef,
|
||||
datanode_manager: DatanodeManagerRef,
|
||||
table_meta_allocator: TableMetadataAllocatorRef,
|
||||
) -> Result<DdlTaskExecutorRef> {
|
||||
let table_metadata_manager =
|
||||
Self::create_table_metadata_manager(kv_backend.clone()).await?;
|
||||
@@ -450,8 +395,7 @@ impl StartCommand {
|
||||
datanode_manager,
|
||||
Arc::new(DummyCacheInvalidator),
|
||||
table_metadata_manager,
|
||||
table_meta_allocator,
|
||||
Arc::new(MemoryRegionKeeper::default()),
|
||||
Arc::new(StandaloneTableMetadataCreator::new(kv_backend)),
|
||||
)
|
||||
.context(InitDdlManagerSnafu)?,
|
||||
);
|
||||
@@ -481,13 +425,11 @@ mod tests {
|
||||
|
||||
use auth::{Identity, Password, UserProviderRef};
|
||||
use common_base::readable_size::ReadableSize;
|
||||
use common_config::WalConfig;
|
||||
use common_test_util::temp_dir::create_named_temp_file;
|
||||
use datanode::config::{FileConfig, GcsConfig};
|
||||
use servers::Mode;
|
||||
|
||||
use super::*;
|
||||
use crate::options::{CliOptions, ENV_VAR_SEP};
|
||||
use crate::options::ENV_VAR_SEP;
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_try_from_start_command_to_anymap() {
|
||||
@@ -525,7 +467,6 @@ mod tests {
|
||||
enable_memory_catalog = true
|
||||
|
||||
[wal]
|
||||
provider = "raft_engine"
|
||||
dir = "/tmp/greptimedb/test/wal"
|
||||
file_size = "1GB"
|
||||
purge_threshold = "50GB"
|
||||
@@ -534,15 +475,6 @@ mod tests {
|
||||
sync_write = false
|
||||
|
||||
[storage]
|
||||
data_home = "/tmp/greptimedb/"
|
||||
type = "File"
|
||||
|
||||
[[storage.providers]]
|
||||
type = "Gcs"
|
||||
bucket = "foo"
|
||||
endpoint = "bar"
|
||||
|
||||
[[storage.providers]]
|
||||
type = "S3"
|
||||
access_key_id = "access_key_id"
|
||||
secret_access_key = "secret_access_key"
|
||||
@@ -572,7 +504,8 @@ mod tests {
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let Options::Standalone(options) = cmd.load_options(&CliOptions::default()).unwrap() else {
|
||||
let Options::Standalone(options) = cmd.load_options(TopLevelOptions::default()).unwrap()
|
||||
else {
|
||||
unreachable!()
|
||||
};
|
||||
let fe_opts = options.frontend;
|
||||
@@ -589,21 +522,9 @@ mod tests {
|
||||
assert_eq!(None, fe_opts.mysql.reject_no_database);
|
||||
assert!(fe_opts.influxdb.enable);
|
||||
|
||||
let WalConfig::RaftEngine(raft_engine_config) = dn_opts.wal else {
|
||||
unreachable!()
|
||||
};
|
||||
assert_eq!("/tmp/greptimedb/test/wal", raft_engine_config.dir.unwrap());
|
||||
assert_eq!("/tmp/greptimedb/test/wal", dn_opts.wal.dir.unwrap());
|
||||
|
||||
assert!(matches!(
|
||||
&dn_opts.storage.store,
|
||||
datanode::config::ObjectStoreConfig::File(FileConfig { .. })
|
||||
));
|
||||
assert_eq!(dn_opts.storage.providers.len(), 2);
|
||||
assert!(matches!(
|
||||
dn_opts.storage.providers[0],
|
||||
datanode::config::ObjectStoreConfig::Gcs(GcsConfig { .. })
|
||||
));
|
||||
match &dn_opts.storage.providers[1] {
|
||||
match &dn_opts.storage.store {
|
||||
datanode::config::ObjectStoreConfig::S3(s3_config) => {
|
||||
assert_eq!(
|
||||
"Secret([REDACTED alloc::string::String])".to_string(),
|
||||
@@ -620,19 +541,16 @@ mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_load_log_options_from_cli() {
|
||||
fn test_top_level_options() {
|
||||
let cmd = StartCommand {
|
||||
user_provider: Some("static_user_provider:cmd:test=test".to_string()),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let Options::Standalone(opts) = cmd
|
||||
.load_options(&CliOptions {
|
||||
.load_options(TopLevelOptions {
|
||||
log_dir: Some("/tmp/greptimedb/test/logs".to_string()),
|
||||
log_level: Some("debug".to_string()),
|
||||
|
||||
#[cfg(feature = "tokio-console")]
|
||||
tokio_console_addr: None,
|
||||
})
|
||||
.unwrap()
|
||||
else {
|
||||
@@ -699,8 +617,11 @@ mod tests {
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let Options::Standalone(opts) =
|
||||
command.load_options(&CliOptions::default()).unwrap()
|
||||
let top_level_opts = TopLevelOptions {
|
||||
log_dir: None,
|
||||
log_level: None,
|
||||
};
|
||||
let Options::Standalone(opts) = command.load_options(top_level_opts).unwrap()
|
||||
else {
|
||||
unreachable!()
|
||||
};
|
||||
|
||||
@@ -7,7 +7,7 @@ license.workspace = true
|
||||
[dependencies]
|
||||
anymap = "1.0.0-beta.2"
|
||||
bitvec = "1.0"
|
||||
bytes.workspace = true
|
||||
bytes = { version = "1.1", features = ["serde"] }
|
||||
common-error.workspace = true
|
||||
common-macro.workspace = true
|
||||
paste = "1.0"
|
||||
|
||||
@@ -17,7 +17,6 @@ pub const INFORMATION_SCHEMA_NAME: &str = "information_schema";
|
||||
pub const SYSTEM_CATALOG_TABLE_NAME: &str = "system_catalog";
|
||||
pub const DEFAULT_CATALOG_NAME: &str = "greptime";
|
||||
pub const DEFAULT_SCHEMA_NAME: &str = "public";
|
||||
pub const DEFAULT_PRIVATE_SCHEMA_NAME: &str = "greptime_private";
|
||||
|
||||
/// Reserves [0,MIN_USER_TABLE_ID) for internal usage.
|
||||
/// User defined table id starts from this value.
|
||||
@@ -30,25 +29,13 @@ pub const SYSTEM_CATALOG_TABLE_ID: u32 = 0;
|
||||
pub const SCRIPTS_TABLE_ID: u32 = 1;
|
||||
/// numbers table id
|
||||
pub const NUMBERS_TABLE_ID: u32 = 2;
|
||||
|
||||
/// ----- Begin of information_schema tables -----
|
||||
/// id for information_schema.tables
|
||||
pub const INFORMATION_SCHEMA_TABLES_TABLE_ID: u32 = 3;
|
||||
/// id for information_schema.columns
|
||||
pub const INFORMATION_SCHEMA_COLUMNS_TABLE_ID: u32 = 4;
|
||||
/// id for information_schema.engines
|
||||
pub const INFORMATION_SCHEMA_ENGINES_TABLE_ID: u32 = 5;
|
||||
/// id for information_schema.column_privileges
|
||||
pub const INFORMATION_SCHEMA_COLUMN_PRIVILEGES_TABLE_ID: u32 = 6;
|
||||
/// id for information_schema.column_statistics
|
||||
pub const INFORMATION_SCHEMA_COLUMN_STATISTICS_TABLE_ID: u32 = 7;
|
||||
/// id for information_schema.build_info
|
||||
pub const INFORMATION_SCHEMA_BUILD_INFO_TABLE_ID: u32 = 8;
|
||||
/// ----- End of information_schema tables -----
|
||||
|
||||
pub const MITO_ENGINE: &str = "mito";
|
||||
pub const MITO2_ENGINE: &str = "mito2";
|
||||
pub const METRIC_ENGINE: &str = "metric";
|
||||
|
||||
pub fn default_engine() -> &'static str {
|
||||
MITO_ENGINE
|
||||
|
||||
@@ -7,8 +7,4 @@ license.workspace = true
|
||||
[dependencies]
|
||||
common-base.workspace = true
|
||||
humantime-serde.workspace = true
|
||||
rskafka.workspace = true
|
||||
serde.workspace = true
|
||||
serde_json.workspace = true
|
||||
serde_with = "3"
|
||||
toml.workspace = true
|
||||
|
||||
@@ -12,12 +12,41 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
pub mod wal;
|
||||
use std::time::Duration;
|
||||
|
||||
use common_base::readable_size::ReadableSize;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
pub use crate::wal::{KafkaWalOptions, WalConfig, WalOptions, WAL_OPTIONS_KEY};
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
|
||||
#[serde(default)]
|
||||
pub struct WalConfig {
|
||||
// wal directory
|
||||
pub dir: Option<String>,
|
||||
// wal file size in bytes
|
||||
pub file_size: ReadableSize,
|
||||
// wal purge threshold in bytes
|
||||
pub purge_threshold: ReadableSize,
|
||||
// purge interval in seconds
|
||||
#[serde(with = "humantime_serde")]
|
||||
pub purge_interval: Duration,
|
||||
// read batch size
|
||||
pub read_batch_size: usize,
|
||||
// whether to sync log file after every write
|
||||
pub sync_write: bool,
|
||||
}
|
||||
|
||||
impl Default for WalConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
dir: None,
|
||||
file_size: ReadableSize::mb(256), // log file size 256MB
|
||||
purge_threshold: ReadableSize::gb(4), // purge threshold 4GB
|
||||
purge_interval: Duration::from_secs(600),
|
||||
read_batch_size: 128,
|
||||
sync_write: false,
|
||||
}
|
||||
}
|
||||
}
|
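The `[wal]` blocks in the test fixtures earlier in this compare map directly onto this struct. A small sketch of how the human-readable values deserialize, assuming it runs inside this workspace on the side of the compare where `common_config::WalConfig` is the plain struct defined above:

use std::time::Duration;

use common_base::readable_size::ReadableSize;
use common_config::WalConfig;

#[test]
fn wal_config_from_toml_sketch() {
    let toml_str = r#"
        dir = "/tmp/greptimedb/wal"
        file_size = "1GB"
        purge_threshold = "50GB"
        purge_interval = "10m"
    "#;
    // ReadableSize parses "1GB"/"50GB", humantime_serde parses "10m",
    // and omitted fields fall back to Default via #[serde(default)].
    let cfg: WalConfig = toml::from_str(toml_str).unwrap();
    assert_eq!(cfg.dir.as_deref(), Some("/tmp/greptimedb/wal"));
    assert_eq!(cfg.file_size, ReadableSize::gb(1));
    assert_eq!(cfg.purge_interval, Duration::from_secs(600));
    assert_eq!(cfg.read_batch_size, 128); // default
}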
||||
|
||||
pub fn metadata_store_dir(store_dir: &str) -> String {
|
||||
format!("{store_dir}/metadata")
|
||||
|
||||
@@ -1,142 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
pub mod kafka;
|
||||
pub mod raft_engine;
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_with::with_prefix;
|
||||
|
||||
pub use crate::wal::kafka::{
|
||||
KafkaConfig, KafkaOptions as KafkaWalOptions, StandaloneKafkaConfig, Topic as KafkaWalTopic,
|
||||
};
|
||||
pub use crate::wal::raft_engine::RaftEngineConfig;
|
||||
|
||||
/// An encoded wal options will be wrapped into a (WAL_OPTIONS_KEY, encoded wal options) key-value pair
|
||||
/// and inserted into the options of a `RegionCreateRequest`.
|
||||
pub const WAL_OPTIONS_KEY: &str = "wal_options";
|
||||
|
||||
/// Wal config for datanode.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
|
||||
#[serde(tag = "provider", rename_all = "snake_case")]
|
||||
pub enum WalConfig {
|
||||
RaftEngine(RaftEngineConfig),
|
||||
Kafka(KafkaConfig),
|
||||
}
|
||||
|
||||
impl From<StandaloneWalConfig> for WalConfig {
|
||||
fn from(value: StandaloneWalConfig) -> Self {
|
||||
match value {
|
||||
StandaloneWalConfig::RaftEngine(config) => WalConfig::RaftEngine(config),
|
||||
StandaloneWalConfig::Kafka(config) => WalConfig::Kafka(config.base),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for WalConfig {
|
||||
fn default() -> Self {
|
||||
WalConfig::RaftEngine(RaftEngineConfig::default())
|
||||
}
|
||||
}
|
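On the other side of the compare, where this tagged enum still exists, the `provider` key is the serde tag that picks the variant and the remaining keys fill the chosen config, with unset fields defaulting. A sketch, assuming the `pub mod wal` shown above is in scope:

use common_config::wal::WalConfig;

#[test]
fn wal_provider_tag_sketch() {
    let toml_str = r#"
        provider = "raft_engine"
        dir = "/tmp/greptimedb/wal"
        file_size = "1GB"
    "#;
    let wal: WalConfig = toml::from_str(toml_str).unwrap();
    assert!(matches!(wal, WalConfig::RaftEngine(_)));
}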
||||
|
||||
/// Wal config for standalone mode.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
|
||||
#[serde(tag = "provider", rename_all = "snake_case")]
|
||||
pub enum StandaloneWalConfig {
|
||||
RaftEngine(RaftEngineConfig),
|
||||
Kafka(StandaloneKafkaConfig),
|
||||
}
|
||||
|
||||
impl Default for StandaloneWalConfig {
|
||||
fn default() -> Self {
|
||||
StandaloneWalConfig::RaftEngine(RaftEngineConfig::default())
|
||||
}
|
||||
}
|
||||
|
||||
/// Wal options allocated to a region.
|
||||
/// A wal options is encoded by metasrv with `serde_json::to_string`, and then decoded
|
||||
/// by datanode with `serde_json::from_str`.
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Default)]
|
||||
#[serde(tag = "wal.provider", rename_all = "snake_case")]
|
||||
pub enum WalOptions {
|
||||
#[default]
|
||||
RaftEngine,
|
||||
#[serde(with = "prefix_wal_kafka")]
|
||||
Kafka(KafkaWalOptions),
|
||||
}
|
||||
|
||||
with_prefix!(prefix_wal_kafka "wal.kafka.");
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::time::Duration;
|
||||
|
||||
use common_base::readable_size::ReadableSize;
|
||||
use rskafka::client::partition::Compression as RsKafkaCompression;
|
||||
|
||||
use crate::wal::kafka::KafkaBackoffConfig;
|
||||
use crate::wal::{KafkaConfig, KafkaWalOptions, WalOptions};
|
||||
|
||||
#[test]
|
||||
fn test_serde_kafka_config() {
|
||||
let toml_str = r#"
|
||||
broker_endpoints = ["127.0.0.1:9092"]
|
||||
max_batch_size = "4MB"
|
||||
linger = "200ms"
|
||||
produce_record_timeout = "100ms"
|
||||
backoff_init = "500ms"
|
||||
backoff_max = "10s"
|
||||
backoff_base = 2
|
||||
backoff_deadline = "5mins"
|
||||
"#;
|
||||
let decoded: KafkaConfig = toml::from_str(toml_str).unwrap();
|
||||
let expected = KafkaConfig {
|
||||
broker_endpoints: vec!["127.0.0.1:9092".to_string()],
|
||||
compression: RsKafkaCompression::default(),
|
||||
max_batch_size: ReadableSize::mb(4),
|
||||
linger: Duration::from_millis(200),
|
||||
produce_record_timeout: Duration::from_millis(100),
|
||||
backoff: KafkaBackoffConfig {
|
||||
init: Duration::from_millis(500),
|
||||
max: Duration::from_secs(10),
|
||||
base: 2,
|
||||
deadline: Some(Duration::from_secs(60 * 5)),
|
||||
},
|
||||
};
|
||||
assert_eq!(decoded, expected);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_serde_wal_options() {
|
||||
// Test serde raft-engine wal options.
|
||||
let wal_options = WalOptions::RaftEngine;
|
||||
let encoded = serde_json::to_string(&wal_options).unwrap();
|
||||
let expected = r#"{"wal.provider":"raft_engine"}"#;
|
||||
assert_eq!(&encoded, expected);
|
||||
|
||||
let decoded: WalOptions = serde_json::from_str(&encoded).unwrap();
|
||||
assert_eq!(decoded, wal_options);
|
||||
|
||||
// Test serde kafka wal options.
|
||||
let wal_options = WalOptions::Kafka(KafkaWalOptions {
|
||||
topic: "test_topic".to_string(),
|
||||
});
|
||||
let encoded = serde_json::to_string(&wal_options).unwrap();
|
||||
let expected = r#"{"wal.provider":"kafka","wal.kafka.topic":"test_topic"}"#;
|
||||
assert_eq!(&encoded, expected);
|
||||
|
||||
let decoded: WalOptions = serde_json::from_str(&encoded).unwrap();
|
||||
assert_eq!(decoded, wal_options);
|
||||
}
|
||||
}
|
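As the `WAL_OPTIONS_KEY` comment above describes, an allocated `WalOptions` travels to the datanode as a single JSON-encoded entry keyed by `WAL_OPTIONS_KEY`; the options container of the real `RegionCreateRequest` is approximated here by a plain string map. Sketch only, using the re-exports from this crate:

use std::collections::HashMap;

use common_config::{KafkaWalOptions, WalOptions, WAL_OPTIONS_KEY};

#[test]
fn wal_options_key_round_trip_sketch() {
    let wal_options = WalOptions::Kafka(KafkaWalOptions {
        topic: "greptimedb_wal_topic_0".to_string(),
    });

    // Metasrv side: encode and stash the options under WAL_OPTIONS_KEY.
    let mut region_options: HashMap<String, String> = HashMap::new();
    region_options.insert(
        WAL_OPTIONS_KEY.to_string(),
        serde_json::to_string(&wal_options).unwrap(),
    );

    // Datanode side: look the key up again and decode.
    let decoded: WalOptions =
        serde_json::from_str(&region_options[WAL_OPTIONS_KEY]).unwrap();
    assert_eq!(decoded, wal_options);
}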
||||
@@ -1,144 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::time::Duration;
|
||||
|
||||
use common_base::readable_size::ReadableSize;
|
||||
use rskafka::client::partition::Compression as RsKafkaCompression;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_with::with_prefix;
|
||||
|
||||
/// Topic name prefix.
|
||||
pub const TOPIC_NAME_PREFIX: &str = "greptimedb_wal_topic";
|
||||
/// Kafka wal topic.
|
||||
pub type Topic = String;
|
||||
|
||||
/// The type of the topic selector, i.e. with which strategy to select a topic.
|
||||
#[derive(Default, Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
pub enum TopicSelectorType {
|
||||
#[default]
|
||||
RoundRobin,
|
||||
}
|
||||
|
||||
/// Configurations for kafka wal.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
|
||||
#[serde(default)]
|
||||
pub struct KafkaConfig {
|
||||
/// The broker endpoints of the Kafka cluster.
|
||||
pub broker_endpoints: Vec<String>,
|
||||
/// The compression algorithm used to compress log entries.
|
||||
#[serde(skip)]
|
||||
#[serde(default)]
|
||||
pub compression: RsKafkaCompression,
|
||||
/// The maximum log size a kafka batch producer could buffer.
|
||||
pub max_batch_size: ReadableSize,
|
||||
/// The linger duration of a kafka batch producer.
|
||||
#[serde(with = "humantime_serde")]
|
||||
pub linger: Duration,
|
||||
/// The maximum amount of time (in milliseconds) to wait for Kafka records to be returned.
|
||||
#[serde(with = "humantime_serde")]
|
||||
pub produce_record_timeout: Duration,
|
||||
/// The backoff config.
|
||||
#[serde(flatten, with = "kafka_backoff")]
|
||||
pub backoff: KafkaBackoffConfig,
|
||||
}
|
||||
|
||||
impl Default for KafkaConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
broker_endpoints: vec!["127.0.0.1:9092".to_string()],
|
||||
compression: RsKafkaCompression::NoCompression,
|
||||
max_batch_size: ReadableSize::mb(4),
|
||||
linger: Duration::from_millis(200),
|
||||
produce_record_timeout: Duration::from_millis(100),
|
||||
backoff: KafkaBackoffConfig::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
with_prefix!(pub kafka_backoff "backoff_");
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
|
||||
#[serde(default)]
|
||||
pub struct KafkaBackoffConfig {
|
||||
/// The initial backoff for kafka clients.
|
||||
#[serde(with = "humantime_serde")]
|
||||
pub init: Duration,
|
||||
/// The maximum backoff for kafka clients.
|
||||
#[serde(with = "humantime_serde")]
|
||||
pub max: Duration,
|
||||
/// Exponential backoff rate, i.e. next backoff = base * current backoff.
|
||||
// Sets to u32 type since some structs containing the KafkaConfig need to derive the Eq trait.
|
||||
pub base: u32,
|
||||
/// Stop reconnecting if the total wait time reaches the deadline.
|
||||
/// If it's `None`, reconnecting never stops.
|
||||
#[serde(with = "humantime_serde")]
|
||||
pub deadline: Option<Duration>,
|
||||
}
|
||||
|
||||
impl Default for KafkaBackoffConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
init: Duration::from_millis(500),
|
||||
max: Duration::from_secs(10),
|
||||
base: 2,
|
||||
deadline: Some(Duration::from_secs(60 * 5)), // 5 mins
|
||||
}
|
||||
}
|
||||
}
|
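To make the backoff fields concrete: each retry waits `current`, the next wait is `current * base` capped at `max`, and retries stop once the accumulated wait passes `deadline` (when it is set). A standalone illustration, not code from the repository:

use std::time::Duration;

/// Expands an (init, base, max, deadline) quadruple into the waits a client
/// would observe, mirroring the semantics documented on KafkaBackoffConfig.
fn backoff_schedule(init: Duration, base: u32, max: Duration, deadline: Duration) -> Vec<Duration> {
    let mut waits = Vec::new();
    let mut current = init;
    let mut total = Duration::ZERO;
    while total < deadline {
        waits.push(current);
        total += current;
        current = (current * base).min(max);
    }
    waits
}

fn main() {
    // With the defaults above: 500ms, 1s, 2s, 4s, 8s, then 10s repeated
    // until roughly five minutes have been spent waiting.
    let waits = backoff_schedule(
        Duration::from_millis(500),
        2,
        Duration::from_secs(10),
        Duration::from_secs(300),
    );
    assert_eq!(waits[0], Duration::from_millis(500));
    assert_eq!(waits[5], Duration::from_secs(10));
}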
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
|
||||
#[serde(default)]
|
||||
pub struct StandaloneKafkaConfig {
|
||||
#[serde(flatten)]
|
||||
pub base: KafkaConfig,
|
||||
/// Number of topics to be created upon start.
|
||||
pub num_topics: usize,
|
||||
/// The type of the topic selector with which to select a topic for a region.
|
||||
pub selector_type: TopicSelectorType,
|
||||
/// Topic name prefix.
|
||||
pub topic_name_prefix: String,
|
||||
/// Number of partitions per topic.
|
||||
pub num_partitions: i32,
|
||||
/// The replication factor of each topic.
|
||||
pub replication_factor: i16,
|
||||
/// The timeout beyond which a topic creation operation is cancelled.
|
||||
#[serde(with = "humantime_serde")]
|
||||
pub create_topic_timeout: Duration,
|
||||
}
|
||||
|
||||
impl Default for StandaloneKafkaConfig {
|
||||
fn default() -> Self {
|
||||
let base = KafkaConfig::default();
|
||||
let replication_factor = base.broker_endpoints.len() as i16;
|
||||
|
||||
Self {
|
||||
base,
|
||||
num_topics: 64,
|
||||
selector_type: TopicSelectorType::RoundRobin,
|
||||
topic_name_prefix: "greptimedb_wal_topic".to_string(),
|
||||
num_partitions: 1,
|
||||
replication_factor,
|
||||
create_topic_timeout: Duration::from_secs(30),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Kafka wal options allocated to a region.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
|
||||
pub struct KafkaOptions {
|
||||
/// Kafka wal topic.
|
||||
pub topic: Topic,
|
||||
}
|
||||
@@ -1,50 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::time::Duration;
|
||||
|
||||
use common_base::readable_size::ReadableSize;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
/// Configurations for raft-engine wal.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
|
||||
#[serde(default)]
|
||||
pub struct RaftEngineConfig {
|
||||
// wal directory
|
||||
pub dir: Option<String>,
|
||||
// wal file size in bytes
|
||||
pub file_size: ReadableSize,
|
||||
// wal purge threshold in bytes
|
||||
pub purge_threshold: ReadableSize,
|
||||
// purge interval in seconds
|
||||
#[serde(with = "humantime_serde")]
|
||||
pub purge_interval: Duration,
|
||||
// read batch size
|
||||
pub read_batch_size: usize,
|
||||
// whether to sync log file after every write
|
||||
pub sync_write: bool,
|
||||
}
|
||||
|
||||
impl Default for RaftEngineConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
dir: None,
|
||||
file_size: ReadableSize::mb(256),
|
||||
purge_threshold: ReadableSize::gb(4),
|
||||
purge_interval: Duration::from_secs(600),
|
||||
read_batch_size: 128,
|
||||
sync_write: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -16,7 +16,7 @@ async-compression = { version = "0.3", features = [
|
||||
"tokio",
|
||||
] }
|
||||
async-trait.workspace = true
|
||||
bytes.workspace = true
|
||||
bytes = "1.1"
|
||||
common-error.workspace = true
|
||||
common-macro.workspace = true
|
||||
common-runtime.workspace = true
|
||||
|
||||
@@ -23,15 +23,6 @@ pub fn build_fs_backend(root: &str) -> Result<ObjectStore> {
|
||||
let _ = builder.root(root);
|
||||
let object_store = ObjectStore::new(builder)
|
||||
.context(BuildBackendSnafu)?
|
||||
.layer(
|
||||
object_store::layers::LoggingLayer::default()
|
||||
// Print the expected error only in DEBUG level.
|
||||
// See https://docs.rs/opendal/latest/opendal/layers/struct.LoggingLayer.html#method.with_error_level
|
||||
.with_error_level(Some("debug"))
|
||||
.expect("input error level must be valid"),
|
||||
)
|
||||
.layer(object_store::layers::TracingLayer)
|
||||
.layer(object_store::layers::PrometheusMetricsLayer)
|
||||
.finish();
|
||||
Ok(object_store)
|
||||
}
|
||||
|
||||
@@ -80,18 +80,8 @@ pub fn build_s3_backend(
|
||||
}
|
||||
}
|
||||
|
||||
// TODO(weny): Consider finding a better way to eliminate duplicate code.
|
||||
Ok(ObjectStore::new(builder)
|
||||
.context(error::BuildBackendSnafu)?
|
||||
.layer(
|
||||
object_store::layers::LoggingLayer::default()
|
||||
// Print the expected error only in DEBUG level.
|
||||
// See https://docs.rs/opendal/latest/opendal/layers/struct.LoggingLayer.html#method.with_error_level
|
||||
.with_error_level(Some("debug"))
|
||||
.expect("input error level must be valid"),
|
||||
)
|
||||
.layer(object_store::layers::TracingLayer)
|
||||
.layer(object_store::layers::PrometheusMetricsLayer)
|
||||
.finish())
|
||||
}
|
||||
|
||||
|
||||
@@ -11,5 +11,5 @@ common-error.workspace = true
|
||||
common-macro.workspace = true
|
||||
rust_decimal.workspace = true
|
||||
serde.workspace = true
|
||||
serde_json.workspace = true
|
||||
serde_json = "1.0"
|
||||
snafu.workspace = true
|
||||
|
||||
@@ -110,15 +110,9 @@ impl Decimal128 {
|
||||
}
|
||||
|
||||
/// Converts from precision, scale, and an i128 value which
|
||||
/// represents by i64 + i64 value(high-64 bit, low-64 bit).
|
||||
/// is represented by two i64 values (high 64 bits, low 64 bits).
|
||||
pub fn from_value_precision_scale(hi: i64, lo: i64, precision: u8, scale: i8) -> Self {
|
||||
// 128 64 0
|
||||
// +-------+-------+-------+-------+-------+-------+-------+-------+
|
||||
// | hi | lo |
|
||||
// +-------+-------+-------+-------+-------+-------+-------+-------+
|
||||
let hi = (hi as u128 & u64::MAX as u128) << 64;
|
||||
let lo = lo as u128 & u64::MAX as u128;
|
||||
let value = (hi | lo) as i128;
|
||||
let value = (hi as i128) << 64 | lo as i128;
|
||||
Self::new(value, precision, scale)
|
||||
}
|
||||
}
|
||||
@@ -435,30 +429,4 @@ mod tests {
|
||||
let decimal2 = Decimal128::from_str("1234567890.123").unwrap();
|
||||
assert_eq!(decimal1.partial_cmp(&decimal2), None);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_convert_with_i128() {
|
||||
let test_decimal128_eq = |value| {
|
||||
let decimal1 =
|
||||
Decimal128::new(value, DECIMAL128_MAX_PRECISION, DECIMAL128_DEFAULT_SCALE);
|
||||
let (hi, lo) = decimal1.split_value();
|
||||
let decimal2 = Decimal128::from_value_precision_scale(
|
||||
hi,
|
||||
lo,
|
||||
DECIMAL128_MAX_PRECISION,
|
||||
DECIMAL128_DEFAULT_SCALE,
|
||||
);
|
||||
assert_eq!(decimal1, decimal2);
|
||||
};
|
||||
|
||||
test_decimal128_eq(1 << 63);
|
||||
|
||||
test_decimal128_eq(0);
|
||||
test_decimal128_eq(1234567890);
|
||||
test_decimal128_eq(-1234567890);
|
||||
test_decimal128_eq(32781372819372817382183218i128);
|
||||
test_decimal128_eq(-32781372819372817382183218i128);
|
||||
test_decimal128_eq(i128::MAX);
|
||||
test_decimal128_eq(i128::MIN);
|
||||
}
|
||||
}
|
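A standalone illustration of the split-and-recombine logic the test above exercises, using only plain integers: the low half has to be masked before the OR, otherwise a negative `lo` sign-extends into the high 64 bits. The `split` helper below is assumed to match what `split_value` returns (high half, low half):

fn split(value: i128) -> (i64, i64) {
    ((value >> 64) as i64, value as i64)
}

fn combine(hi: i64, lo: i64) -> i128 {
    let hi = (hi as u128 & u64::MAX as u128) << 64;
    let lo = lo as u128 & u64::MAX as u128; // mask away any sign extension
    (hi | lo) as i128
}

fn main() {
    for v in [0i128, 1234567890, -1234567890, 1 << 63, i128::MAX, i128::MIN] {
        let (hi, lo) = split(v);
        assert_eq!(combine(hi, lo), v);
    }
}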
||||
|
||||
@@ -59,10 +59,6 @@ pub enum StatusCode {
|
||||
RegionNotFound = 4005,
|
||||
RegionAlreadyExists = 4006,
|
||||
RegionReadonly = 4007,
|
||||
RegionNotReady = 4008,
|
||||
// If mutually exclusive operations are reached at the same time,
|
||||
// only one can be executed; the other one will get region busy.
|
||||
RegionBusy = 4009,
|
||||
// ====== End of catalog related status code =======
|
||||
|
||||
// ====== Begin of storage related status code =====
|
||||
@@ -107,9 +103,7 @@ impl StatusCode {
|
||||
match self {
|
||||
StatusCode::StorageUnavailable
|
||||
| StatusCode::RuntimeResourcesExhausted
|
||||
| StatusCode::Internal
|
||||
| StatusCode::RegionNotReady
|
||||
| StatusCode::RegionBusy => true,
|
||||
| StatusCode::Internal => true,
|
||||
|
||||
StatusCode::Success
|
||||
| StatusCode::Unknown
|
||||
@@ -144,6 +138,7 @@ impl StatusCode {
|
||||
pub fn should_log_error(&self) -> bool {
|
||||
match self {
|
||||
StatusCode::Unknown
|
||||
| StatusCode::Unsupported
|
||||
| StatusCode::Unexpected
|
||||
| StatusCode::Internal
|
||||
| StatusCode::Cancelled
|
||||
@@ -152,14 +147,11 @@ impl StatusCode {
|
||||
| StatusCode::StorageUnavailable
|
||||
| StatusCode::RuntimeResourcesExhausted => true,
|
||||
StatusCode::Success
|
||||
| StatusCode::Unsupported
|
||||
| StatusCode::InvalidArguments
|
||||
| StatusCode::InvalidSyntax
|
||||
| StatusCode::TableAlreadyExists
|
||||
| StatusCode::TableNotFound
|
||||
| StatusCode::RegionNotFound
|
||||
| StatusCode::RegionNotReady
|
||||
| StatusCode::RegionBusy
|
||||
| StatusCode::RegionAlreadyExists
|
||||
| StatusCode::RegionReadonly
|
||||
| StatusCode::TableColumnNotFound
|
||||
@@ -191,8 +183,6 @@ impl StatusCode {
|
||||
v if v == StatusCode::TableAlreadyExists as u32 => Some(StatusCode::TableAlreadyExists),
|
||||
v if v == StatusCode::TableNotFound as u32 => Some(StatusCode::TableNotFound),
|
||||
v if v == StatusCode::RegionNotFound as u32 => Some(StatusCode::RegionNotFound),
|
||||
v if v == StatusCode::RegionNotReady as u32 => Some(StatusCode::RegionNotReady),
|
||||
v if v == StatusCode::RegionBusy as u32 => Some(StatusCode::RegionBusy),
|
||||
v if v == StatusCode::RegionAlreadyExists as u32 => {
|
||||
Some(StatusCode::RegionAlreadyExists)
|
||||
}
|
||||
|
||||
@@ -6,7 +6,6 @@ license.workspace = true
|
||||
|
||||
[dependencies]
|
||||
arc-swap = "1.0"
|
||||
build-data = "0.1"
|
||||
chrono-tz = "0.6"
|
||||
common-error.workspace = true
|
||||
common-macro.workspace = true
|
||||
|
||||
@@ -1,29 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use common_query::prelude::{Signature, TypeSignature, Volatility};
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
|
||||
/// Creates a one-of function signature by pairing every type in the first argument list with every type in the second.
|
||||
pub fn one_of_sigs2(args1: Vec<ConcreteDataType>, args2: Vec<ConcreteDataType>) -> Signature {
|
||||
let mut sigs = Vec::with_capacity(args1.len() * args2.len());
|
||||
|
||||
for arg1 in &args1 {
|
||||
for arg2 in &args2 {
|
||||
sigs.push(TypeSignature::Exact(vec![arg1.clone(), arg2.clone()]));
|
||||
}
|
||||
}
|
||||
|
||||
Signature::one_of(sigs, Volatility::Immutable)
|
||||
}
|
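A quick illustration of what the helper expands to: every type in the first list paired with every type in the second, which is why the date_add/date_sub tests later in this compare assert 18 signatures (6 time-like types times 3 interval types). Sketch, assuming it lives in the same crate as the helper:

use common_query::prelude::{Signature, TypeSignature, Volatility};
use datatypes::prelude::ConcreteDataType;

use crate::helper::one_of_sigs2;

#[test]
fn one_of_sigs2_cartesian_sketch() {
    let sig = one_of_sigs2(
        vec![
            ConcreteDataType::date_datatype(),
            ConcreteDataType::timestamp_second_datatype(),
        ],
        vec![ConcreteDataType::interval_year_month_datatype()],
    );
    // 2 left types x 1 right type = 2 exact signatures.
    assert!(matches!(
        sig,
        Signature {
            type_signature: TypeSignature::OneOf(sigs),
            volatility: Volatility::Immutable,
        } if sigs.len() == 2
    ));
}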
||||
@@ -13,8 +13,3 @@
|
||||
// limitations under the License.
|
||||
|
||||
pub mod scalars;
|
||||
pub mod system;
|
||||
|
||||
pub mod function;
|
||||
pub mod function_registry;
|
||||
pub mod helper;
|
||||
|
||||
@@ -13,11 +13,15 @@
|
||||
// limitations under the License.
|
||||
|
||||
pub mod aggregate;
|
||||
pub(crate) mod date;
|
||||
pub mod expression;
|
||||
pub mod function;
|
||||
pub mod function_registry;
|
||||
pub mod math;
|
||||
pub mod numpy;
|
||||
#[cfg(test)]
|
||||
pub(crate) mod test;
|
||||
pub(crate) mod timestamp;
|
||||
mod timestamp;
|
||||
pub mod udf;
|
||||
|
||||
pub use function::{Function, FunctionRef};
|
||||
pub use function_registry::{FunctionRegistry, FUNCTION_REGISTRY};
|
||||
|
||||
@@ -33,7 +33,7 @@ pub use polyval::PolyvalAccumulatorCreator;
|
||||
pub use scipy_stats_norm_cdf::ScipyStatsNormCdfAccumulatorCreator;
|
||||
pub use scipy_stats_norm_pdf::ScipyStatsNormPdfAccumulatorCreator;
|
||||
|
||||
use crate::function_registry::FunctionRegistry;
|
||||
use crate::scalars::FunctionRegistry;
|
||||
|
||||
/// A function creates `AggregateFunctionCreator`.
|
||||
/// "Aggregator" *is* AggregatorFunction. Since the later one is long, we named an short alias for it.
|
||||
|
||||
@@ -1,31 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::Arc;
|
||||
mod date_add;
|
||||
mod date_sub;
|
||||
|
||||
use date_add::DateAddFunction;
|
||||
use date_sub::DateSubFunction;
|
||||
|
||||
use crate::function_registry::FunctionRegistry;
|
||||
|
||||
pub(crate) struct DateFunction;
|
||||
|
||||
impl DateFunction {
|
||||
pub fn register(registry: &FunctionRegistry) {
|
||||
registry.register(Arc::new(DateAddFunction));
|
||||
registry.register(Arc::new(DateSubFunction));
|
||||
}
|
||||
}
|
||||
@@ -1,278 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::fmt;
|
||||
|
||||
use common_query::error::{InvalidFuncArgsSnafu, Result, UnsupportedInputDataTypeSnafu};
|
||||
use common_query::prelude::Signature;
|
||||
use datatypes::data_type::DataType;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::value::ValueRef;
|
||||
use datatypes::vectors::VectorRef;
|
||||
use snafu::ensure;
|
||||
|
||||
use crate::function::{Function, FunctionContext};
|
||||
use crate::helper;
|
||||
|
||||
/// A function that adds an interval value to a Timestamp, Date or DateTime, and returns the result.
|
||||
#[derive(Clone, Debug, Default)]
|
||||
pub struct DateAddFunction;
|
||||
|
||||
const NAME: &str = "date_add";
|
||||
|
||||
impl Function for DateAddFunction {
|
||||
fn name(&self) -> &str {
|
||||
NAME
|
||||
}
|
||||
|
||||
fn return_type(&self, input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
|
||||
Ok(input_types[0].clone())
|
||||
}
|
||||
|
||||
fn signature(&self) -> Signature {
|
||||
helper::one_of_sigs2(
|
||||
vec![
|
||||
ConcreteDataType::date_datatype(),
|
||||
ConcreteDataType::datetime_datatype(),
|
||||
ConcreteDataType::timestamp_second_datatype(),
|
||||
ConcreteDataType::timestamp_millisecond_datatype(),
|
||||
ConcreteDataType::timestamp_microsecond_datatype(),
|
||||
ConcreteDataType::timestamp_nanosecond_datatype(),
|
||||
],
|
||||
vec![
|
||||
ConcreteDataType::interval_month_day_nano_datatype(),
|
||||
ConcreteDataType::interval_year_month_datatype(),
|
||||
ConcreteDataType::interval_day_time_datatype(),
|
||||
],
|
||||
)
|
||||
}
|
||||
|
||||
fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
|
||||
ensure!(
|
||||
columns.len() == 2,
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"The length of the args is not correct, expect 2, have: {}",
|
||||
columns.len()
|
||||
),
|
||||
}
|
||||
);
|
||||
|
||||
let left = &columns[0];
|
||||
let right = &columns[1];
|
||||
|
||||
let size = left.len();
|
||||
let left_datatype = columns[0].data_type();
|
||||
match left_datatype {
|
||||
ConcreteDataType::Timestamp(_) => {
|
||||
let mut result = left_datatype.create_mutable_vector(size);
|
||||
for i in 0..size {
|
||||
let ts = left.get(i).as_timestamp();
|
||||
let interval = right.get(i).as_interval();
|
||||
|
||||
let new_ts = match (ts, interval) {
|
||||
(Some(ts), Some(interval)) => ts.add_interval(interval),
|
||||
_ => ts,
|
||||
};
|
||||
|
||||
result.push_value_ref(ValueRef::from(new_ts));
|
||||
}
|
||||
|
||||
Ok(result.to_vector())
|
||||
}
|
||||
ConcreteDataType::Date(_) => {
|
||||
let mut result = left_datatype.create_mutable_vector(size);
|
||||
for i in 0..size {
|
||||
let date = left.get(i).as_date();
|
||||
let interval = right.get(i).as_interval();
|
||||
let new_date = match (date, interval) {
|
||||
(Some(date), Some(interval)) => date.add_interval(interval),
|
||||
_ => date,
|
||||
};
|
||||
|
||||
result.push_value_ref(ValueRef::from(new_date));
|
||||
}
|
||||
|
||||
Ok(result.to_vector())
|
||||
}
|
||||
ConcreteDataType::DateTime(_) => {
|
||||
let mut result = left_datatype.create_mutable_vector(size);
|
||||
for i in 0..size {
|
||||
let datetime = left.get(i).as_datetime();
|
||||
let interval = right.get(i).as_interval();
|
||||
let new_datetime = match (datetime, interval) {
|
||||
(Some(datetime), Some(interval)) => datetime.add_interval(interval),
|
||||
_ => datetime,
|
||||
};
|
||||
|
||||
result.push_value_ref(ValueRef::from(new_datetime));
|
||||
}
|
||||
|
||||
Ok(result.to_vector())
|
||||
}
|
||||
_ => UnsupportedInputDataTypeSnafu {
|
||||
function: NAME,
|
||||
datatypes: columns.iter().map(|c| c.data_type()).collect::<Vec<_>>(),
|
||||
}
|
||||
.fail(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for DateAddFunction {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(f, "DATE_ADD")
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_query::prelude::{TypeSignature, Volatility};
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::value::Value;
|
||||
use datatypes::vectors::{
|
||||
DateTimeVector, DateVector, IntervalDayTimeVector, IntervalYearMonthVector,
|
||||
TimestampSecondVector,
|
||||
};
|
||||
|
||||
use super::{DateAddFunction, *};
|
||||
|
||||
#[test]
|
||||
fn test_date_add_misc() {
|
||||
let f = DateAddFunction;
|
||||
assert_eq!("date_add", f.name());
|
||||
assert_eq!(
|
||||
ConcreteDataType::timestamp_microsecond_datatype(),
|
||||
f.return_type(&[ConcreteDataType::timestamp_microsecond_datatype()])
|
||||
.unwrap()
|
||||
);
|
||||
assert_eq!(
|
||||
ConcreteDataType::timestamp_second_datatype(),
|
||||
f.return_type(&[ConcreteDataType::timestamp_second_datatype()])
|
||||
.unwrap()
|
||||
);
|
||||
assert_eq!(
|
||||
ConcreteDataType::date_datatype(),
|
||||
f.return_type(&[ConcreteDataType::date_datatype()]).unwrap()
|
||||
);
|
||||
assert_eq!(
|
||||
ConcreteDataType::datetime_datatype(),
|
||||
f.return_type(&[ConcreteDataType::datetime_datatype()])
|
||||
.unwrap()
|
||||
);
|
||||
assert!(matches!(f.signature(),
|
||||
Signature {
|
||||
type_signature: TypeSignature::OneOf(sigs),
|
||||
volatility: Volatility::Immutable
|
||||
} if sigs.len() == 18));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_timestamp_date_add() {
|
||||
let f = DateAddFunction;
|
||||
|
||||
let times = vec![Some(123), None, Some(42), None];
|
||||
// Intervals in milliseconds
|
||||
let intervals = vec![1000, 2000, 3000, 1000];
|
||||
let results = [Some(124), None, Some(45), None];
|
||||
|
||||
let time_vector = TimestampSecondVector::from(times.clone());
|
||||
let interval_vector = IntervalDayTimeVector::from_vec(intervals);
|
||||
let args: Vec<VectorRef> = vec![Arc::new(time_vector), Arc::new(interval_vector)];
|
||||
let vector = f.eval(FunctionContext::default(), &args).unwrap();
|
||||
|
||||
assert_eq!(4, vector.len());
|
||||
for (i, _t) in times.iter().enumerate() {
|
||||
let v = vector.get(i);
|
||||
let result = results.get(i).unwrap();
|
||||
|
||||
if result.is_none() {
|
||||
assert_eq!(Value::Null, v);
|
||||
continue;
|
||||
}
|
||||
match v {
|
||||
Value::Timestamp(ts) => {
|
||||
assert_eq!(ts.value(), result.unwrap());
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_date_date_add() {
|
||||
let f = DateAddFunction;
|
||||
|
||||
let dates = vec![Some(123), None, Some(42), None];
|
||||
// Intervals in months
|
||||
let intervals = vec![1, 2, 3, 1];
|
||||
let results = [Some(154), None, Some(131), None];
|
||||
|
||||
let date_vector = DateVector::from(dates.clone());
|
||||
let interval_vector = IntervalYearMonthVector::from_vec(intervals);
|
||||
let args: Vec<VectorRef> = vec![Arc::new(date_vector), Arc::new(interval_vector)];
|
||||
let vector = f.eval(FunctionContext::default(), &args).unwrap();
|
||||
|
||||
assert_eq!(4, vector.len());
|
||||
for (i, _t) in dates.iter().enumerate() {
|
||||
let v = vector.get(i);
|
||||
let result = results.get(i).unwrap();
|
||||
|
||||
if result.is_none() {
|
||||
assert_eq!(Value::Null, v);
|
||||
continue;
|
||||
}
|
||||
match v {
|
||||
Value::Date(date) => {
|
||||
assert_eq!(date.val(), result.unwrap());
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_datetime_date_add() {
|
||||
let f = DateAddFunction;
|
||||
|
||||
let dates = vec![Some(123), None, Some(42), None];
|
||||
// Intervals in months
|
||||
let intervals = vec![1, 2, 3, 1];
|
||||
let results = [Some(2678400123), None, Some(7776000042), None];
|
||||
|
||||
let date_vector = DateTimeVector::from(dates.clone());
|
||||
let interval_vector = IntervalYearMonthVector::from_vec(intervals);
|
||||
let args: Vec<VectorRef> = vec![Arc::new(date_vector), Arc::new(interval_vector)];
|
||||
let vector = f.eval(FunctionContext::default(), &args).unwrap();
|
||||
|
||||
assert_eq!(4, vector.len());
|
||||
for (i, _t) in dates.iter().enumerate() {
|
||||
let v = vector.get(i);
|
||||
let result = results.get(i).unwrap();
|
||||
|
||||
if result.is_none() {
|
||||
assert_eq!(Value::Null, v);
|
||||
continue;
|
||||
}
|
||||
match v {
|
||||
Value::DateTime(date) => {
|
||||
assert_eq!(date.val(), result.unwrap());
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,291 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::fmt;
|
||||
|
||||
use common_query::error::{InvalidFuncArgsSnafu, Result, UnsupportedInputDataTypeSnafu};
|
||||
use common_query::prelude::Signature;
|
||||
use datatypes::data_type::DataType;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::value::ValueRef;
|
||||
use datatypes::vectors::VectorRef;
|
||||
use snafu::ensure;
|
||||
|
||||
use crate::function::{Function, FunctionContext};
|
||||
use crate::helper;
|
||||
|
||||
/// A function that subtracts an interval value from a Timestamp, Date or DateTime, and returns the result.
|
||||
#[derive(Clone, Debug, Default)]
|
||||
pub struct DateSubFunction;
|
||||
|
||||
const NAME: &str = "date_sub";
|
||||
|
||||
impl Function for DateSubFunction {
|
||||
fn name(&self) -> &str {
|
||||
NAME
|
||||
}
|
||||
|
||||
fn return_type(&self, input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
|
||||
Ok(input_types[0].clone())
|
||||
}
|
||||
|
||||
fn signature(&self) -> Signature {
|
||||
helper::one_of_sigs2(
|
||||
vec![
|
||||
ConcreteDataType::date_datatype(),
|
||||
ConcreteDataType::datetime_datatype(),
|
||||
ConcreteDataType::timestamp_second_datatype(),
|
||||
ConcreteDataType::timestamp_millisecond_datatype(),
|
||||
ConcreteDataType::timestamp_microsecond_datatype(),
|
||||
ConcreteDataType::timestamp_nanosecond_datatype(),
|
||||
],
|
||||
vec![
|
||||
ConcreteDataType::interval_month_day_nano_datatype(),
|
||||
ConcreteDataType::interval_year_month_datatype(),
|
||||
ConcreteDataType::interval_day_time_datatype(),
|
||||
],
|
||||
)
|
||||
}
|
||||
|
||||
fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
|
||||
ensure!(
|
||||
columns.len() == 2,
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"The length of the args is not correct, expect 2, have: {}",
|
||||
columns.len()
|
||||
),
|
||||
}
|
||||
);
|
||||
|
||||
let left = &columns[0];
|
||||
let right = &columns[1];
|
||||
|
||||
let size = left.len();
|
||||
let left_datatype = columns[0].data_type();
|
||||
|
||||
match left_datatype {
|
||||
ConcreteDataType::Timestamp(_) => {
|
||||
let mut result = left_datatype.create_mutable_vector(size);
|
||||
for i in 0..size {
|
||||
let ts = left.get(i).as_timestamp();
|
||||
let interval = right.get(i).as_interval();
|
||||
|
||||
let new_ts = match (ts, interval) {
|
||||
(Some(ts), Some(interval)) => ts.sub_interval(interval),
|
||||
_ => ts,
|
||||
};
|
||||
|
||||
result.push_value_ref(ValueRef::from(new_ts));
|
||||
}
|
||||
|
||||
Ok(result.to_vector())
|
||||
}
|
||||
ConcreteDataType::Date(_) => {
|
||||
let mut result = left_datatype.create_mutable_vector(size);
|
||||
for i in 0..size {
|
||||
let date = left.get(i).as_date();
|
||||
let interval = right.get(i).as_interval();
|
||||
let new_date = match (date, interval) {
|
||||
(Some(date), Some(interval)) => date.sub_interval(interval),
|
||||
_ => date,
|
||||
};
|
||||
|
||||
result.push_value_ref(ValueRef::from(new_date));
|
||||
}
|
||||
|
||||
Ok(result.to_vector())
|
||||
}
|
||||
ConcreteDataType::DateTime(_) => {
|
||||
let mut result = left_datatype.create_mutable_vector(size);
|
||||
for i in 0..size {
|
||||
let datetime = left.get(i).as_datetime();
|
||||
let interval = right.get(i).as_interval();
|
||||
let new_datetime = match (datetime, interval) {
|
||||
(Some(datetime), Some(interval)) => datetime.sub_interval(interval),
|
||||
_ => datetime,
|
||||
};
|
||||
|
||||
result.push_value_ref(ValueRef::from(new_datetime));
|
||||
}
|
||||
|
||||
Ok(result.to_vector())
|
||||
}
|
||||
_ => UnsupportedInputDataTypeSnafu {
|
||||
function: NAME,
|
||||
datatypes: columns.iter().map(|c| c.data_type()).collect::<Vec<_>>(),
|
||||
}
|
||||
.fail(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for DateSubFunction {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(f, "DATE_SUB")
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_query::prelude::{TypeSignature, Volatility};
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::value::Value;
|
||||
use datatypes::vectors::{
|
||||
DateTimeVector, DateVector, IntervalDayTimeVector, IntervalYearMonthVector,
|
||||
TimestampSecondVector,
|
||||
};
|
||||
|
||||
use super::{DateSubFunction, *};
|
||||
|
||||
#[test]
|
||||
fn test_date_sub_misc() {
|
||||
let f = DateSubFunction;
|
||||
assert_eq!("date_sub", f.name());
|
||||
assert_eq!(
|
||||
ConcreteDataType::timestamp_microsecond_datatype(),
|
||||
f.return_type(&[ConcreteDataType::timestamp_microsecond_datatype()])
|
||||
.unwrap()
|
||||
);
|
||||
assert_eq!(
|
||||
ConcreteDataType::timestamp_second_datatype(),
|
||||
f.return_type(&[ConcreteDataType::timestamp_second_datatype()])
|
||||
.unwrap()
|
||||
);
|
||||
assert_eq!(
|
||||
ConcreteDataType::date_datatype(),
|
||||
f.return_type(&[ConcreteDataType::date_datatype()]).unwrap()
|
||||
);
|
||||
assert_eq!(
|
||||
ConcreteDataType::datetime_datatype(),
|
||||
f.return_type(&[ConcreteDataType::datetime_datatype()])
|
||||
.unwrap()
|
||||
);
|
||||
assert!(matches!(f.signature(),
|
||||
Signature {
|
||||
type_signature: TypeSignature::OneOf(sigs),
|
||||
volatility: Volatility::Immutable
|
||||
} if sigs.len() == 18));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_timestamp_date_sub() {
|
||||
let f = DateSubFunction;
|
||||
|
||||
let times = vec![Some(123), None, Some(42), None];
|
||||
// Intervals in milliseconds
|
||||
let intervals = vec![1000, 2000, 3000, 1000];
|
||||
let results = [Some(122), None, Some(39), None];
|
||||
|
||||
let time_vector = TimestampSecondVector::from(times.clone());
|
||||
let interval_vector = IntervalDayTimeVector::from_vec(intervals);
|
||||
let args: Vec<VectorRef> = vec![Arc::new(time_vector), Arc::new(interval_vector)];
|
||||
let vector = f.eval(FunctionContext::default(), &args).unwrap();
|
||||
|
||||
assert_eq!(4, vector.len());
|
||||
for (i, _t) in times.iter().enumerate() {
|
||||
let v = vector.get(i);
|
||||
let result = results.get(i).unwrap();
|
||||
|
||||
if result.is_none() {
|
||||
assert_eq!(Value::Null, v);
|
||||
continue;
|
||||
}
|
||||
match v {
|
||||
Value::Timestamp(ts) => {
|
||||
assert_eq!(ts.value(), result.unwrap());
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_date_date_sub() {
|
||||
let f = DateSubFunction;
|
||||
let days_per_month = 30;
|
||||
|
||||
let dates = vec![
|
||||
Some(123 * days_per_month),
|
||||
None,
|
||||
Some(42 * days_per_month),
|
||||
None,
|
||||
];
|
||||
// Intervals in months
|
||||
let intervals = vec![1, 2, 3, 1];
|
||||
let results = [Some(3659), None, Some(1168), None];
|
||||
|
||||
let date_vector = DateVector::from(dates.clone());
|
||||
let interval_vector = IntervalYearMonthVector::from_vec(intervals);
|
||||
let args: Vec<VectorRef> = vec![Arc::new(date_vector), Arc::new(interval_vector)];
|
||||
let vector = f.eval(FunctionContext::default(), &args).unwrap();
|
||||
|
||||
assert_eq!(4, vector.len());
|
||||
for (i, _t) in dates.iter().enumerate() {
|
||||
let v = vector.get(i);
|
||||
let result = results.get(i).unwrap();
|
||||
|
||||
if result.is_none() {
|
||||
assert_eq!(Value::Null, v);
|
||||
continue;
|
||||
}
|
||||
match v {
|
||||
Value::Date(date) => {
|
||||
assert_eq!(date.val(), result.unwrap());
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_datetime_date_sub() {
|
||||
let f = DateSubFunction;
|
||||
let millis_per_month = 3600 * 24 * 30 * 1000;
|
||||
|
||||
let dates = vec![
|
||||
Some(123 * millis_per_month),
|
||||
None,
|
||||
Some(42 * millis_per_month),
|
||||
None,
|
||||
];
|
||||
// Intervals in months
|
||||
let intervals = vec![1, 2, 3, 1];
|
||||
let results = [Some(316137600000), None, Some(100915200000), None];
|
||||
|
||||
let date_vector = DateTimeVector::from(dates.clone());
|
||||
let interval_vector = IntervalYearMonthVector::from_vec(intervals);
|
||||
let args: Vec<VectorRef> = vec![Arc::new(date_vector), Arc::new(interval_vector)];
|
||||
let vector = f.eval(FunctionContext::default(), &args).unwrap();
|
||||
|
||||
assert_eq!(4, vector.len());
|
||||
for (i, _t) in dates.iter().enumerate() {
|
||||
let v = vector.get(i);
|
||||
let result = results.get(i).unwrap();
|
||||
|
||||
if result.is_none() {
|
||||
assert_eq!(Value::Null, v);
|
||||
continue;
|
||||
}
|
||||
match v {
|
||||
Value::DateTime(date) => {
|
||||
assert_eq!(date.val(), result.unwrap());
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -18,13 +18,11 @@ use std::sync::{Arc, RwLock};
|
||||
|
||||
use once_cell::sync::Lazy;
|
||||
|
||||
use crate::function::FunctionRef;
|
||||
use crate::scalars::aggregate::{AggregateFunctionMetaRef, AggregateFunctions};
|
||||
use crate::scalars::date::DateFunction;
|
||||
use crate::scalars::function::FunctionRef;
|
||||
use crate::scalars::math::MathFunction;
|
||||
use crate::scalars::numpy::NumpyFunction;
|
||||
use crate::scalars::timestamp::TimestampFunction;
|
||||
use crate::system::SystemFunction;
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct FunctionRegistry {
|
||||
@@ -77,10 +75,9 @@ pub static FUNCTION_REGISTRY: Lazy<Arc<FunctionRegistry>> = Lazy::new(|| {
|
||||
MathFunction::register(&function_registry);
|
||||
NumpyFunction::register(&function_registry);
|
||||
TimestampFunction::register(&function_registry);
|
||||
DateFunction::register(&function_registry);
|
||||
|
||||
AggregateFunctions::register(&function_registry);
|
||||
SystemFunction::register(&function_registry);
|
||||
|
||||
Arc::new(function_registry)
|
||||
});
|
||||
|
||||
@@ -28,8 +28,9 @@ pub use pow::PowFunction;
|
||||
pub use rate::RateFunction;
|
||||
use snafu::ResultExt;
|
||||
|
||||
use crate::function::{Function, FunctionContext};
|
||||
use crate::function_registry::FunctionRegistry;
|
||||
use super::function::FunctionContext;
|
||||
use super::Function;
|
||||
use crate::scalars::function_registry::FunctionRegistry;
|
||||
|
||||
pub(crate) struct MathFunction;
|
||||
|
||||
|
||||
@@ -25,8 +25,8 @@ use datatypes::with_match_primitive_type_id;
|
||||
use num::traits::Pow;
|
||||
use num_traits::AsPrimitive;
|
||||
|
||||
use crate::function::{Function, FunctionContext};
|
||||
use crate::scalars::expression::{scalar_binary_op, EvalContext};
|
||||
use crate::scalars::function::{Function, FunctionContext};
|
||||
|
||||
#[derive(Clone, Debug, Default)]
|
||||
pub struct PowFunction;
|
||||
@@ -83,7 +83,6 @@ mod tests {
|
||||
use datatypes::vectors::{Float32Vector, Int8Vector};
|
||||
|
||||
use super::*;
|
||||
use crate::function::FunctionContext;
|
||||
#[test]
|
||||
fn test_pow_function() {
|
||||
let pow = PowFunction;
|
||||
|
||||
@@ -23,7 +23,7 @@ use datatypes::prelude::*;
|
||||
use datatypes::vectors::{Helper, VectorRef};
|
||||
use snafu::ResultExt;
|
||||
|
||||
use crate::function::{Function, FunctionContext};
|
||||
use crate::scalars::function::{Function, FunctionContext};
|
||||
|
||||
/// generates rates from a sequence of adjacent data points.
|
||||
#[derive(Clone, Debug, Default)]
|
||||
|
||||
@@ -19,7 +19,7 @@ use std::sync::Arc;
|
||||
|
||||
use clip::ClipFunction;
|
||||
|
||||
use crate::function_registry::FunctionRegistry;
|
||||
use crate::scalars::function_registry::FunctionRegistry;
|
||||
|
||||
pub(crate) struct NumpyFunction;
|
||||
|
||||
|
||||
@@ -24,8 +24,8 @@ use datatypes::prelude::*;
|
||||
use datatypes::vectors::PrimitiveVector;
|
||||
use paste::paste;
|
||||
|
||||
use crate::function::{Function, FunctionContext};
|
||||
use crate::scalars::expression::{scalar_binary_op, EvalContext};
|
||||
use crate::scalars::function::{Function, FunctionContext};
|
||||
|
||||
/// numpy.clip function, <https://numpy.org/doc/stable/reference/generated/numpy.clip.html>
|
||||
#[derive(Clone, Debug, Default)]
|
||||
|
||||
@@ -20,8 +20,8 @@ use common_query::prelude::{Signature, Volatility};
|
||||
use datatypes::data_type::ConcreteDataType;
|
||||
use datatypes::prelude::VectorRef;
|
||||
|
||||
use crate::function::{Function, FunctionContext};
|
||||
use crate::scalars::expression::{scalar_binary_op, EvalContext};
|
||||
use crate::scalars::function::{Function, FunctionContext};
|
||||
|
||||
#[derive(Clone, Default)]
|
||||
pub(crate) struct TestAndFunction;
|
||||
|
||||
@@ -19,7 +19,7 @@ mod to_unixtime;
|
||||
use greatest::GreatestFunction;
|
||||
use to_unixtime::ToUnixtimeFunction;
|
||||
|
||||
use crate::function_registry::FunctionRegistry;
|
||||
use crate::scalars::function_registry::FunctionRegistry;
|
||||
|
||||
pub(crate) struct TimestampFunction;
|
||||
|
||||
|
||||
@@ -27,7 +27,7 @@ use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::vectors::{Helper, VectorRef};
|
||||
use snafu::{ensure, ResultExt};
|
||||
|
||||
use crate::function::{Function, FunctionContext};
|
||||
use crate::scalars::function::{Function, FunctionContext};
|
||||
|
||||
#[derive(Clone, Debug, Default)]
|
||||
pub struct GreatestFunction;
|
||||
@@ -113,7 +113,10 @@ mod tests {
|
||||
use datatypes::value::Value;
|
||||
use datatypes::vectors::{DateVector, StringVector, Vector};
|
||||
|
||||
use super::*;
|
||||
use super::GreatestFunction;
|
||||
use crate::scalars::function::FunctionContext;
|
||||
use crate::scalars::Function;
|
||||
|
||||
#[test]
|
||||
fn test_greatest_takes_string_vector() {
|
||||
let function = GreatestFunction;
|
||||
|
||||
@@ -23,7 +23,7 @@ use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::vectors::{Int64Vector, VectorRef};
|
||||
use snafu::ensure;
|
||||
|
||||
use crate::function::{Function, FunctionContext};
|
||||
use crate::scalars::function::{Function, FunctionContext};
|
||||
|
||||
/// A function to convert the column into the unix timestamp in seconds.
|
||||
#[derive(Clone, Debug, Default)]
|
||||
@@ -152,6 +152,7 @@ mod tests {
|
||||
};
|
||||
|
||||
use super::{ToUnixtimeFunction, *};
|
||||
use crate::scalars::Function;
|
||||
|
||||
#[test]
|
||||
fn test_string_to_unixtime() {
|
||||
|
||||
@@ -23,7 +23,7 @@ use datatypes::prelude::*;
|
||||
use datatypes::vectors::Helper;
|
||||
use snafu::ResultExt;
|
||||
|
||||
use crate::function::{FunctionContext, FunctionRef};
|
||||
use crate::scalars::function::{FunctionContext, FunctionRef};
|
||||
|
||||
/// Create a ScalarUdf from function.
|
||||
pub fn create_udf(func: FunctionRef) -> ScalarUdf {
|
||||
@@ -72,7 +72,7 @@ mod tests {
|
||||
use datatypes::vectors::{BooleanVector, ConstantVector};
|
||||
|
||||
use super::*;
|
||||
use crate::function::Function;
|
||||
use crate::scalars::function::Function;
|
||||
use crate::scalars::test::TestAndFunction;
|
||||
|
||||
#[test]
|
||||
|
||||
@@ -1,102 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::fmt;
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_query::error::Result;
|
||||
use common_query::prelude::{Signature, Volatility};
|
||||
use datatypes::prelude::*;
|
||||
use datatypes::vectors::{StringVector, VectorRef};
|
||||
|
||||
use crate::function::{Function, FunctionContext};
|
||||
|
||||
const DEFAULT_VALUE: &str = "unknown";
|
||||
|
||||
/// Generates build information
|
||||
#[derive(Clone, Debug, Default)]
|
||||
pub struct BuildFunction;
|
||||
|
||||
impl fmt::Display for BuildFunction {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(f, "BUILD")
|
||||
}
|
||||
}
|
||||
|
||||
impl Function for BuildFunction {
|
||||
fn name(&self) -> &str {
|
||||
"build"
|
||||
}
|
||||
|
||||
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
|
||||
Ok(ConcreteDataType::string_datatype())
|
||||
}
|
||||
|
||||
fn signature(&self) -> Signature {
|
||||
Signature::uniform(
|
||||
0,
|
||||
vec![ConcreteDataType::string_datatype()],
|
||||
Volatility::Immutable,
|
||||
)
|
||||
}
|
||||
|
||||
fn eval(&self, _func_ctx: FunctionContext, _columns: &[VectorRef]) -> Result<VectorRef> {
|
||||
let build_info = format!(
|
||||
"branch: {}\ncommit: {}\ncommit short: {}\ndirty: {}\nversion: {}",
|
||||
build_data::get_git_branch().unwrap_or_else(|_| DEFAULT_VALUE.to_string()),
|
||||
build_data::get_git_commit().unwrap_or_else(|_| DEFAULT_VALUE.to_string()),
|
||||
build_data::get_git_commit_short().unwrap_or_else(|_| DEFAULT_VALUE.to_string()),
|
||||
build_data::get_git_dirty().map_or(DEFAULT_VALUE.to_string(), |v| v.to_string()),
|
||||
env!("CARGO_PKG_VERSION")
|
||||
);
|
||||
|
||||
let v = Arc::new(StringVector::from(vec![build_info]));
|
||||
Ok(v)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_query::prelude::TypeSignature;
|
||||
|
||||
use super::*;
|
||||
#[test]
|
||||
fn test_build_function() {
|
||||
let build = BuildFunction;
|
||||
assert_eq!("build", build.name());
|
||||
assert_eq!(
|
||||
ConcreteDataType::string_datatype(),
|
||||
build.return_type(&[]).unwrap()
|
||||
);
|
||||
assert!(matches!(build.signature(),
|
||||
Signature {
|
||||
type_signature: TypeSignature::Uniform(0, valid_types),
|
||||
volatility: Volatility::Immutable
|
||||
} if valid_types == vec![ConcreteDataType::string_datatype()]
|
||||
));
|
||||
let build_info = format!(
|
||||
"branch: {}\ncommit: {}\ncommit short: {}\ndirty: {}\nversion: {}",
|
||||
build_data::get_git_branch().unwrap_or_else(|_| DEFAULT_VALUE.to_string()),
|
||||
build_data::get_git_commit().unwrap_or_else(|_| DEFAULT_VALUE.to_string()),
|
||||
build_data::get_git_commit_short().unwrap_or_else(|_| DEFAULT_VALUE.to_string()),
|
||||
build_data::get_git_dirty().map_or(DEFAULT_VALUE.to_string(), |v| v.to_string()),
|
||||
env!("CARGO_PKG_VERSION")
|
||||
);
|
||||
let vector = build.eval(FunctionContext::default(), &[]).unwrap();
|
||||
let expect: VectorRef = Arc::new(StringVector::from(vec![build_info]));
|
||||
assert_eq!(expect, vector);
|
||||
}
|
||||
}
|
||||
@@ -98,11 +98,7 @@ mod tests {
|
||||
|
||||
use api::helper::ColumnDataTypeWrapper;
|
||||
use api::v1::column::Values;
|
||||
use api::v1::column_data_type_extension::TypeExt;
|
||||
use api::v1::{
|
||||
Column, ColumnDataType, ColumnDataTypeExtension, Decimal128, DecimalTypeExtension,
|
||||
IntervalMonthDayNano, SemanticType,
|
||||
};
|
||||
use api::v1::{Column, ColumnDataType, IntervalMonthDayNano, SemanticType};
|
||||
use common_base::BitVec;
|
||||
use common_catalog::consts::MITO_ENGINE;
|
||||
use common_time::interval::IntervalUnit;
|
||||
@@ -164,7 +160,7 @@ mod tests {
|
||||
|
||||
let column_defs = create_expr.column_defs;
|
||||
assert_eq!(column_defs[6].name, create_expr.time_index);
|
||||
assert_eq!(8, column_defs.len());
|
||||
assert_eq!(7, column_defs.len());
|
||||
|
||||
assert_eq!(
|
||||
ConcreteDataType::string_datatype(),
|
||||
@@ -270,18 +266,6 @@ mod tests {
|
||||
.unwrap()
|
||||
)
|
||||
);
|
||||
|
||||
let decimal_column = column_defs.iter().find(|c| c.name == "decimals").unwrap();
|
||||
assert_eq!(
|
||||
ConcreteDataType::decimal128_datatype(38, 10),
|
||||
ConcreteDataType::from(
|
||||
ColumnDataTypeWrapper::try_new(
|
||||
decimal_column.data_type,
|
||||
decimal_column.datatype_extension.clone(),
|
||||
)
|
||||
.unwrap()
|
||||
)
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -302,7 +286,7 @@ mod tests {
|
||||
|
||||
let add_columns = find_new_columns(&schema, &insert_batch.0).unwrap().unwrap();
|
||||
|
||||
assert_eq!(6, add_columns.add_columns.len());
|
||||
assert_eq!(5, add_columns.add_columns.len());
|
||||
let host_column = &add_columns.add_columns[0];
|
||||
assert_eq!(
|
||||
ConcreteDataType::string_datatype(),
|
||||
@@ -363,23 +347,6 @@ mod tests {
|
||||
.unwrap()
|
||||
)
|
||||
);
|
||||
|
||||
let decimal_column = &add_columns.add_columns[5];
|
||||
assert_eq!(
|
||||
ConcreteDataType::decimal128_datatype(38, 10),
|
||||
ConcreteDataType::from(
|
||||
ColumnDataTypeWrapper::try_new(
|
||||
decimal_column.column_def.as_ref().unwrap().data_type,
|
||||
decimal_column
|
||||
.column_def
|
||||
.as_ref()
|
||||
.unwrap()
|
||||
.datatype_extension
|
||||
.clone()
|
||||
)
|
||||
.unwrap()
|
||||
)
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -499,23 +466,6 @@ mod tests {
|
||||
datatype: ColumnDataType::TimestampMillisecond as i32,
|
||||
..Default::default()
|
||||
};
|
||||
let decimal_vals = Values {
|
||||
decimal128_values: vec![Decimal128 { hi: 0, lo: 123 }, Decimal128 { hi: 0, lo: 456 }],
|
||||
..Default::default()
|
||||
};
|
||||
let decimal_column = Column {
|
||||
column_name: "decimals".to_string(),
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
values: Some(decimal_vals),
|
||||
null_mask: vec![0],
|
||||
datatype: ColumnDataType::Decimal128 as i32,
|
||||
datatype_extension: Some(ColumnDataTypeExtension {
|
||||
type_ext: Some(TypeExt::DecimalType(DecimalTypeExtension {
|
||||
precision: 38,
|
||||
scale: 10,
|
||||
})),
|
||||
}),
|
||||
};
|
||||
|
||||
(
|
||||
vec![
|
||||
@@ -526,7 +476,6 @@ mod tests {
|
||||
interval_column,
|
||||
duration_column,
|
||||
ts_column,
|
||||
decimal_column,
|
||||
],
|
||||
row_count,
|
||||
)
|
||||
|
||||
@@ -15,8 +15,7 @@
|
||||
use std::collections::HashSet;
|
||||
|
||||
use api::v1::{
|
||||
AddColumn, AddColumns, Column, ColumnDataTypeExtension, ColumnDef, ColumnSchema,
|
||||
CreateTableExpr, SemanticType,
|
||||
AddColumn, AddColumns, Column, ColumnDef, ColumnSchema, CreateTableExpr, SemanticType,
|
||||
};
|
||||
use datatypes::schema::Schema;
|
||||
use snafu::{ensure, OptionExt};
|
||||
@@ -31,7 +30,6 @@ pub struct ColumnExpr<'a> {
|
||||
pub column_name: &'a str,
|
||||
pub datatype: i32,
|
||||
pub semantic_type: i32,
|
||||
pub datatype_extension: &'a Option<ColumnDataTypeExtension>,
|
||||
}
|
||||
|
||||
impl<'a> ColumnExpr<'a> {
|
||||
@@ -52,7 +50,6 @@ impl<'a> From<&'a Column> for ColumnExpr<'a> {
|
||||
column_name: &column.column_name,
|
||||
datatype: column.datatype,
|
||||
semantic_type: column.semantic_type,
|
||||
datatype_extension: &column.datatype_extension,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -63,7 +60,6 @@ impl<'a> From<&'a ColumnSchema> for ColumnExpr<'a> {
|
||||
column_name: &schema.column_name,
|
||||
datatype: schema.datatype,
|
||||
semantic_type: schema.semantic_type,
|
||||
datatype_extension: &schema.datatype_extension,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -98,7 +94,6 @@ pub fn build_create_table_expr(
|
||||
column_name,
|
||||
datatype,
|
||||
semantic_type,
|
||||
datatype_extension,
|
||||
} in column_exprs
|
||||
{
|
||||
let mut is_nullable = true;
|
||||
@@ -126,7 +121,7 @@ pub fn build_create_table_expr(
|
||||
default_constraint: vec![],
|
||||
semantic_type,
|
||||
comment: String::new(),
|
||||
datatype_extension: datatype_extension.clone(),
|
||||
..Default::default()
|
||||
};
|
||||
column_defs.push(column_def);
|
||||
}
|
||||
@@ -167,7 +162,7 @@ pub fn extract_new_columns(
|
||||
default_constraint: vec![],
|
||||
semantic_type: expr.semantic_type,
|
||||
comment: String::new(),
|
||||
datatype_extension: expr.datatype_extension.clone(),
|
||||
..Default::default()
|
||||
});
|
||||
AddColumn {
|
||||
column_def,
|
||||
|
||||
@@ -16,7 +16,7 @@ common-recordbatch.workspace = true
|
||||
common-runtime.workspace = true
|
||||
common-telemetry.workspace = true
|
||||
common-time.workspace = true
|
||||
dashmap.workspace = true
|
||||
dashmap = "5.4"
|
||||
datafusion.workspace = true
|
||||
datatypes.workspace = true
|
||||
flatbuffers = "23.1"
|
||||
|
||||
@@ -13,9 +13,8 @@ async-recursion = "1.0"
|
||||
async-stream.workspace = true
|
||||
async-trait.workspace = true
|
||||
base64.workspace = true
|
||||
bytes.workspace = true
|
||||
bytes = "1.4"
|
||||
common-catalog.workspace = true
|
||||
common-config.workspace = true
|
||||
common-error.workspace = true
|
||||
common-grpc-expr.workspace = true
|
||||
common-macro.workspace = true
|
||||
@@ -25,25 +24,20 @@ common-runtime.workspace = true
|
||||
common-telemetry.workspace = true
|
||||
common-time.workspace = true
|
||||
datatypes.workspace = true
|
||||
derive_builder.workspace = true
|
||||
etcd-client.workspace = true
|
||||
futures.workspace = true
|
||||
humantime-serde.workspace = true
|
||||
lazy_static.workspace = true
|
||||
prometheus.workspace = true
|
||||
prost.workspace = true
|
||||
rand.workspace = true
|
||||
regex.workspace = true
|
||||
rskafka.workspace = true
|
||||
serde.workspace = true
|
||||
serde_json.workspace = true
|
||||
serde_with = "3"
|
||||
snafu.workspace = true
|
||||
store-api.workspace = true
|
||||
strum.workspace = true
|
||||
table.workspace = true
|
||||
tokio.workspace = true
|
||||
toml.workspace = true
|
||||
tonic.workspace = true
|
||||
|
||||
[dev-dependencies]
|
||||
|
||||
@@ -12,18 +12,18 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
|
||||
use api::v1::meta::Partition;
|
||||
use common_telemetry::tracing_context::W3cTrace;
|
||||
use store_api::storage::{RegionNumber, TableId};
|
||||
use store_api::storage::TableId;
|
||||
use table::metadata::RawTableInfo;
|
||||
|
||||
use crate::cache_invalidator::CacheInvalidatorRef;
|
||||
use crate::datanode_manager::DatanodeManagerRef;
|
||||
use crate::error::Result;
|
||||
use crate::key::TableMetadataManagerRef;
|
||||
use crate::region_keeper::MemoryRegionKeeperRef;
|
||||
use crate::rpc::ddl::{CreateTableTask, SubmitDdlTaskRequest, SubmitDdlTaskResponse};
|
||||
use crate::rpc::ddl::{SubmitDdlTaskRequest, SubmitDdlTaskResponse};
|
||||
use crate::rpc::router::RegionRoute;
|
||||
|
||||
pub mod alter_table;
|
||||
@@ -53,24 +53,14 @@ pub struct TableMetadataAllocatorContext {
|
||||
pub cluster_id: u64,
|
||||
}
|
||||
|
||||
/// Metadata allocated to a table.
|
||||
pub struct TableMetadata {
|
||||
/// Table id.
|
||||
pub table_id: TableId,
|
||||
/// Route information for each region of the table.
|
||||
pub region_routes: Vec<RegionRoute>,
|
||||
/// The encoded wal options for regions of the table.
|
||||
// If a region does not have an associated wal options, no key for the region would be found in the map.
|
||||
pub region_wal_options: HashMap<RegionNumber, String>,
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
pub trait TableMetadataAllocator: Send + Sync {
|
||||
async fn create(
|
||||
&self,
|
||||
ctx: &TableMetadataAllocatorContext,
|
||||
task: &CreateTableTask,
|
||||
) -> Result<TableMetadata>;
|
||||
table_info: &mut RawTableInfo,
|
||||
partitions: &[Partition],
|
||||
) -> Result<(TableId, Vec<RegionRoute>)>;
|
||||
}
|
||||
|
||||
pub type TableMetadataAllocatorRef = Arc<dyn TableMetadataAllocator>;
|
||||
@@ -80,5 +70,4 @@ pub struct DdlContext {
|
||||
pub datanode_manager: DatanodeManagerRef,
|
||||
pub cache_invalidator: CacheInvalidatorRef,
|
||||
pub table_metadata_manager: TableMetadataManagerRef,
|
||||
pub memory_region_keeper: MemoryRegionKeeperRef,
|
||||
}
|
||||
|
||||
@@ -12,42 +12,31 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::HashMap;
|
||||
|
||||
use api::v1::region::region_request::Body as PbRegionRequest;
|
||||
use api::v1::region::{
|
||||
CreateRequest as PbCreateRegionRequest, RegionColumnDef, RegionRequest, RegionRequestHeader,
|
||||
};
|
||||
use api::v1::{ColumnDef, CreateTableExpr, SemanticType};
|
||||
use api::v1::{ColumnDef, SemanticType};
|
||||
use async_trait::async_trait;
|
||||
use common_catalog::consts::METRIC_ENGINE;
|
||||
use common_config::WAL_OPTIONS_KEY;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_procedure::error::{
|
||||
ExternalSnafu, FromJsonSnafu, Result as ProcedureResult, ToJsonSnafu,
|
||||
};
|
||||
use common_procedure::error::{FromJsonSnafu, Result as ProcedureResult, ToJsonSnafu};
|
||||
use common_procedure::{Context as ProcedureContext, LockKey, Procedure, Status};
|
||||
use common_telemetry::info;
|
||||
use common_telemetry::tracing_context::TracingContext;
|
||||
use futures::future::join_all;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
use store_api::metric_engine_consts::LOGICAL_TABLE_METADATA_KEY;
|
||||
use store_api::storage::{RegionId, RegionNumber};
|
||||
use store_api::storage::RegionId;
|
||||
use strum::AsRefStr;
|
||||
use table::engine::TableReference;
|
||||
use table::metadata::{RawTableInfo, TableId};
|
||||
|
||||
use crate::ddl::utils::{handle_operate_region_error, handle_retry_error, region_storage_path};
|
||||
use crate::ddl::DdlContext;
|
||||
use crate::error::{self, Result, TableInfoNotFoundSnafu};
|
||||
use crate::error::{self, Result};
|
||||
use crate::key::table_name::TableNameKey;
|
||||
use crate::metrics;
|
||||
use crate::region_keeper::OperatingRegionGuard;
|
||||
use crate::rpc::ddl::CreateTableTask;
|
||||
use crate::rpc::router::{
|
||||
find_leader_regions, find_leaders, operating_leader_regions, RegionRoute,
|
||||
};
|
||||
use crate::rpc::router::{find_leader_regions, find_leaders, RegionRoute};
|
||||
|
||||
pub struct CreateTableProcedure {
|
||||
pub context: DdlContext,
|
||||
@@ -61,29 +50,20 @@ impl CreateTableProcedure {
|
||||
cluster_id: u64,
|
||||
task: CreateTableTask,
|
||||
region_routes: Vec<RegionRoute>,
|
||||
region_wal_options: HashMap<RegionNumber, String>,
|
||||
context: DdlContext,
|
||||
) -> Self {
|
||||
Self {
|
||||
context,
|
||||
creator: TableCreator::new(cluster_id, task, region_routes, region_wal_options),
|
||||
creator: TableCreator::new(cluster_id, task, region_routes),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn from_json(json: &str, context: DdlContext) -> ProcedureResult<Self> {
|
||||
let data = serde_json::from_str(json).context(FromJsonSnafu)?;
|
||||
|
||||
let mut creator = TableCreator {
|
||||
data,
|
||||
opening_regions: vec![],
|
||||
};
|
||||
|
||||
creator
|
||||
.register_opening_regions(&context)
|
||||
.map_err(BoxedError::new)
|
||||
.context(ExternalSnafu)?;
|
||||
|
||||
Ok(CreateTableProcedure { context, creator })
|
||||
Ok(CreateTableProcedure {
|
||||
context,
|
||||
creator: TableCreator { data },
|
||||
})
|
||||
}
|
||||
|
||||
pub fn table_info(&self) -> &RawTableInfo {
|
||||
@@ -98,10 +78,6 @@ impl CreateTableProcedure {
|
||||
&self.creator.data.region_routes
|
||||
}
|
||||
|
||||
pub fn region_wal_options(&self) -> &HashMap<RegionNumber, String> {
|
||||
&self.creator.data.region_wal_options
|
||||
}
|
||||
|
||||
/// Checks whether the table exists.
|
||||
async fn on_prepare(&mut self) -> Result<Status> {
|
||||
let expr = &self.creator.data.task.create_table;
|
||||
@@ -132,7 +108,7 @@ impl CreateTableProcedure {
|
||||
Ok(Status::executing(true))
|
||||
}
|
||||
|
||||
pub fn new_region_request_builder(&self) -> Result<CreateRequestBuilder> {
|
||||
pub fn create_region_request_template(&self) -> Result<PbCreateRegionRequest> {
|
||||
let create_table_expr = &self.creator.data.task.create_table;
|
||||
|
||||
let column_defs = create_table_expr
|
||||
@@ -182,33 +158,26 @@ impl CreateTableProcedure {
|
||||
})
|
||||
.collect::<Result<_>>()?;
|
||||
|
||||
let template = PbCreateRegionRequest {
|
||||
Ok(PbCreateRegionRequest {
|
||||
region_id: 0,
|
||||
engine: create_table_expr.engine.to_string(),
|
||||
column_defs,
|
||||
primary_key,
|
||||
path: String::new(),
|
||||
options: create_table_expr.table_options.clone(),
|
||||
};
|
||||
|
||||
let builder = CreateRequestBuilder::new_template(self.context.clone(), template);
|
||||
Ok(builder)
|
||||
})
|
||||
}
|
||||
|
||||
pub async fn on_datanode_create_regions(&mut self) -> Result<Status> {
|
||||
// Registers opening regions
|
||||
self.creator.register_opening_regions(&self.context)?;
|
||||
|
||||
let create_table_data = &self.creator.data;
|
||||
let region_routes = &create_table_data.region_routes;
|
||||
let region_wal_options = &create_table_data.region_wal_options;
|
||||
|
||||
let create_table_expr = &create_table_data.task.create_table;
|
||||
let catalog = &create_table_expr.catalog_name;
|
||||
let schema = &create_table_expr.schema_name;
|
||||
let storage_path = region_storage_path(catalog, schema);
|
||||
|
||||
let mut request_builder = self.new_region_request_builder()?;
|
||||
let request_template = self.create_region_request_template()?;
|
||||
|
||||
let leaders = find_leaders(region_routes);
|
||||
let mut create_region_tasks = Vec::with_capacity(leaders.len());
|
||||
@@ -217,20 +186,17 @@ impl CreateTableProcedure {
|
||||
let requester = self.context.datanode_manager.datanode(&datanode).await;
|
||||
|
||||
let regions = find_leader_regions(region_routes, &datanode);
|
||||
let mut requests = Vec::with_capacity(regions.len());
|
||||
for region_number in regions {
|
||||
let region_id = RegionId::new(self.table_id(), region_number);
|
||||
let create_region_request = request_builder
|
||||
.build_one(
|
||||
&self.creator.data.task.create_table,
|
||||
region_id,
|
||||
storage_path.clone(),
|
||||
region_wal_options,
|
||||
)
|
||||
.await?;
|
||||
let requests = regions
|
||||
.iter()
|
||||
.map(|region_number| {
|
||||
let region_id = RegionId::new(self.table_id(), *region_number);
|
||||
|
||||
requests.push(PbRegionRequest::Create(create_region_request));
|
||||
}
|
||||
let mut create_region_request = request_template.clone();
|
||||
create_region_request.region_id = region_id.as_u64();
|
||||
create_region_request.path = storage_path.clone();
|
||||
PbRegionRequest::Create(create_region_request)
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
for request in requests {
|
||||
let request = RegionRequest {
|
||||
@@ -260,9 +226,7 @@ impl CreateTableProcedure {
|
||||
|
||||
self.creator.data.state = CreateTableState::CreateMetadata;
|
||||
|
||||
// Ensures the procedures after the crash start from the `DatanodeCreateRegions` stage.
|
||||
// TODO(weny): Add more tests.
|
||||
Ok(Status::executing(false))
|
||||
Ok(Status::executing(true))
|
||||
}
|
||||
|
||||
async fn on_create_metadata(&self) -> Result<Status> {
|
||||
@@ -271,9 +235,8 @@ impl CreateTableProcedure {
|
||||
|
||||
let raw_table_info = self.table_info().clone();
|
||||
let region_routes = self.region_routes().clone();
|
||||
let region_wal_options = self.region_wal_options().clone();
|
||||
manager
|
||||
.create_table_metadata(raw_table_info, region_routes, region_wal_options)
|
||||
.create_table_metadata(raw_table_info, region_routes)
|
||||
.await?;
|
||||
info!("Created table metadata for table {table_id}");
|
||||
|
||||
@@ -319,57 +282,20 @@ impl Procedure for CreateTableProcedure {
|
||||
}
|
||||
|
||||
pub struct TableCreator {
|
||||
/// The serializable data.
|
||||
pub data: CreateTableData,
|
||||
/// The guards of opening.
|
||||
pub opening_regions: Vec<OperatingRegionGuard>,
|
||||
}
|
||||
|
||||
impl TableCreator {
|
||||
pub fn new(
|
||||
cluster_id: u64,
|
||||
task: CreateTableTask,
|
||||
region_routes: Vec<RegionRoute>,
|
||||
region_wal_options: HashMap<RegionNumber, String>,
|
||||
) -> Self {
|
||||
pub fn new(cluster_id: u64, task: CreateTableTask, region_routes: Vec<RegionRoute>) -> Self {
|
||||
Self {
|
||||
data: CreateTableData {
|
||||
state: CreateTableState::Prepare,
|
||||
cluster_id,
|
||||
task,
|
||||
region_routes,
|
||||
region_wal_options,
|
||||
},
|
||||
opening_regions: vec![],
|
||||
}
|
||||
}
|
||||
|
||||
/// Register opening regions if doesn't exist.
|
||||
pub fn register_opening_regions(&mut self, context: &DdlContext) -> Result<()> {
|
||||
let region_routes = &self.data.region_routes;
|
||||
|
||||
let opening_regions = operating_leader_regions(region_routes);
|
||||
|
||||
if self.opening_regions.len() == opening_regions.len() {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let mut opening_region_guards = Vec::with_capacity(opening_regions.len());
|
||||
|
||||
for (region_id, datanode_id) in opening_regions {
|
||||
let guard = context
|
||||
.memory_region_keeper
|
||||
.register(datanode_id, region_id)
|
||||
.context(error::RegionOperatingRaceSnafu {
|
||||
region_id,
|
||||
peer_id: datanode_id,
|
||||
})?;
|
||||
opening_region_guards.push(guard);
|
||||
}
|
||||
|
||||
self.opening_regions = opening_region_guards;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, AsRefStr)]
|
||||
@@ -387,7 +313,6 @@ pub struct CreateTableData {
|
||||
pub state: CreateTableState,
|
||||
pub task: CreateTableTask,
|
||||
pub region_routes: Vec<RegionRoute>,
|
||||
pub region_wal_options: HashMap<RegionNumber, String>,
|
||||
pub cluster_id: u64,
|
||||
}
|
||||
|
||||
@@ -396,91 +321,3 @@ impl CreateTableData {
|
||||
self.task.table_ref()
|
||||
}
|
||||
}
|
||||
|
||||
/// Builder for [PbCreateRegionRequest].
|
||||
pub struct CreateRequestBuilder {
|
||||
context: DdlContext,
|
||||
template: PbCreateRegionRequest,
|
||||
/// Optional. Only for metric engine.
|
||||
physical_table_id: Option<TableId>,
|
||||
}
|
||||
|
||||
impl CreateRequestBuilder {
|
||||
fn new_template(context: DdlContext, template: PbCreateRegionRequest) -> Self {
|
||||
Self {
|
||||
context,
|
||||
template,
|
||||
physical_table_id: None,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn template(&self) -> &PbCreateRegionRequest {
|
||||
&self.template
|
||||
}
|
||||
|
||||
async fn build_one(
|
||||
&mut self,
|
||||
create_expr: &CreateTableExpr,
|
||||
region_id: RegionId,
|
||||
storage_path: String,
|
||||
region_wal_options: &HashMap<RegionNumber, String>,
|
||||
) -> Result<PbCreateRegionRequest> {
|
||||
let mut request = self.template.clone();
|
||||
|
||||
request.region_id = region_id.as_u64();
|
||||
request.path = storage_path;
|
||||
// Stores the encoded wal options into the request options.
|
||||
region_wal_options
|
||||
.get(®ion_id.region_number())
|
||||
.and_then(|wal_options| {
|
||||
request
|
||||
.options
|
||||
.insert(WAL_OPTIONS_KEY.to_string(), wal_options.clone())
|
||||
});
|
||||
|
||||
if self.template.engine == METRIC_ENGINE {
|
||||
self.metric_engine_hook(create_expr, region_id, &mut request)
|
||||
.await?;
|
||||
}
|
||||
|
||||
Ok(request)
|
||||
}
|
||||
|
||||
async fn metric_engine_hook(
|
||||
&mut self,
|
||||
create_expr: &CreateTableExpr,
|
||||
region_id: RegionId,
|
||||
request: &mut PbCreateRegionRequest,
|
||||
) -> Result<()> {
|
||||
if let Some(physical_table_name) = request.options.get(LOGICAL_TABLE_METADATA_KEY) {
|
||||
let table_id = if let Some(table_id) = self.physical_table_id {
|
||||
table_id
|
||||
} else {
|
||||
let table_name_manager = self.context.table_metadata_manager.table_name_manager();
|
||||
let table_name_key = TableNameKey::new(
|
||||
&create_expr.catalog_name,
|
||||
&create_expr.schema_name,
|
||||
physical_table_name,
|
||||
);
|
||||
let table_id = table_name_manager
|
||||
.get(table_name_key)
|
||||
.await?
|
||||
.context(TableInfoNotFoundSnafu {
|
||||
table_name: physical_table_name,
|
||||
})?
|
||||
.table_id();
|
||||
self.physical_table_id = Some(table_id);
|
||||
table_id
|
||||
};
|
||||
// Concat physical table's table id and corresponding region number to get
|
||||
// the physical region id.
|
||||
let physical_region_id = RegionId::new(table_id, region_id.region_number());
|
||||
request.options.insert(
|
||||
LOGICAL_TABLE_METADATA_KEY.to_string(),
|
||||
physical_region_id.as_u64().to_string(),
|
||||
);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -26,7 +26,7 @@ use common_telemetry::tracing_context::TracingContext;
|
||||
use common_telemetry::{debug, info};
|
||||
use futures::future::join_all;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
use snafu::{ensure, ResultExt};
|
||||
use store_api::storage::RegionId;
|
||||
use strum::AsRefStr;
|
||||
use table::engine::TableReference;
|
||||
@@ -42,19 +42,12 @@ use crate::key::table_name::TableNameKey;
|
||||
use crate::key::table_route::TableRouteValue;
|
||||
use crate::key::DeserializedValueWithBytes;
|
||||
use crate::metrics;
|
||||
use crate::region_keeper::OperatingRegionGuard;
|
||||
use crate::rpc::ddl::DropTableTask;
|
||||
use crate::rpc::router::{
|
||||
find_leader_regions, find_leaders, operating_leader_regions, RegionRoute,
|
||||
};
|
||||
use crate::rpc::router::{find_leader_regions, find_leaders, RegionRoute};
|
||||
|
||||
pub struct DropTableProcedure {
|
||||
/// The context of procedure runtime.
|
||||
pub context: DdlContext,
|
||||
/// The serializable data.
|
||||
pub data: DropTableData,
|
||||
/// The guards of opening regions.
|
||||
pub dropping_regions: Vec<OperatingRegionGuard>,
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
@@ -71,17 +64,12 @@ impl DropTableProcedure {
|
||||
Self {
|
||||
context,
|
||||
data: DropTableData::new(cluster_id, task, table_route_value, table_info_value),
|
||||
dropping_regions: vec![],
|
||||
}
|
||||
}
|
||||
|
||||
pub fn from_json(json: &str, context: DdlContext) -> ProcedureResult<Self> {
|
||||
let data = serde_json::from_str(json).context(FromJsonSnafu)?;
|
||||
Ok(Self {
|
||||
context,
|
||||
data,
|
||||
dropping_regions: vec![],
|
||||
})
|
||||
Ok(Self { context, data })
|
||||
}
|
||||
|
||||
async fn on_prepare(&mut self) -> Result<Status> {
|
||||
@@ -114,42 +102,8 @@ impl DropTableProcedure {
|
||||
Ok(Status::executing(true))
|
||||
}
|
||||
|
||||
/// Register dropping regions if doesn't exist.
|
||||
fn register_dropping_regions(&mut self) -> Result<()> {
|
||||
let region_routes = self.data.region_routes();
|
||||
|
||||
let dropping_regions = operating_leader_regions(region_routes);
|
||||
|
||||
if self.dropping_regions.len() == dropping_regions.len() {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let mut dropping_region_guards = Vec::with_capacity(dropping_regions.len());
|
||||
|
||||
for (region_id, datanode_id) in dropping_regions {
|
||||
let guard = self
|
||||
.context
|
||||
.memory_region_keeper
|
||||
.register(datanode_id, region_id)
|
||||
.context(error::RegionOperatingRaceSnafu {
|
||||
region_id,
|
||||
peer_id: datanode_id,
|
||||
})?;
|
||||
dropping_region_guards.push(guard);
|
||||
}
|
||||
|
||||
self.dropping_regions = dropping_region_guards;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Removes the table metadata.
|
||||
async fn on_remove_metadata(&mut self) -> Result<Status> {
|
||||
// NOTES: If the meta server is crashed after the `RemoveMetadata`,
|
||||
// Corresponding regions of this table on the Datanode will be closed automatically.
|
||||
// Then any future dropping operation will fail.
|
||||
|
||||
// TODO(weny): Considers introducing a RegionStatus to indicate the region is dropping.
|
||||
|
||||
let table_metadata_manager = &self.context.table_metadata_manager;
|
||||
let table_info_value = &self.data.table_info_value;
|
||||
let table_route_value = &self.data.table_route_value;
|
||||
|
||||
@@ -12,14 +12,12 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_procedure::{watcher, ProcedureId, ProcedureManagerRef, ProcedureWithId};
|
||||
use common_telemetry::tracing_context::{FutureExt, TracingContext};
|
||||
use common_telemetry::{info, tracing};
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use store_api::storage::RegionNumber;
|
||||
|
||||
use crate::cache_invalidator::CacheInvalidatorRef;
|
||||
use crate::datanode_manager::DatanodeManagerRef;
|
||||
@@ -28,7 +26,7 @@ use crate::ddl::create_table::CreateTableProcedure;
|
||||
use crate::ddl::drop_table::DropTableProcedure;
|
||||
use crate::ddl::truncate_table::TruncateTableProcedure;
|
||||
use crate::ddl::{
|
||||
DdlContext, DdlTaskExecutor, ExecutorContext, TableMetadata, TableMetadataAllocatorContext,
|
||||
DdlContext, DdlTaskExecutor, ExecutorContext, TableMetadataAllocatorContext,
|
||||
TableMetadataAllocatorRef,
|
||||
};
|
||||
use crate::error::{
|
||||
@@ -39,7 +37,6 @@ use crate::key::table_info::TableInfoValue;
|
||||
use crate::key::table_name::TableNameKey;
|
||||
use crate::key::table_route::TableRouteValue;
|
||||
use crate::key::{DeserializedValueWithBytes, TableMetadataManagerRef};
|
||||
use crate::region_keeper::MemoryRegionKeeperRef;
|
||||
use crate::rpc::ddl::DdlTask::{AlterTable, CreateTable, DropTable, TruncateTable};
|
||||
use crate::rpc::ddl::{
|
||||
AlterTableTask, CreateTableTask, DropTableTask, SubmitDdlTaskRequest, SubmitDdlTaskResponse,
|
||||
@@ -54,8 +51,7 @@ pub struct DdlManager {
|
||||
datanode_manager: DatanodeManagerRef,
|
||||
cache_invalidator: CacheInvalidatorRef,
|
||||
table_metadata_manager: TableMetadataManagerRef,
|
||||
table_metadata_allocator: TableMetadataAllocatorRef,
|
||||
memory_region_keeper: MemoryRegionKeeperRef,
|
||||
table_meta_allocator: TableMetadataAllocatorRef,
|
||||
}
|
||||
|
||||
impl DdlManager {
|
||||
@@ -65,16 +61,14 @@ impl DdlManager {
|
||||
datanode_clients: DatanodeManagerRef,
|
||||
cache_invalidator: CacheInvalidatorRef,
|
||||
table_metadata_manager: TableMetadataManagerRef,
|
||||
table_metadata_allocator: TableMetadataAllocatorRef,
|
||||
memory_region_keeper: MemoryRegionKeeperRef,
|
||||
table_meta_allocator: TableMetadataAllocatorRef,
|
||||
) -> Result<Self> {
|
||||
let manager = Self {
|
||||
procedure_manager,
|
||||
datanode_manager: datanode_clients,
|
||||
cache_invalidator,
|
||||
table_metadata_manager,
|
||||
table_metadata_allocator,
|
||||
memory_region_keeper,
|
||||
table_meta_allocator,
|
||||
};
|
||||
manager.register_loaders()?;
|
||||
Ok(manager)
|
||||
@@ -91,7 +85,6 @@ impl DdlManager {
|
||||
datanode_manager: self.datanode_manager.clone(),
|
||||
cache_invalidator: self.cache_invalidator.clone(),
|
||||
table_metadata_manager: self.table_metadata_manager.clone(),
|
||||
memory_region_keeper: self.memory_region_keeper.clone(),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -178,17 +171,11 @@ impl DdlManager {
|
||||
cluster_id: u64,
|
||||
create_table_task: CreateTableTask,
|
||||
region_routes: Vec<RegionRoute>,
|
||||
region_wal_options: HashMap<RegionNumber, String>,
|
||||
) -> Result<ProcedureId> {
|
||||
let context = self.create_context();
|
||||
|
||||
let procedure = CreateTableProcedure::new(
|
||||
cluster_id,
|
||||
create_table_task,
|
||||
region_routes,
|
||||
region_wal_options,
|
||||
context,
|
||||
);
|
||||
let procedure =
|
||||
CreateTableProcedure::new(cluster_id, create_table_task, region_routes, context);
|
||||
|
||||
let procedure_with_id = ProcedureWithId::with_random_id(Box::new(procedure));
|
||||
|
||||
@@ -382,29 +369,17 @@ async fn handle_create_table_task(
|
||||
cluster_id: u64,
|
||||
mut create_table_task: CreateTableTask,
|
||||
) -> Result<SubmitDdlTaskResponse> {
|
||||
let table_meta = ddl_manager
|
||||
.table_metadata_allocator
|
||||
let (table_id, region_routes) = ddl_manager
|
||||
.table_meta_allocator
|
||||
.create(
|
||||
&TableMetadataAllocatorContext { cluster_id },
|
||||
&create_table_task,
|
||||
&mut create_table_task.table_info,
|
||||
&create_table_task.partitions,
|
||||
)
|
||||
.await?;
|
||||
|
||||
let TableMetadata {
|
||||
table_id,
|
||||
region_routes,
|
||||
region_wal_options,
|
||||
} = table_meta;
|
||||
|
||||
create_table_task.table_info.ident.table_id = table_id;
|
||||
|
||||
let id = ddl_manager
|
||||
.submit_create_table_task(
|
||||
cluster_id,
|
||||
create_table_task,
|
||||
region_routes,
|
||||
region_wal_options,
|
||||
)
|
||||
.submit_create_table_task(cluster_id, create_table_task, region_routes)
|
||||
.await?;
|
||||
|
||||
info!("Table: {table_id:?} is created via procedure_id {id:?}");
|
||||
@@ -455,7 +430,9 @@ impl DdlTaskExecutor for DdlManager {
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
use api::v1::meta::Partition;
|
||||
use common_procedure::local::LocalManager;
|
||||
use table::metadata::{RawTableInfo, TableId};
|
||||
|
||||
use super::DdlManager;
|
||||
use crate::cache_invalidator::DummyCacheInvalidator;
|
||||
@@ -464,13 +441,12 @@ mod tests {
|
||||
use crate::ddl::create_table::CreateTableProcedure;
|
||||
use crate::ddl::drop_table::DropTableProcedure;
|
||||
use crate::ddl::truncate_table::TruncateTableProcedure;
|
||||
use crate::ddl::{TableMetadata, TableMetadataAllocator, TableMetadataAllocatorContext};
|
||||
use crate::ddl::{TableMetadataAllocator, TableMetadataAllocatorContext};
|
||||
use crate::error::Result;
|
||||
use crate::key::TableMetadataManager;
|
||||
use crate::kv_backend::memory::MemoryKvBackend;
|
||||
use crate::peer::Peer;
|
||||
use crate::region_keeper::MemoryRegionKeeper;
|
||||
use crate::rpc::ddl::CreateTableTask;
|
||||
use crate::rpc::router::RegionRoute;
|
||||
use crate::state_store::KvStateStore;
|
||||
|
||||
/// A dummy implemented [DatanodeManager].
|
||||
@@ -491,8 +467,9 @@ mod tests {
|
||||
async fn create(
|
||||
&self,
|
||||
_ctx: &TableMetadataAllocatorContext,
|
||||
_task: &CreateTableTask,
|
||||
) -> Result<TableMetadata> {
|
||||
_table_info: &mut RawTableInfo,
|
||||
_partitions: &[Partition],
|
||||
) -> Result<(TableId, Vec<RegionRoute>)> {
|
||||
unimplemented!()
|
||||
}
|
||||
}
|
||||
@@ -511,7 +488,6 @@ mod tests {
|
||||
Arc::new(DummyCacheInvalidator),
|
||||
table_metadata_manager,
|
||||
Arc::new(DummyTableMetadataAllocator),
|
||||
Arc::new(MemoryRegionKeeper::default()),
|
||||
);
|
||||
|
||||
let expected_loaders = vec![
|
||||
|
||||
@@ -14,17 +14,15 @@
|
||||
|
||||
use std::str::Utf8Error;
|
||||
|
||||
use common_config::wal::WalOptions;
|
||||
use common_error::ext::{BoxedError, ErrorExt};
|
||||
use common_error::status_code::StatusCode;
|
||||
use common_macro::stack_trace_debug;
|
||||
use serde_json::error::Error as JsonError;
|
||||
use snafu::{Location, Snafu};
|
||||
use store_api::storage::{RegionId, RegionNumber};
|
||||
use store_api::storage::RegionNumber;
|
||||
use table::metadata::TableId;
|
||||
|
||||
use crate::peer::Peer;
|
||||
use crate::DatanodeId;
|
||||
|
||||
#[derive(Snafu)]
|
||||
#[snafu(visibility(pub))]
|
||||
@@ -33,26 +31,9 @@ pub enum Error {
|
||||
#[snafu(display("Empty key is not allowed"))]
|
||||
EmptyKey { location: Location },
|
||||
|
||||
#[snafu(display(
|
||||
"Another procedure is operating the region: {} on peer: {}",
|
||||
region_id,
|
||||
peer_id
|
||||
))]
|
||||
RegionOperatingRace {
|
||||
location: Location,
|
||||
peer_id: DatanodeId,
|
||||
region_id: RegionId,
|
||||
},
|
||||
|
||||
#[snafu(display("Invalid result with a txn response: {}", err_msg))]
|
||||
InvalidTxnResult { err_msg: String, location: Location },
|
||||
|
||||
#[snafu(display("Invalid engine type: {}", engine_type))]
|
||||
InvalidEngineType {
|
||||
engine_type: String,
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to connect to Etcd"))]
|
||||
ConnectEtcd {
|
||||
#[snafu(source)]
|
||||
@@ -285,51 +266,6 @@ pub enum Error {
|
||||
|
||||
#[snafu(display("Retry later"))]
|
||||
RetryLater { source: BoxedError },
|
||||
|
||||
#[snafu(display(
|
||||
"Failed to encode a wal options to json string, wal_options: {:?}",
|
||||
wal_options
|
||||
))]
|
||||
EncodeWalOptions {
|
||||
wal_options: WalOptions,
|
||||
#[snafu(source)]
|
||||
error: serde_json::Error,
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Invalid number of topics {}", num_topics))]
|
||||
InvalidNumTopics {
|
||||
num_topics: usize,
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display(
|
||||
"Failed to build a Kafka client, broker endpoints: {:?}",
|
||||
broker_endpoints
|
||||
))]
|
||||
BuildKafkaClient {
|
||||
broker_endpoints: Vec<String>,
|
||||
location: Location,
|
||||
#[snafu(source)]
|
||||
error: rskafka::client::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to build a Kafka controller client"))]
|
||||
BuildKafkaCtrlClient {
|
||||
location: Location,
|
||||
#[snafu(source)]
|
||||
error: rskafka::client::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to create a Kafka wal topic"))]
|
||||
CreateKafkaWalTopic {
|
||||
location: Location,
|
||||
#[snafu(source)]
|
||||
error: rskafka::client::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("The topic pool is empty"))]
|
||||
EmptyTopicPool { location: Location },
|
||||
}
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
@@ -355,21 +291,7 @@ impl ErrorExt for Error {
|
||||
| SequenceOutOfRange { .. }
|
||||
| UnexpectedSequenceValue { .. }
|
||||
| InvalidHeartbeatResponse { .. }
|
||||
| InvalidTxnResult { .. }
|
||||
| EncodeJson { .. }
|
||||
| DecodeJson { .. }
|
||||
| PayloadNotExist { .. }
|
||||
| ConvertRawKey { .. }
|
||||
| DecodeProto { .. }
|
||||
| BuildTableMeta { .. }
|
||||
| TableRouteNotFound { .. }
|
||||
| ConvertRawTableInfo { .. }
|
||||
| RegionOperatingRace { .. }
|
||||
| EncodeWalOptions { .. }
|
||||
| BuildKafkaClient { .. }
|
||||
| BuildKafkaCtrlClient { .. }
|
||||
| CreateKafkaWalTopic { .. }
|
||||
| EmptyTopicPool { .. } => StatusCode::Unexpected,
|
||||
| InvalidTxnResult { .. } => StatusCode::Unexpected,
|
||||
|
||||
SendMessage { .. }
|
||||
| GetKvCache { .. }
|
||||
@@ -379,13 +301,20 @@ impl ErrorExt for Error {
|
||||
| RenameTable { .. }
|
||||
| Unsupported { .. } => StatusCode::Internal,
|
||||
|
||||
PrimaryKeyNotFound { .. } | EmptyKey { .. } | InvalidEngineType { .. } => {
|
||||
StatusCode::InvalidArguments
|
||||
}
|
||||
PrimaryKeyNotFound { .. } | &EmptyKey { .. } => StatusCode::InvalidArguments,
|
||||
|
||||
TableNotFound { .. } => StatusCode::TableNotFound,
|
||||
TableAlreadyExists { .. } => StatusCode::TableAlreadyExists,
|
||||
|
||||
EncodeJson { .. }
|
||||
| DecodeJson { .. }
|
||||
| PayloadNotExist { .. }
|
||||
| ConvertRawKey { .. }
|
||||
| DecodeProto { .. }
|
||||
| BuildTableMeta { .. }
|
||||
| TableRouteNotFound { .. }
|
||||
| ConvertRawTableInfo { .. } => StatusCode::Unexpected,
|
||||
|
||||
SubmitProcedure { source, .. } | WaitProcedure { source, .. } => source.status_code(),
|
||||
RegisterProcedureLoader { source, .. } => source.status_code(),
|
||||
External { source, .. } => source.status_code(),
|
||||
@@ -394,8 +323,6 @@ impl ErrorExt for Error {
|
||||
RetryLater { source, .. } => source.status_code(),
|
||||
InvalidCatalogValue { source, .. } => source.status_code(),
|
||||
ConvertAlterTableRequest { source, .. } => source.status_code(),
|
||||
|
||||
InvalidNumTopics { .. } => StatusCode::InvalidArguments,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -91,31 +91,19 @@ impl Display for OpenRegion {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct OpenRegion {
|
||||
pub region_ident: RegionIdent,
|
||||
pub region_storage_path: String,
|
||||
pub region_options: HashMap<String, String>,
|
||||
#[serde(default)]
|
||||
pub region_wal_options: HashMap<String, String>,
|
||||
#[serde(default)]
|
||||
pub skip_wal_replay: bool,
|
||||
pub options: HashMap<String, String>,
|
||||
}
|
||||
|
||||
impl OpenRegion {
|
||||
pub fn new(
|
||||
region_ident: RegionIdent,
|
||||
path: &str,
|
||||
region_options: HashMap<String, String>,
|
||||
region_wal_options: HashMap<String, String>,
|
||||
skip_wal_replay: bool,
|
||||
) -> Self {
|
||||
pub fn new(region_ident: RegionIdent, path: &str, options: HashMap<String, String>) -> Self {
|
||||
Self {
|
||||
region_ident,
|
||||
region_storage_path: path.to_string(),
|
||||
region_options,
|
||||
region_wal_options,
|
||||
skip_wal_replay,
|
||||
options,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -230,14 +218,12 @@ mod tests {
|
||||
},
|
||||
"test/foo",
|
||||
HashMap::new(),
|
||||
HashMap::new(),
|
||||
false,
|
||||
));
|
||||
|
||||
let serialized = serde_json::to_string(&open_region).unwrap();
|
||||
|
||||
assert_eq!(
|
||||
r#"{"OpenRegion":{"region_ident":{"cluster_id":1,"datanode_id":2,"table_id":1024,"region_number":1,"engine":"mito2"},"region_storage_path":"test/foo","region_options":{},"region_wal_options":{},"skip_wal_replay":false}}"#,
|
||||
r#"{"OpenRegion":{"region_ident":{"cluster_id":1,"datanode_id":2,"table_id":1024,"region_number":1,"engine":"mito2"},"region_storage_path":"test/foo","options":{}}}"#,
|
||||
serialized
|
||||
);
|
||||
|
||||
@@ -256,46 +242,4 @@ mod tests {
|
||||
serialized
|
||||
);
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
struct LegacyOpenRegion {
|
||||
region_ident: RegionIdent,
|
||||
region_storage_path: String,
|
||||
region_options: HashMap<String, String>,
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_compatible_serialize_open_region() {
|
||||
let region_ident = RegionIdent {
|
||||
cluster_id: 1,
|
||||
datanode_id: 2,
|
||||
table_id: 1024,
|
||||
region_number: 1,
|
||||
engine: "mito2".to_string(),
|
||||
};
|
||||
let region_storage_path = "test/foo".to_string();
|
||||
let region_options = HashMap::from([
|
||||
("a".to_string(), "aa".to_string()),
|
||||
("b".to_string(), "bb".to_string()),
|
||||
]);
|
||||
|
||||
// Serialize a legacy OpenRegion.
|
||||
let legacy_open_region = LegacyOpenRegion {
|
||||
region_ident: region_ident.clone(),
|
||||
region_storage_path: region_storage_path.clone(),
|
||||
region_options: region_options.clone(),
|
||||
};
|
||||
let serialized = serde_json::to_string(&legacy_open_region).unwrap();
|
||||
|
||||
// Deserialize to OpenRegion.
|
||||
let deserialized = serde_json::from_str(&serialized).unwrap();
|
||||
let expected = OpenRegion {
|
||||
region_ident,
|
||||
region_storage_path,
|
||||
region_options,
|
||||
region_wal_options: HashMap::new(),
|
||||
skip_wal_replay: false,
|
||||
};
|
||||
assert_eq!(expected, deserialized);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -63,9 +63,7 @@ use std::ops::Deref;
|
||||
use std::sync::Arc;
|
||||
|
||||
use bytes::Bytes;
|
||||
use common_catalog::consts::{
|
||||
DEFAULT_CATALOG_NAME, DEFAULT_PRIVATE_SCHEMA_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME,
|
||||
};
|
||||
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use common_telemetry::warn;
|
||||
use datanode_table::{DatanodeTableKey, DatanodeTableManager, DatanodeTableValue};
|
||||
use lazy_static::lazy_static;
|
||||
@@ -297,20 +295,12 @@ impl TableMetadataManager {
|
||||
|
||||
pub async fn init(&self) -> Result<()> {
|
||||
let catalog_name = CatalogNameKey::new(DEFAULT_CATALOG_NAME);
|
||||
let schema_name = SchemaNameKey::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME);
|
||||
|
||||
self.catalog_manager().create(catalog_name, true).await?;
|
||||
|
||||
let internal_schemas = [
|
||||
DEFAULT_SCHEMA_NAME,
|
||||
INFORMATION_SCHEMA_NAME,
|
||||
DEFAULT_PRIVATE_SCHEMA_NAME,
|
||||
];
|
||||
|
||||
for schema_name in internal_schemas {
|
||||
let schema_key = SchemaNameKey::new(DEFAULT_CATALOG_NAME, schema_name);
|
||||
|
||||
self.schema_manager().create(schema_key, None, true).await?;
|
||||
}
|
||||
self.schema_manager()
|
||||
.create(schema_name, None, true)
|
||||
.await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -374,7 +364,6 @@ impl TableMetadataManager {
|
||||
&self,
|
||||
mut table_info: RawTableInfo,
|
||||
region_routes: Vec<RegionRoute>,
|
||||
region_wal_options: HashMap<RegionNumber, String>,
|
||||
) -> Result<()> {
|
||||
let region_numbers = region_routes
|
||||
.iter()
|
||||
@@ -410,7 +399,6 @@ impl TableMetadataManager {
|
||||
&engine,
|
||||
®ion_storage_path,
|
||||
region_options,
|
||||
region_wal_options,
|
||||
distribution,
|
||||
)?;
|
||||
|
||||
@@ -599,7 +587,6 @@ impl TableMetadataManager {
|
||||
current_table_route_value: &DeserializedValueWithBytes<TableRouteValue>,
|
||||
new_region_routes: Vec<RegionRoute>,
|
||||
new_region_options: &HashMap<String, String>,
|
||||
new_region_wal_options: &HashMap<String, String>,
|
||||
) -> Result<()> {
|
||||
// Updates the datanode table key value pairs.
|
||||
let current_region_distribution =
|
||||
@@ -612,7 +599,6 @@ impl TableMetadataManager {
|
||||
current_region_distribution,
|
||||
new_region_distribution,
|
||||
new_region_options,
|
||||
new_region_wal_options,
|
||||
)?;
|
||||
|
||||
// Updates the table_route.
|
||||
@@ -841,31 +827,19 @@ mod tests {
new_test_table_info(region_routes.iter().map(|r| r.region.id.region_number())).into();
// creates metadata.
table_metadata_manager
.create_table_metadata(
table_info.clone(),
region_routes.clone(),
HashMap::default(),
)
.create_table_metadata(table_info.clone(), region_routes.clone())
.await
.unwrap();
// if metadata was already created, it should be ok.
table_metadata_manager
.create_table_metadata(
table_info.clone(),
region_routes.clone(),
HashMap::default(),
)
.create_table_metadata(table_info.clone(), region_routes.clone())
.await
.unwrap();
let mut modified_region_routes = region_routes.clone();
modified_region_routes.push(region_route.clone());
// if remote metadata already exists, it should return an error.
assert!(table_metadata_manager
.create_table_metadata(
table_info.clone(),
modified_region_routes,
HashMap::default()
)
.create_table_metadata(table_info.clone(), modified_region_routes)
.await
.is_err());

@@ -899,11 +873,7 @@ mod tests {

// creates metadata.
table_metadata_manager
.create_table_metadata(
table_info.clone(),
region_routes.clone(),
HashMap::default(),
)
.create_table_metadata(table_info.clone(), region_routes.clone())
.await
.unwrap();

@@ -974,11 +944,7 @@ mod tests {
let table_id = table_info.ident.table_id;
// creates metadata.
table_metadata_manager
.create_table_metadata(
table_info.clone(),
region_routes.clone(),
HashMap::default(),
)
.create_table_metadata(table_info.clone(), region_routes.clone())
.await
.unwrap();
let new_table_name = "another_name".to_string();
@@ -1046,11 +1012,7 @@ mod tests {
let table_id = table_info.ident.table_id;
// creates metadata.
table_metadata_manager
.create_table_metadata(
table_info.clone(),
region_routes.clone(),
HashMap::default(),
)
.create_table_metadata(table_info.clone(), region_routes.clone())
.await
.unwrap();
let mut new_table_info = table_info.clone();
@@ -1127,11 +1089,7 @@ mod tests {
DeserializedValueWithBytes::from_inner(TableRouteValue::new(region_routes.clone()));
// creates metadata.
table_metadata_manager
.create_table_metadata(
table_info.clone(),
region_routes.clone(),
HashMap::default(),
)
.create_table_metadata(table_info.clone(), region_routes.clone())
.await
.unwrap();

@@ -1197,11 +1155,7 @@ mod tests {
DeserializedValueWithBytes::from_inner(TableRouteValue::new(region_routes.clone()));
// creates metadata.
table_metadata_manager
.create_table_metadata(
table_info.clone(),
region_routes.clone(),
HashMap::default(),
)
.create_table_metadata(table_info.clone(), region_routes.clone())
.await
.unwrap();
assert_datanode_table(&table_metadata_manager, table_id, &region_routes).await;
@@ -1218,12 +1172,10 @@ mod tests {
engine: engine.to_string(),
region_storage_path: region_storage_path.to_string(),
region_options: HashMap::new(),
region_wal_options: HashMap::new(),
},
&current_table_route_value,
new_region_routes.clone(),
&HashMap::new(),
&HashMap::new(),
)
.await
.unwrap();
@@ -1237,12 +1189,10 @@ mod tests {
engine: engine.to_string(),
region_storage_path: region_storage_path.to_string(),
region_options: HashMap::new(),
region_wal_options: HashMap::new(),
},
&current_table_route_value,
new_region_routes.clone(),
&HashMap::new(),
&HashMap::new(),
)
.await
.unwrap();
@@ -1261,12 +1211,10 @@ mod tests {
engine: engine.to_string(),
region_storage_path: region_storage_path.to_string(),
region_options: HashMap::new(),
region_wal_options: HashMap::new(),
},
&current_table_route_value,
new_region_routes.clone(),
&HashMap::new(),
&HashMap::new(),
)
.await
.unwrap();
@@ -1288,12 +1236,10 @@ mod tests {
engine: engine.to_string(),
region_storage_path: region_storage_path.to_string(),
region_options: HashMap::new(),
region_wal_options: HashMap::new(),
},
&wrong_table_route_value,
new_region_routes,
&HashMap::new(),
&HashMap::new(),
)
.await
.is_err());

@@ -38,18 +38,14 @@ use crate::DatanodeId;
/// For compatibility reasons, DON'T modify the field name.
pub struct RegionInfo {
#[serde(default)]
/// The table engine, it SHOULD be immutable after created.
// The table engine, it SHOULD be immutable after created.
pub engine: String,
/// The region storage path, it SHOULD be immutable after created.
// The region storage path, it SHOULD be immutable after created.
#[serde(default)]
pub region_storage_path: String,
/// The region options.
// The region options.
#[serde(default)]
pub region_options: HashMap<String, String>,
/// The per-region wal options.
/// Key: region number (in string representation). Value: the encoded wal options of the region.
#[serde(default)]
pub region_wal_options: HashMap<String, String>,
}
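For illustration, the shape implied by the doc comment on `region_wal_options` can be sketched as follows; the helper name and the option payloads are placeholders, not taken from the repository:

use std::collections::HashMap;

// Hypothetical sketch: keys are region numbers rendered as strings, so the map
// serializes as a plain JSON object; values are the opaque encoded wal options.
fn example_region_wal_options() -> HashMap<String, String> {
    HashMap::from([
        ("1".to_string(), "encoded-wal-options-for-region-1".to_string()),
        ("2".to_string(), "encoded-wal-options-for-region-2".to_string()),
    ])
}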

pub struct DatanodeTableKey {
@@ -169,21 +165,11 @@ impl DatanodeTableManager {
engine: &str,
region_storage_path: &str,
region_options: HashMap<String, String>,
region_wal_options: HashMap<RegionNumber, String>,
distribution: RegionDistribution,
) -> Result<Txn> {
let txns = distribution
.into_iter()
.map(|(datanode_id, regions)| {
let filtered_region_wal_options = regions
.iter()
.filter_map(|region_number| {
region_wal_options
.get(region_number)
.map(|wal_options| (region_number.to_string(), wal_options.clone()))
})
.collect();

let key = DatanodeTableKey::new(datanode_id, table_id);
let val = DatanodeTableValue::new(
table_id,
@@ -192,7 +178,6 @@ impl DatanodeTableManager {
engine: engine.to_string(),
region_storage_path: region_storage_path.to_string(),
region_options: region_options.clone(),
region_wal_options: filtered_region_wal_options,
},
);

@@ -213,7 +198,6 @@ impl DatanodeTableManager {
current_region_distribution: RegionDistribution,
new_region_distribution: RegionDistribution,
new_region_options: &HashMap<String, String>,
new_region_wal_options: &HashMap<String, String>,
) -> Result<Txn> {
let mut opts = Vec::new();

@@ -225,15 +209,12 @@ impl DatanodeTableManager {
opts.push(TxnOp::Delete(raw_key))
}
}

let need_update_options = region_info.region_options != *new_region_options;
let need_update_wal_options = region_info.region_wal_options != *new_region_wal_options;

for (datanode, regions) in new_region_distribution.into_iter() {
let need_update =
if let Some(current_region) = current_region_distribution.get(&datanode) {
// Updates if needed.
*current_region != regions || need_update_options || need_update_wal_options
*current_region != regions || need_update_options
} else {
true
};
@@ -291,7 +272,7 @@ mod tests {
region_info: RegionInfo::default(),
version: 1,
};
let literal = br#"{"table_id":42,"regions":[1,2,3],"engine":"","region_storage_path":"","region_options":{},"region_wal_options":{},"version":1}"#;
let literal = br#"{"table_id":42,"regions":[1,2,3],"engine":"","region_storage_path":"","region_options":{},"version":1}"#;

let raw_value = value.try_as_raw_value().unwrap();
assert_eq!(raw_value, literal);
@@ -305,41 +286,6 @@ mod tests {
assert!(parsed.is_ok());
}

// This test intends to ensure both the `serde_json::to_string` + `serde_json::from_str`
// and `serde_json::to_vec` + `serde_json::from_slice` work for `DatanodeTableValue`.
// Warning: if the key of `region_wal_options` is of type non-String, this test would fail.
#[test]
fn test_serde_with_region_info() {
let region_info = RegionInfo {
engine: "test_engine".to_string(),
region_storage_path: "test_storage_path".to_string(),
region_options: HashMap::from([
("a".to_string(), "aa".to_string()),
("b".to_string(), "bb".to_string()),
("c".to_string(), "cc".to_string()),
]),
region_wal_options: HashMap::from([
("1".to_string(), "aaa".to_string()),
("2".to_string(), "bbb".to_string()),
("3".to_string(), "ccc".to_string()),
]),
};
let table_value = DatanodeTableValue {
table_id: 1,
regions: vec![],
region_info,
version: 1,
};

let encoded = serde_json::to_string(&table_value).unwrap();
let decoded = serde_json::from_str(&encoded).unwrap();
assert_eq!(table_value, decoded);

let encoded = serde_json::to_vec(&table_value).unwrap();
let decoded = serde_json::from_slice(&encoded).unwrap();
assert_eq!(table_value, decoded);
}

#[test]
fn test_strip_table_id() {
fn test_err(raw_key: &[u8]) {

@@ -16,7 +16,6 @@ use std::collections::HashMap;
use std::fmt::Display;

use serde::{Deserialize, Serialize};
use store_api::storage::RegionId;
use table::metadata::TableId;

use super::DeserializedValueWithBytes;
@@ -51,29 +50,12 @@ impl TableRouteValue {
}
}

/// Returns a new version [TableRouteValue] with `region_routes`.
pub fn update(&self, region_routes: Vec<RegionRoute>) -> Self {
Self {
region_routes,
version: self.version + 1,
}
}

/// Returns the version.
///
/// For test purpose.
#[cfg(any(test, feature = "testing"))]
pub fn version(&self) -> u64 {
self.version
}

/// Returns the corresponding [RegionRoute].
pub fn region_route(&self, region_id: RegionId) -> Option<RegionRoute> {
self.region_routes
.iter()
.find(|route| route.region.id == region_id)
.cloned()
}
}
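For illustration, a minimal sketch of the `update` semantics shown above: each call leaves its input untouched and returns a new `TableRouteValue` with the routes replaced and `version` incremented by one. The helper name and the assumption that `TableRouteValue::new` yields the initial version are illustrative, not taken from the diff:

// Hypothetical sketch of chaining `update`.
fn bump_route_twice(routes: Vec<RegionRoute>) -> TableRouteValue {
    // Assumed: `new` produces the initial version.
    let v0 = TableRouteValue::new(routes.clone());
    // Each `update` replaces the routes and bumps `version` by one,
    // so the result here is two versions ahead of `v0`.
    let v1 = v0.update(routes.clone());
    v1.update(routes)
}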

impl TableMetaKey for TableRouteKey {
Some files were not shown because too many files have changed in this diff.