Mirror of https://github.com/GreptimeTeam/greptimedb.git, synced 2025-12-22 22:20:02 +00:00

Compare commits: v0.17.0 ... v0.18.0-ni (114 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 4a3c5f85e5 | |
| | 3ca5c77d91 | |
| | 8bcf4a8ab5 | |
| | 0717773f62 | |
| | 195ed73448 | |
| | 243dbde3d5 | |
| | aca8b690d1 | |
| | 9564180a6a | |
| | 17d16da483 | |
| | c1acce9943 | |
| | 0790835c77 | |
| | 280df064c7 | |
| | 11a08d1381 | |
| | 06a4f0abea | |
| | c6e5552f05 | |
| | 9c8ff1d8a0 | |
| | cff9cb6327 | |
| | 964dc254aa | |
| | 91a727790d | |
| | 07b9de620e | |
| | 6d0dd2540e | |
| | a14c01a807 | |
| | 238ed003df | |
| | 0c038f755f | |
| | b5a8725582 | |
| | c7050831db | |
| | f65dcd12cc | |
| | 80c8ab42b0 | |
| | 4507736528 | |
| | 2712c5cd7a | |
| | 078379816c | |
| | 6dc5fbe9a1 | |
| | cd3fb5fd3e | |
| | 5fcca4eeab | |
| | b3d413258d | |
| | 03954e8b3b | |
| | bd8f5d2b71 | |
| | 74721a06ba | |
| | 18e4839a17 | |
| | cbe0cf4a74 | |
| | e26b98f452 | |
| | d8b967408e | |
| | c35407fdce | |
| | edf4b3f7f8 | |
| | 14550429e9 | |
| | ff2da4903e | |
| | c92ab4217f | |
| | 77981a7de5 | |
| | 9096c5ebbf | |
| | 0a959f9920 | |
| | 85c1a91bae | |
| | 7aba9a18fd | |
| | 4c18d140b4 | |
| | b8e0c49cb4 | |
| | db42ad42dc | |
| | 8ce963f63e | |
| | b3aabb6706 | |
| | 028effe952 | |
| | d86f489a74 | |
| | 6c066c1a4a | |
| | 9ab87e11a4 | |
| | 9fe7069146 | |
| | 733a1afcd1 | |
| | 5e65581f94 | |
| | e75e5baa63 | |
| | c4b89df523 | |
| | 6a15e62719 | |
| | 2bddbe8c47 | |
| | ea8125aafb | |
| | 49722951c6 | |
| | c431b036ec | |
| | c797d87210 | |
| | e0ce0a6446 | |
| | 2f6da3b718 | |
| | 83290be3ba | |
| | 21ee981b49 | |
| | 44c2aa4c23 | |
| | 9c22092189 | |
| | ad690e14d0 | |
| | 264d05d20e | |
| | 9fe84f6fbd | |
| | ac051af201 | |
| | 948a6578fa | |
| | 16febbd4c2 | |
| | 47384c7701 | |
| | c9377e7c5a | |
| | 658d07bfc8 | |
| | 24e5c9f6da | |
| | 4158afa618 | |
| | 48f5db3f5f | |
| | f7c8d86ebb | |
| | faae1db066 | |
| | ba105e18b0 | |
| | 8bbb396506 | |
| | 8e7f2e92cc | |
| | 493a1efe65 | |
| | 40a49ddc82 | |
| | 2c019965be | |
| | 756856799f | |
| | dbb76483e8 | |
| | a65db7121e | |
| | ee4b8708d7 | |
| | 2512f9456d | |
| | deb728c38a | |
| | 9dbf6dd8d0 | |
| | 7cf47ccf54 | |
| | c3c79e4c79 | |
| | 56db1d468a | |
| | 7fbcae1ef8 | |
| | b7f507cb37 | |
| | 9b8f04b065 | |
| | 8fc42aeb27 | |
| | d394f38d18 | |
| | 6a3791ab31 | |
@@ -2,7 +2,7 @@
 linker = "aarch64-linux-gnu-gcc"
 
 [alias]
-sqlness = "run --bin sqlness-runner --"
+sqlness = "run --bin sqlness-runner --target-dir target/sqlness --"
 
 [unstable.git]
 shallow_index = true
@@ -1,3 +1,8 @@
 logging:
+  level: "info"
+  format: "json"
+  filters:
+    - log_store=debug
 meta:
   configData: |-
+    [runtime]
.github/scripts/deploy-greptimedb.sh (vendored, 40 lines changed)
@@ -3,12 +3,14 @@
 set -e
 set -o pipefail
 
-KUBERNETES_VERSION="${KUBERNETES_VERSION:-v1.24.0}"
+KUBERNETES_VERSION="${KUBERNETES_VERSION:-v1.32.0}"
 ENABLE_STANDALONE_MODE="${ENABLE_STANDALONE_MODE:-true}"
 DEFAULT_INSTALL_NAMESPACE=${DEFAULT_INSTALL_NAMESPACE:-default}
 GREPTIMEDB_IMAGE_TAG=${GREPTIMEDB_IMAGE_TAG:-latest}
-ETCD_CHART="oci://registry-1.docker.io/bitnamicharts/etcd"
 GREPTIME_CHART="https://greptimeteam.github.io/helm-charts/"
+ETCD_CHART="oci://registry-1.docker.io/bitnamicharts/etcd"
+ETCD_CHART_VERSION="${ETCD_CHART_VERSION:-12.0.8}"
+ETCD_IMAGE_TAG="${ETCD_IMAGE_TAG:-3.6.1-debian-12-r3}"
 
 # Create a cluster with 1 control-plane node and 5 workers.
 function create_kind_cluster() {
@@ -35,10 +37,16 @@ function add_greptime_chart() {
 function deploy_etcd_cluster() {
   local namespace="$1"
 
-  helm install etcd "$ETCD_CHART" \
+  helm upgrade --install etcd "$ETCD_CHART" \
+    --version "$ETCD_CHART_VERSION" \
     --create-namespace \
     --set replicaCount=3 \
     --set auth.rbac.create=false \
     --set auth.rbac.token.enabled=false \
+    --set global.security.allowInsecureImages=true \
+    --set image.registry=docker.io \
+    --set image.repository=greptime/etcd \
+    --set image.tag="$ETCD_IMAGE_TAG" \
     -n "$namespace"
 
   # Wait for etcd cluster to be ready.
@@ -48,7 +56,8 @@ function deploy_etcd_cluster() {
 # Deploy greptimedb-operator.
 function deploy_greptimedb_operator() {
   # Use the latest chart and image.
-  helm install greptimedb-operator greptime/greptimedb-operator \
+  helm upgrade --install greptimedb-operator greptime/greptimedb-operator \
+    --create-namespace \
     --set image.tag=latest \
     -n "$DEFAULT_INSTALL_NAMESPACE"
 
@@ -66,9 +75,11 @@ function deploy_greptimedb_cluster() {
 
   deploy_etcd_cluster "$install_namespace"
 
-  helm install "$cluster_name" greptime/greptimedb-cluster \
+  helm upgrade --install "$cluster_name" greptime/greptimedb-cluster \
+    --create-namespace \
     --set image.tag="$GREPTIMEDB_IMAGE_TAG" \
     --set meta.backendStorage.etcd.endpoints="etcd.$install_namespace:2379" \
+    --set meta.backendStorage.etcd.storeKeyPrefix="$cluster_name" \
     -n "$install_namespace"
 
   # Wait for greptimedb cluster to be ready.
@@ -101,15 +112,17 @@ function deploy_greptimedb_cluster_with_s3_storage() {
 
   deploy_etcd_cluster "$install_namespace"
 
-  helm install "$cluster_name" greptime/greptimedb-cluster -n "$install_namespace" \
+  helm upgrade --install "$cluster_name" greptime/greptimedb-cluster -n "$install_namespace" \
+    --create-namespace \
     --set image.tag="$GREPTIMEDB_IMAGE_TAG" \
     --set meta.backendStorage.etcd.endpoints="etcd.$install_namespace:2379" \
-    --set storage.s3.bucket="$AWS_CI_TEST_BUCKET" \
-    --set storage.s3.region="$AWS_REGION" \
-    --set storage.s3.root="$DATA_ROOT" \
-    --set storage.credentials.secretName=s3-credentials \
-    --set storage.credentials.accessKeyId="$AWS_ACCESS_KEY_ID" \
-    --set storage.credentials.secretAccessKey="$AWS_SECRET_ACCESS_KEY"
+    --set meta.backendStorage.etcd.storeKeyPrefix="$cluster_name" \
+    --set objectStorage.s3.bucket="$AWS_CI_TEST_BUCKET" \
+    --set objectStorage.s3.region="$AWS_REGION" \
+    --set objectStorage.s3.root="$DATA_ROOT" \
+    --set objectStorage.credentials.secretName=s3-credentials \
+    --set objectStorage.credentials.accessKeyId="$AWS_ACCESS_KEY_ID" \
+    --set objectStorage.credentials.secretAccessKey="$AWS_SECRET_ACCESS_KEY"
 
   # Wait for greptimedb cluster to be ready.
   while true; do
@@ -134,7 +147,8 @@ function deploy_greptimedb_cluster_with_s3_storage() {
 # Deploy standalone greptimedb.
 # It will expose cluster service ports as '34000', '34001', '34002', '34003' to local access.
 function deploy_standalone_greptimedb() {
-  helm install greptimedb-standalone greptime/greptimedb-standalone \
+  helm upgrade --install greptimedb-standalone greptime/greptimedb-standalone \
+    --create-namespace \
     --set image.tag="$GREPTIMEDB_IMAGE_TAG" \
     -n "$DEFAULT_INSTALL_NAMESPACE"
.github/workflows/semantic-pull-request.yml (vendored, 6 lines changed)
@@ -1,7 +1,7 @@
 name: "Semantic Pull Request"
 
 on:
-  pull_request:
+  pull_request_target:
     types:
       - opened
       - reopened
@@ -12,9 +12,9 @@ concurrency:
   cancel-in-progress: true
 
 permissions:
-  issues: write
-  contents: write
+  contents: read
   pull-requests: write
+  issues: write
 
 jobs:
   check:
Cargo.lock (generated, 297 lines changed): file diff suppressed because it is too large.
Cargo.toml (47 lines changed)
@@ -61,6 +61,7 @@ members = [
     "src/promql",
     "src/puffin",
     "src/query",
+    "src/standalone",
     "src/servers",
     "src/session",
     "src/sql",
@@ -73,8 +74,8 @@
 resolver = "2"
 
 [workspace.package]
-version = "0.17.0"
-edition = "2021"
+version = "0.18.0"
+edition = "2024"
 license = "Apache-2.0"
 
 [workspace.lints]
@@ -122,17 +123,18 @@ clap = { version = "4.4", features = ["derive"] }
 config = "0.13.0"
 crossbeam-utils = "0.8"
 dashmap = "6.1"
-datafusion = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7d5214512740b4dfb742b6b3d91ed9affcc2c9d0" }
-datafusion-common = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7d5214512740b4dfb742b6b3d91ed9affcc2c9d0" }
-datafusion-expr = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7d5214512740b4dfb742b6b3d91ed9affcc2c9d0" }
-datafusion-functions = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7d5214512740b4dfb742b6b3d91ed9affcc2c9d0" }
-datafusion-functions-aggregate-common = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7d5214512740b4dfb742b6b3d91ed9affcc2c9d0" }
-datafusion-optimizer = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7d5214512740b4dfb742b6b3d91ed9affcc2c9d0" }
+datafusion = "49"
+datafusion-common = "49"
+datafusion-expr = "49"
+datafusion-functions = "49"
+datafusion-functions-aggregate-common = "49"
+datafusion-optimizer = "49"
 datafusion-orc = { git = "https://github.com/GreptimeTeam/datafusion-orc", rev = "a0a5f902158f153119316eaeec868cff3fc8a99d" }
-datafusion-physical-expr = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7d5214512740b4dfb742b6b3d91ed9affcc2c9d0" }
-datafusion-physical-plan = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7d5214512740b4dfb742b6b3d91ed9affcc2c9d0" }
-datafusion-sql = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7d5214512740b4dfb742b6b3d91ed9affcc2c9d0" }
-datafusion-substrait = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7d5214512740b4dfb742b6b3d91ed9affcc2c9d0" }
+datafusion-pg-catalog = { git = "https://github.com/datafusion-contrib/datafusion-postgres", rev = "3d1b7c7d5b82dd49bafc2803259365e633f654fa" }
+datafusion-physical-expr = "49"
+datafusion-physical-plan = "49"
+datafusion-sql = "49"
+datafusion-substrait = "49"
 deadpool = "0.12"
 deadpool-postgres = "0.14"
 derive_builder = "0.20"
@@ -145,7 +147,7 @@ etcd-client = { git = "https://github.com/GreptimeTeam/etcd-client", rev = "f62d
 fst = "0.4.7"
 futures = "0.3"
 futures-util = "0.3"
-greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "66eb089afa6baaa3ddfafabd0a4abbe317d012c3" }
+greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "3e821d0d405e6733690a4e4352812ba2ff780a3e" }
 hex = "0.4"
 http = "1"
 humantime = "2.1"
@@ -174,6 +176,9 @@ opentelemetry-proto = { version = "0.30", features = [
     "logs",
 ] }
 ordered-float = { version = "4.3", features = ["serde"] }
+otel-arrow-rust = { git = "https://github.com/GreptimeTeam/otel-arrow", rev = "2d64b7c0fa95642028a8205b36fe9ea0b023ec59", features = [
+    "server",
+] }
 parking_lot = "0.12"
 parquet = { version = "56.0", default-features = false, features = ["arrow", "async", "object_store"] }
 paste = "1.0"
@@ -296,9 +301,6 @@ mito-codec = { path = "src/mito-codec" }
 mito2 = { path = "src/mito2" }
 object-store = { path = "src/object-store" }
 operator = { path = "src/operator" }
-otel-arrow-rust = { git = "https://github.com/GreptimeTeam/otel-arrow", rev = "2d64b7c0fa95642028a8205b36fe9ea0b023ec59", features = [
-    "server",
-] }
 partition = { path = "src/partition" }
 pipeline = { path = "src/pipeline" }
 plugins = { path = "src/plugins" }
@@ -308,6 +310,7 @@ query = { path = "src/query" }
 servers = { path = "src/servers" }
 session = { path = "src/session" }
 sql = { path = "src/sql" }
+standalone = { path = "src/standalone" }
 stat = { path = "src/common/stat" }
 store-api = { path = "src/store-api" }
 substrait = { path = "src/common/substrait" }
@@ -317,6 +320,18 @@ table = { path = "src/table" }
 git = "https://github.com/GreptimeTeam/greptime-meter.git"
 rev = "5618e779cf2bb4755b499c630fba4c35e91898cb"
 
+[patch.crates-io]
+datafusion = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7d5214512740b4dfb742b6b3d91ed9affcc2c9d0" }
+datafusion-common = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7d5214512740b4dfb742b6b3d91ed9affcc2c9d0" }
+datafusion-expr = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7d5214512740b4dfb742b6b3d91ed9affcc2c9d0" }
+datafusion-functions = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7d5214512740b4dfb742b6b3d91ed9affcc2c9d0" }
+datafusion-functions-aggregate-common = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7d5214512740b4dfb742b6b3d91ed9affcc2c9d0" }
+datafusion-optimizer = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7d5214512740b4dfb742b6b3d91ed9affcc2c9d0" }
+datafusion-physical-expr = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7d5214512740b4dfb742b6b3d91ed9affcc2c9d0" }
+datafusion-physical-plan = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7d5214512740b4dfb742b6b3d91ed9affcc2c9d0" }
+datafusion-sql = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7d5214512740b4dfb742b6b3d91ed9affcc2c9d0" }
+datafusion-substrait = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7d5214512740b4dfb742b6b3d91ed9affcc2c9d0" }
+
 [profile.release]
 debug = 1
@@ -103,6 +103,7 @@
 | `storage` | -- | -- | The data storage options. |
 | `storage.data_home` | String | `./greptimedb_data` | The working home directory. |
 | `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
+| `storage.enable_read_cache` | Bool | `true` | Whether to enable read cache. If not set, the read cache will be enabled by default when using object storage. |
 | `storage.cache_path` | String | Unset | Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance.<br/>A local file directory, defaults to `{data_home}`. An empty string means disabling. |
 | `storage.cache_capacity` | String | Unset | The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger. |
 | `storage.bucket` | String | Unset | The S3 bucket name.<br/>**It's only used when the storage type is `S3`, `Oss` and `Gcs`**. |
@@ -151,6 +152,7 @@
 | `region_engine.mito.max_concurrent_scan_files` | Integer | `384` | Maximum number of SST files to scan concurrently. |
 | `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
 | `region_engine.mito.min_compaction_interval` | String | `0m` | Minimum time interval between two compactions.<br/>To align with the old behavior, the default value is 0 (no restrictions). |
+| `region_engine.mito.enable_experimental_flat_format` | Bool | `false` | Whether to enable experimental flat format. |
 | `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
 | `region_engine.mito.index.aux_path` | String | `""` | Auxiliary directory path for the index in filesystem, used to store intermediate files for<br/>creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.<br/>The default name for this directory is `index_intermediate` for backward compatibility.<br/><br/>This path contains two subdirectories:<br/>- `__intm`: for storing intermediate files used during creating index.<br/>- `staging`: for storing staging files used during searching index. |
 | `region_engine.mito.index.staging_size` | String | `2GB` | The max capacity of the staging directory. |
@@ -377,10 +379,9 @@
 | `procedure.max_metadata_value_size` | String | `1500KiB` | Auto split large value<br/>GreptimeDB procedure uses etcd as the default metadata storage backend.<br/>The etcd the maximum size of any request is 1.5 MiB<br/>1500KiB = 1536KiB (1.5MiB) - 36KiB (reserved size of key)<br/>Comments out the `max_metadata_value_size`, for don't split large value (no limit). |
 | `procedure.max_running_procedures` | Integer | `128` | Max running procedures.<br/>The maximum number of procedures that can be running at the same time.<br/>If the number of running procedures exceeds this limit, the procedure will be rejected. |
 | `failure_detector` | -- | -- | -- |
-| `failure_detector.threshold` | Float | `8.0` | The threshold value used by the failure detector to determine failure conditions. |
-| `failure_detector.min_std_deviation` | String | `100ms` | The minimum standard deviation of the heartbeat intervals, used to calculate acceptable variations. |
-| `failure_detector.acceptable_heartbeat_pause` | String | `10000ms` | The acceptable pause duration between heartbeats, used to determine if a heartbeat interval is acceptable. |
-| `failure_detector.first_heartbeat_estimate` | String | `1000ms` | The initial estimate of the heartbeat interval used by the failure detector. |
+| `failure_detector.threshold` | Float | `8.0` | Maximum acceptable φ before the peer is treated as failed.<br/>Lower values react faster but yield more false positives. |
+| `failure_detector.min_std_deviation` | String | `100ms` | The minimum standard deviation of the heartbeat intervals.<br/>So tiny variations don’t make φ explode. Prevents hypersensitivity when heartbeat intervals barely vary. |
+| `failure_detector.acceptable_heartbeat_pause` | String | `10000ms` | The acceptable pause duration between heartbeats.<br/>Additional extra grace period to the learned mean interval before φ rises, absorbing temporary network hiccups or GC pauses. |
 | `datanode` | -- | -- | Datanode options. |
 | `datanode.client` | -- | -- | Datanode client options. |
 | `datanode.client.timeout` | String | `10s` | Operation timeout. |
@@ -402,8 +403,8 @@
 | `event_recorder` | -- | -- | Configuration options for the event recorder. |
 | `event_recorder.ttl` | String | `90d` | TTL for the events table that will be used to store the events. Default is `90d`. |
 | `stats_persistence` | -- | -- | Configuration options for the stats persistence. |
-| `stats_persistence.ttl` | String | `30d` | TTL for the stats table that will be used to store the stats. Default is `30d`.<br/>Set to `0s` to disable stats persistence. |
-| `stats_persistence.interval` | String | `60s` | The interval to persist the stats. Default is `60s`.<br/>The minimum value is `60s`, if the value is less than `60s`, it will be overridden to `60s`. |
+| `stats_persistence.ttl` | String | `0s` | TTL for the stats table that will be used to store the stats.<br/>Set to `0s` to disable stats persistence.<br/>Default is `0s`.<br/>If you want to enable stats persistence, set the TTL to a value greater than 0.<br/>It is recommended to set a small value, e.g., `3h`. |
+| `stats_persistence.interval` | String | `10m` | The interval to persist the stats. Default is `10m`.<br/>The minimum value is `10m`, if the value is less than `10m`, it will be overridden to `10m`. |
 | `logging` | -- | -- | The logging options. |
 | `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
 | `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
@@ -494,6 +495,7 @@
 | `storage.data_home` | String | `./greptimedb_data` | The working home directory. |
 | `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
 | `storage.cache_path` | String | Unset | Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance.<br/>A local file directory, defaults to `{data_home}`. An empty string means disabling. |
+| `storage.enable_read_cache` | Bool | `true` | Whether to enable read cache. If not set, the read cache will be enabled by default when using object storage. |
 | `storage.cache_capacity` | String | Unset | The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger. |
 | `storage.bucket` | String | Unset | The S3 bucket name.<br/>**It's only used when the storage type is `S3`, `Oss` and `Gcs`**. |
 | `storage.root` | String | Unset | The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.<br/>**It's only used when the storage type is `S3`, `Oss` and `Azblob`**. |
@@ -543,6 +545,7 @@
 | `region_engine.mito.max_concurrent_scan_files` | Integer | `384` | Maximum number of SST files to scan concurrently. |
 | `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
 | `region_engine.mito.min_compaction_interval` | String | `0m` | Minimum time interval between two compactions.<br/>To align with the old behavior, the default value is 0 (no restrictions). |
+| `region_engine.mito.enable_experimental_flat_format` | Bool | `false` | Whether to enable experimental flat format. |
 | `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
 | `region_engine.mito.index.aux_path` | String | `""` | Auxiliary directory path for the index in filesystem, used to store intermediate files for<br/>creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.<br/>The default name for this directory is `index_intermediate` for backward compatibility.<br/><br/>This path contains two subdirectories:<br/>- `__intm`: for storing intermediate files used during creating index.<br/>- `staging`: for storing staging files used during searching index. |
 | `region_engine.mito.index.staging_size` | String | `2GB` | The max capacity of the staging directory. |
@@ -274,6 +274,9 @@ type = "File"
|
||||
## @toml2docs:none-default
|
||||
#+ cache_path = ""
|
||||
|
||||
## Whether to enable read cache. If not set, the read cache will be enabled by default when using object storage.
|
||||
#+ enable_read_cache = true
|
||||
|
||||
## The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger.
|
||||
## @toml2docs:none-default
|
||||
cache_capacity = "5GiB"
|
||||
@@ -497,6 +500,9 @@ allow_stale_entries = false
|
||||
## To align with the old behavior, the default value is 0 (no restrictions).
|
||||
min_compaction_interval = "0m"
|
||||
|
||||
## Whether to enable experimental flat format.
|
||||
enable_experimental_flat_format = false
|
||||
|
||||
## The options for index in Mito engine.
|
||||
[region_engine.mito.index]
|
||||
|
||||
|
||||
@@ -149,20 +149,18 @@ max_metadata_value_size = "1500KiB"
|
||||
max_running_procedures = 128
|
||||
|
||||
# Failure detectors options.
|
||||
# GreptimeDB uses the Phi Accrual Failure Detector algorithm to detect datanode failures.
|
||||
[failure_detector]
|
||||
|
||||
## The threshold value used by the failure detector to determine failure conditions.
|
||||
## Maximum acceptable φ before the peer is treated as failed.
|
||||
## Lower values react faster but yield more false positives.
|
||||
threshold = 8.0
|
||||
|
||||
## The minimum standard deviation of the heartbeat intervals, used to calculate acceptable variations.
|
||||
## The minimum standard deviation of the heartbeat intervals.
|
||||
## So tiny variations don’t make φ explode. Prevents hypersensitivity when heartbeat intervals barely vary.
|
||||
min_std_deviation = "100ms"
|
||||
|
||||
## The acceptable pause duration between heartbeats, used to determine if a heartbeat interval is acceptable.
|
||||
## The acceptable pause duration between heartbeats.
|
||||
## Additional extra grace period to the learned mean interval before φ rises, absorbing temporary network hiccups or GC pauses.
|
||||
acceptable_heartbeat_pause = "10000ms"
|
||||
|
||||
## The initial estimate of the heartbeat interval used by the failure detector.
|
||||
first_heartbeat_estimate = "1000ms"
|
||||
|
||||
## Datanode options.
|
||||
[datanode]
|
||||
|
||||
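For intuition on how `threshold`, `min_std_deviation`, and `acceptable_heartbeat_pause` interact: in the standard Phi Accrual formulation (Hayashibara et al., as used by Akka-style detectors with these option names; this formula is background, not text from the diff above), the suspicion level for a peer is

```math
\varphi(t_{\mathrm{now}}) = -\log_{10}\bigl(1 - F(t_{\mathrm{now}} - t_{\mathrm{last}})\bigr)
```

where F is the cumulative distribution of observed heartbeat intervals, with the learned standard deviation floored at `min_std_deviation` and the learned mean padded by `acceptable_heartbeat_pause`; the peer is suspected once φ exceeds `threshold`.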
@@ -274,12 +272,15 @@ ttl = "90d"
|
||||
|
||||
## Configuration options for the stats persistence.
|
||||
[stats_persistence]
|
||||
## TTL for the stats table that will be used to store the stats. Default is `30d`.
|
||||
## TTL for the stats table that will be used to store the stats.
|
||||
## Set to `0s` to disable stats persistence.
|
||||
ttl = "30d"
|
||||
## The interval to persist the stats. Default is `60s`.
|
||||
## The minimum value is `60s`, if the value is less than `60s`, it will be overridden to `60s`.
|
||||
interval = "60s"
|
||||
## Default is `0s`.
|
||||
## If you want to enable stats persistence, set the TTL to a value greater than 0.
|
||||
## It is recommended to set a small value, e.g., `3h`.
|
||||
ttl = "0s"
|
||||
## The interval to persist the stats. Default is `10m`.
|
||||
## The minimum value is `10m`, if the value is less than `10m`, it will be overridden to `10m`.
|
||||
interval = "10m"
|
||||
|
||||
## The logging options.
|
||||
[logging]
|
||||
|
||||
@@ -361,6 +361,9 @@ data_home = "./greptimedb_data"
|
||||
## - `Oss`: the data is stored in the Aliyun OSS.
|
||||
type = "File"
|
||||
|
||||
## Whether to enable read cache. If not set, the read cache will be enabled by default when using object storage.
|
||||
#+ enable_read_cache = true
|
||||
|
||||
## Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance.
|
||||
## A local file directory, defaults to `{data_home}`. An empty string means disabling.
|
||||
## @toml2docs:none-default
|
||||
@@ -576,6 +579,9 @@ allow_stale_entries = false
|
||||
## To align with the old behavior, the default value is 0 (no restrictions).
|
||||
min_compaction_interval = "0m"
|
||||
|
||||
## Whether to enable experimental flat format.
|
||||
enable_experimental_flat_format = false
|
||||
|
||||
## The options for index in Mito engine.
|
||||
[region_engine.mito.index]
|
||||
|
||||
|
||||
@@ -30,22 +30,7 @@ curl https://raw.githubusercontent.com/brendangregg/FlameGraph/master/flamegraph
 
 ## Profiling
 
-### Configuration
-
-You can control heap profiling activation through configuration. Add the following to your configuration file:
-
-```toml
-[memory]
-# Whether to enable heap profiling activation during startup.
-# When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable
-# is set to "prof:true,prof_active:false". The official image adds this env variable.
-# Default is true.
-enable_heap_profiling = true
-```
-
-By default, if you set `MALLOC_CONF=prof:true,prof_active:false`, the database will enable profiling during startup. You can disable this behavior by setting `enable_heap_profiling = false` in the configuration.
-
-### Starting with environment variables
+### Enable memory profiling for greptimedb binary
 
 Start GreptimeDB instance with environment variables:
 
@@ -57,6 +42,22 @@ MALLOC_CONF=prof:true ./target/debug/greptime standalone start
 _RJEM_MALLOC_CONF=prof:true ./target/debug/greptime standalone start
 ```
 
+### Memory profiling for greptimedb docker image
+
+We have memory profiling enabled and activated by default in our official docker
+image.
+
+This behavior is controlled by configuration `enable_heap_profiling`:
+
+```toml
+[memory]
+# Whether to enable heap profiling activation during startup.
+# Default is true.
+enable_heap_profiling = true
+```
+
+To disable memory profiling, set `enable_heap_profiling` to `false`.
+
 ### Memory profiling control
 
 You can control heap profiling activation using the new HTTP APIs:
docs/rfcs/2025-09-08-laminar-flow.md (new file, 463 lines)
@@ -0,0 +1,463 @@
---
Feature Name: "laminar-flow"
Tracking Issue: https://github.com/GreptimeTeam/greptimedb/issues/TBD
Date: 2025-09-08
Author: "discord9 <discord9@163.com>"
---

# laminar Flow

## Summary

This RFC proposes a redesign of the flow architecture where flownode becomes a lightweight in-memory state management node with an embedded frontend for direct computation. This approach optimizes resource utilization and improves scalability by eliminating network hops while maintaining clear separation between coordination and computation tasks.

## Motivation

The current flow architecture has several limitations:

1. **Resource Inefficiency**: Flownodes perform both state management and computation, leading to resource duplication and inefficient utilization.
2. **Scalability Constraints**: Computation resources are tied to flownode instances, limiting horizontal scaling capabilities.
3. **State Management Complexity**: Mixing computation with state management makes the system harder to maintain and debug.
4. **Network Overhead**: Additional network hops between flownode and separate frontend nodes add latency.

The laminar Flow architecture addresses these issues by:
- Consolidating computation within flownode through embedded frontend
- Eliminating network overhead by removing separate frontend node communication
- Simplifying state management by focusing flownode on its core responsibility
- Improving system scalability and maintainability

## Details

### Architecture Overview

The laminar Flow architecture transforms flownode into a lightweight coordinator that maintains flow state with an embedded frontend for computation. The key components involved are:

1. **Flownode**: Maintains in-memory state, coordinates computation, and includes an embedded frontend for query execution
2. **Embedded Frontend**: Executes **incremental** computations within the flownode
3. **Datanode**: Stores final results and source data

```mermaid
graph TB
    subgraph "laminar Flow Architecture"
        subgraph Flownode["Flownode (State Manager + Embedded Frontend)"]
            StateMap["Flow State Map<br/>Map<Timestamp, (Map<Key, Value>, Sequence)>"]
            Coordinator["Computation Coordinator"]
            subgraph EmbeddedFrontend["Embedded Frontend"]
                QueryEngine["Query Engine"]
                AggrState["__aggr_state Executor"]
            end
        end

        subgraph Datanode["Datanode"]
            Storage["Data Storage"]
            Results["Result Tables"]
        end
    end

    Coordinator -->|Internal Query| EmbeddedFrontend
    EmbeddedFrontend -->|Incremental States| Coordinator
    Flownode -->|Incremental Results| Datanode
    EmbeddedFrontend -.->|Read Data| Datanode
```

### Core Components

#### 1. Flow State Management

Flownode maintains a state map for each flow:

```rust
type FlowState = Map<Timestamp, (Map<Key, Value>, Sequence)>;
```

Where:
- **Timestamp**: Time window identifier for aggregation groups
- **Key**: Aggregation group expressions (`group_exprs`)
- **Value**: Aggregation expressions results (`aggr_exprs`)
- **Sequence**: Computation progress marker for incremental updates
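To make the layout concrete, here is a minimal sketch of this map and of merging one batch of partial aggregation state into it. The concrete type choices and the `merge_partial_state` helper are illustrative assumptions for this document, not code from the RFC or the repository:

```rust
use std::collections::BTreeMap;

// Illustrative stand-ins for the RFC's Timestamp / Key / Value / Sequence.
type Timestamp = i64;    // time window identifier
type Key = Vec<String>;  // evaluated group_exprs
type Value = Vec<f64>;   // partial aggr_exprs state
type Sequence = u64;     // computation progress marker

// FlowState as described above: per time window, the group states plus the
// sequence up to which they have been computed.
type FlowState = BTreeMap<Timestamp, (BTreeMap<Key, Value>, Sequence)>;

/// Merge one batch of partial aggregation state for `window` into `state`,
/// advancing the sequence marker. `merge_values` stands in for the real
/// per-aggregate merge semantics (`__aggr_merge`).
fn merge_partial_state(
    state: &mut FlowState,
    window: Timestamp,
    partial: BTreeMap<Key, Value>,
    up_to: Sequence,
    merge_values: impl Fn(&Value, &Value) -> Value,
) {
    let entry = state.entry(window).or_insert_with(|| (BTreeMap::new(), 0));
    for (key, value) in partial {
        // Merge with any existing group state, otherwise insert as-is.
        let merged = match entry.0.get(&key) {
            Some(existing) => merge_values(existing, &value),
            None => value,
        };
        entry.0.insert(key, merged);
    }
    // Record how far this window has been computed.
    entry.1 = entry.1.max(up_to);
}
```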
#### 2. Incremental Computation Process

The computation process follows these steps:

1. **Trigger Evaluation**: Flownode determines when to trigger computation based on:
   - Time intervals (periodic updates)
   - Data volume thresholds
   - Sequence progress requirements

2. **Query Execution**: Flownode executes `__aggr_state` queries using its embedded frontend with:
   - Time window filters
   - Sequence range constraints

3. **State Update**: Flownode receives partial state results and updates its internal state:
   - Merges new values with existing aggregation state
   - Updates sequence markers to track progress
   - Identifies changed time windows for result computation

4. **Result Materialization**: Flownode computes final results using `__aggr_merge` operations:
   - Processes only updated time windows (and time series) for efficiency
   - Writes results back to datanode directly through its embedded frontend
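The four steps above can be tied together in a single coordination pass. The sketch below reuses the `FlowState` / `merge_partial_state` definitions from the earlier sketch and treats the embedded frontend's `__aggr_state` query and result write-back as injected closures; all of these names are illustrative, not actual GreptimeDB APIs, and step 1 is assumed to have already selected the dirty windows:

```rust
/// One coordination pass over the windows selected by trigger evaluation.
/// `query_aggr_state` stands in for the embedded frontend running an
/// `__aggr_state` query (step 2); `write_merged` stands in for materializing
/// results back to the datanode (step 4).
fn run_flow_once(
    state: &mut FlowState,
    dirty_windows: &[Timestamp],
    last_sequence: Sequence,
    current_sequence: Sequence,
    query_aggr_state: impl Fn(Timestamp, Sequence, Sequence) -> BTreeMap<Key, Value>,
    write_merged: impl Fn(Timestamp, &BTreeMap<Key, Value>),
) {
    for &window in dirty_windows {
        // Step 2: fetch partial state constrained by time window and sequence range.
        let partial = query_aggr_state(window, last_sequence, current_sequence);
        // Step 3: merge into the in-memory state and advance the sequence marker.
        // (Summing per-slot values models a simple SUM-style aggregate.)
        merge_partial_state(state, window, partial, current_sequence, |a, b| {
            a.iter().zip(b.iter()).map(|(x, y)| x + y).collect()
        });
        // Step 4: write back only this (changed) window.
        if let Some((groups, _)) = state.get(&window) {
            write_merged(window, groups);
        }
    }
}
```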
### Detailed Workflow

#### Incremental State Query

```sql
-- Example incremental state query executed by embedded frontend
SELECT
    __aggr_state(avg(value)) as state,
    time_window,
    group_key
FROM source_table
WHERE
    timestamp >= :window_start
    AND timestamp < :window_end
    AND __sequence >= :last_sequence
    AND __sequence < :current_sequence
    -- sequence range is actually written in grpc header, but shown here for clarity
GROUP BY time_window, group_key;
```

#### State Merge Process

```mermaid
sequenceDiagram
    participant F as Flownode (Coordinator)
    participant EF as Embedded Frontend (Lightweight)
    participant DN as Datanode (Heavy Computation)

    F->>F: Evaluate trigger conditions
    F->>EF: Execute __aggr_state query with sequence range
    EF->>DN: Send query to datanode (Heavy scan & aggregation)
    DN->>DN: Scan data and compute partial aggregation state (Heavy CPU/I/O)
    DN->>EF: Return aggregated state results
    EF->>F: Forward state results (Lightweight merge)
    F->>F: Merge with existing state
    F->>F: Update sequence markers (Lightweight)
    F->>EF: Compute incremental results with __aggr_merge
    EF->>DN: Write incremental results to datanode
```

### Refill Implementation and State Management

#### Refill Process

Refill is implemented as a straightforward `__aggr_state` query with time and sequence constraints:

```sql
-- Refill query for flow state recovery
SELECT
    __aggr_state(aggregation_functions) as state,
    time_window,
    group_keys
FROM source_table
WHERE
    timestamp >= :refill_start_time
    AND timestamp < :refill_end_time
    AND __sequence >= :start_sequence
    AND __sequence < :end_sequence
    -- sequence range is actually written in grpc header, but shown here for clarity
GROUP BY time_window, group_keys;
```

#### State Recovery Strategy

1. **Recent Data (Stream Mode)**: For recent time windows, flownode refills state using incremental queries
2. **Historical Data (Batch Mode)**: For older time windows, flownode triggers batch computation directly, with no need to refill state
3. **Hybrid Approach**: Combines stream and batch processing based on data age and availability
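A small sketch of that decision, reusing the `Timestamp` alias from the earlier sketch; the `stream_horizon` cutoff and the names are illustrative assumptions:

```rust
/// How a dirty time window is brought up to date.
enum RecoveryMode {
    /// Recent window: refill state via incremental `__aggr_state` queries.
    Stream,
    /// Old window: recompute results in one batch query, no state refill.
    Batch,
}

/// Pick the recovery mode from the window's age relative to `now`.
/// `stream_horizon` is an assumed cutoff (e.g. a few multiples of the window size).
fn recovery_mode(window_end: Timestamp, now: Timestamp, stream_horizon: i64) -> RecoveryMode {
    if now - window_end <= stream_horizon {
        RecoveryMode::Stream
    } else {
        RecoveryMode::Batch
    }
}
```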
#### Mirror Write Optimization

Mirror writes are simplified to only transmit timestamps to flownode:

```rust
struct MirrorWrite {
    timestamps: Vec<Timestamp>,
    // Removed: actual data payload
}
```

This optimization:
- Eliminates network overhead by using embedded frontend
- Enables flownode to track pending time windows efficiently
- Allows flownode to decide processing mode (stream vs batch) based on timestamp age

Another optimization could be to send just the dirty time window range for each flow to flownode directly, instead of sending timestamps one by one.

### Query Optimization Strategies

#### Sequence-Based Incremental Processing

The core optimization relies on sequence-constrained queries:

```sql
-- Optimized incremental query
SELECT __aggr_state(expr)
FROM table
WHERE time_range AND sequence_range
```

Benefits:
- **Reduced Scan Volume**: Only processes data since last computation
- **Efficient Resource Usage**: Minimizes CPU and I/O overhead
- **Predictable Performance**: Query cost scales with incremental data size

#### Time Window Partitioning

```mermaid
graph LR
    subgraph "Time Windows"
        W1["Window 1<br/>09:00-09:05"]
        W2["Window 2<br/>09:05-09:10"]
        W3["Window 3<br/>09:10-09:15"]
    end

    subgraph "Processing Strategy"
        W1 --> Batch["Batch Mode<br/>(Old Data)"]
        W2 --> Stream["Stream Mode<br/>(Recent Data)"]
        W3 --> Stream2["Stream Mode<br/>(Current Data)"]
    end
```

### Performance Characteristics

#### Memory Usage

- **Flownode**: O(active_time_windows × group_cardinality) for state storage
- **Embedded Frontend**: O(query_batch_size) for temporary computation
- **Overall**: Significantly reduced compared to current architecture
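As a rough sense of scale, with assumed figures (illustrative only, not from the RFC) of 1,000 active time windows, 10,000 groups per window, and about 100 bytes per stored (key, value) entry, the state map costs roughly

```math
10^{3} \times 10^{4} \times 100\,\mathrm{B} = 10^{9}\,\mathrm{B} \approx 1\,\mathrm{GB}
```

so the per-flownode footprint is governed by window retention and group cardinality rather than by raw ingest volume.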
#### Computation Distribution

- **Direct Processing**: Queries processed directly within flownode's embedded frontend
- **Fault Tolerance**: Simplified error handling with fewer distributed components
- **Scalability**: Computation capacity scales with flownode instances

#### Network Optimization

- **Reduced Payload**: Mirror writes only contain timestamps
- **Efficient Queries**: Sequence constraints minimize data transfer
- **Result Caching**: State results cached in flownode memory

### Sequential Read Implementation for Incremental Queries

#### Sequence Management

Flow maintains two critical sequences to track incremental query progress for each region:

- **`memtable_last_seq`**: Tracks the latest sequence number read from the memtable
- **`sst_last_seq`**: Tracks the latest sequence number read from SST files

These sequences enable precise incremental data processing by defining the exact range of data to query in subsequent iterations.

#### Query Protocol

When executing incremental queries, flownode provides both sequence parameters to datanode:

```rust
struct GrpcHeader {
    ...
    // Sequence tracking for incremental reads
    memtable_last_seq: HashMap<RegionId, SequenceNumber>,
    sst_last_seqs: HashMap<RegionId, SequenceNumber>,
}
```

The datanode processes these parameters to return only the data within the specified sequence ranges, ensuring efficient incremental processing.

#### Sequence Invalidation and Refill Mechanism

A critical challenge occurs when data referenced by `memtable_last_seq` gets flushed from memory to disk. Since SST files only maintain a single maximum sequence number for the entire file (rather than per-record sequence tracking), precise incremental queries become impossible for the affected time ranges.

**Detection of Invalidation:**
```rust
// When memtable_last_seq data has been flushed to SST
if memtable_last_seq_flushed_to_disk {
    // Incremental query is no longer feasible
    // Need to trigger refill for affected time ranges
}
```
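A more concrete (and simplified) form of that check, assuming the region reports the highest sequence it has flushed to SSTs; the function and parameter names are illustrative, not an existing API:

```rust
/// The memtable-side progress marker is invalidated once the region has
/// flushed data past it: SSTs only carry a per-file maximum sequence, so the
/// unread delta can no longer be isolated by sequence alone.
fn needs_refill(memtable_last_seq: u64, region_flushed_seq: u64) -> bool {
    region_flushed_seq > memtable_last_seq
}
```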
**Refill Process:**
1. **Identify Affected Time Range**: Query the time range corresponding to the flushed `memtable_last_seq` data
2. **Full Recomputation**: Execute a complete aggregation query for the affected time windows
3. **State Replacement**: Replace the existing flow state for these time ranges with newly computed values
4. **Sequence Update**: Update `memtable_last_seq` to the current latest sequence, while `sst_last_seq` continues normal incremental updates

```sql
-- Refill query when memtable data has been flushed
SELECT
    __aggr_state(aggregation_functions) as state,
    time_window,
    group_keys
FROM source_table
WHERE
    timestamp >= :affected_time_start
    AND timestamp < :affected_time_end
    -- Full scan required since sequence precision is lost in SST
GROUP BY time_window, group_keys;
```

#### Datanode Implementation Requirements

Datanode must implement enhanced query processing capabilities to support sequence-based incremental reads:

**Input Processing:**
- Accept `memtable_last_seq` and `sst_last_seq` parameters in query requests
- Filter data based on sequence ranges across both memtable and SST storage layers

**Output Enhancement:**
```rust
struct OutputMeta {
    pub plan: Option<Arc<dyn ExecutionPlan>>,
    pub cost: OutputCost,
    pub sequence_info: HashMap<RegionId, SequenceInfo>, // New field for sequence tracking per region involved in the query
}

struct SequenceInfo {
    // Sequence tracking for next iteration
    max_memtable_seq: SequenceNumber, // Highest sequence from memtable in this result
    max_sst_seq: SequenceNumber,      // Highest sequence from SST in this result
}
```

**Sequence Tracking Logic:**
The datanode already implements `max_sst_seq` tracking for leader range reads; similar logic can be reused for `max_memtable_seq`.

#### Sequence Update Strategy

**Normal Incremental Updates:**
- Update both `memtable_last_seq` and `sst_last_seq` after successful query execution
- Use returned `max_memtable_seq` and `max_sst_seq` values for next iteration

**Refill Scenario:**
- Reset `memtable_last_seq` to current maximum after refill completion
- Continue normal `sst_last_seq` updates based on successful query responses
- Maintain separate tracking to detect future flush events
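A minimal sketch of a per-region tracker applying both rules, shaped after the `SequenceInfo` struct above; the tracker type and its method names are assumptions for illustration:

```rust
type SequenceNumber = u64;

/// Per-region read progress kept by the flow.
struct RegionProgress {
    memtable_last_seq: SequenceNumber,
    sst_last_seq: SequenceNumber,
}

/// Mirrors the per-region sequence metadata returned with query results.
struct SequenceInfo {
    max_memtable_seq: SequenceNumber,
    max_sst_seq: SequenceNumber,
}

impl RegionProgress {
    /// Normal incremental update after a successful query.
    fn apply(&mut self, info: &SequenceInfo) {
        self.memtable_last_seq = self.memtable_last_seq.max(info.max_memtable_seq);
        self.sst_last_seq = self.sst_last_seq.max(info.max_sst_seq);
    }

    /// Refill scenario: after recomputing the affected windows, reset the
    /// memtable marker to the current maximum; SST progress keeps advancing
    /// through `apply` as usual.
    fn after_refill(&mut self, current_max_memtable_seq: SequenceNumber) {
        self.memtable_last_seq = current_max_memtable_seq;
    }
}
```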
#### Performance Considerations

**Sequence Range Optimization:**
- Minimize sequence range spans to reduce scan overhead
- Batch multiple small incremental updates when beneficial
- Balance between query frequency and processing efficiency

**Memory Management:**
- Monitor memtable flush frequency to predict refill requirements
- Implement adaptive query scheduling based on flush patterns
- Optimize state storage to handle frequent updates efficiently

This sequential read implementation ensures reliable incremental processing while gracefully handling the complexities of storage architecture, maintaining both correctness and performance in the face of background compaction and flush operations.

## Implementation Plan

### Phase 1: Core Infrastructure

1. **State Management**: Implement in-memory state map in flownode
2. **Query Interface**: Integrate `__aggr_state` query interface in embedded frontend (already done in previous query pushdown optimizer work)
3. **Basic Coordination**: Implement query dispatch and result collection
4. **Sequence Tracking**: Implement sequence-based incremental processing (can use a similar interface to the one used by leader range reads)

After phase 1, the system should support basic flow operations with incremental updates.

### Phase 2: Optimization Features

1. **Refill Logic**: Develop state recovery mechanisms
2. **Mirror Write Optimization**: Simplify mirror write protocol

### Phase 3: Advanced Features

1. **Load Balancing**: Implement intelligent resource allocation for partitioned flows (flows distributed and executed on multiple flownodes)
2. **Fault Tolerance**: Add retry mechanisms and error handling
3. **Performance Tuning**: Optimize query batching and state management

## Drawbacks

### Reduced Network Communication

- **Eliminated Hops**: Direct communication between flownode and datanode through embedded frontend
- **Reduced Latency**: No separate frontend node communication overhead
- **Simplified Network Topology**: Fewer network dependencies and failure points

### Complexity in Error Handling

- **Distributed Failures**: Need to handle failures across multiple components
- **State Consistency**: Ensuring state consistency during partial failures
- **Recovery Complexity**: More complex recovery procedures

### Datanode Resource Requirements

- **Computation Load**: Datanode handles the heavy computational workload for flow queries
- **Query Interference**: Flow queries may impact regular query performance on datanode
- **Resource Contention**: Need careful resource management and isolation on datanode

## Alternatives

### Alternative 1: Enhanced Current Architecture

Keep computation in flownode but optimize through:
- Better resource management
- Improved query optimization
- Enhanced state persistence

**Pros:**
- Simpler architecture
- Fewer network hops
- Easier debugging

**Cons:**
- Limited scalability
- Resource inefficiency
- Harder to optimize computation distribution

### Alternative 2: Embedded Computation

Embed lightweight computation engines within flownode:

**Pros:**
- Reduced network communication
- Better performance for simple queries
- Simpler deployment

**Cons:**
- Limited scalability
- Resource constraints
- Harder to leverage existing frontend optimizations

## Future Work

### Advanced Query Optimization

- **Parallel Processing**: Enable parallel execution of flow queries
- **Query Caching**: Cache frequently executed query patterns

### Enhanced State Management

- **State Compression**: Implement efficient state serialization
- **Distributed State**: Support state distribution across multiple flownodes
- **State Persistence**: Add optional state persistence for durability

### Monitoring and Observability

- **Performance Metrics**: Track query execution times and resource usage
- **State Visualization**: Provide tools for state inspection and debugging
- **Health Monitoring**: Monitor system health and performance characteristics

### Integration Improvements

- **Embedded Frontend Optimization**: Optimize embedded frontend query planning and execution
- **Datanode Optimization**: Optimize result writing from flownode
- **Metasrv Coordination**: Enhanced metadata management and coordination

## Conclusion

The laminar Flow architecture represents a significant improvement over the current flow system by separating state management from computation execution. This design enables better resource utilization, improved scalability, and simplified maintenance while maintaining the core functionality of continuous aggregation.

The key benefits include:

1. **Improved Scalability**: Computation can scale independently of state management
2. **Better Resource Utilization**: Eliminates network overhead and leverages embedded frontend infrastructure
3. **Simplified Architecture**: Clear separation of concerns between components
4. **Enhanced Performance**: Sequence-based incremental processing reduces computational overhead

While the architecture introduces some complexity in terms of distributed coordination and error handling, the benefits significantly outweigh the drawbacks, making it a compelling evolution of the flow system.
File diff suppressed because it is too large
@@ -87,6 +87,13 @@
 | Other Request P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme, operation) (rate(opendal_operation_duration_seconds_bucket{instance=~"$datanode", operation!~"read\|write\|list\|Writer::write\|Writer::close\|Reader::read"}[$__rate_interval])))` | `timeseries` | Other Request P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
 | Opendal traffic | `sum by(instance, pod, scheme, operation) (rate(opendal_operation_bytes_sum{instance=~"$datanode"}[$__rate_interval]))` | `timeseries` | Total traffic as in bytes by instance and operation | `prometheus` | `decbytes` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
 | OpenDAL errors per Instance | `sum by(instance, pod, scheme, operation, error) (rate(opendal_operation_errors_total{instance=~"$datanode", error!="NotFound"}[$__rate_interval]))` | `timeseries` | OpenDAL error counts per Instance. | `prometheus` | -- | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]-[{{error}}]` |
+# Remote WAL
+| Title | Query | Type | Description | Datasource | Unit | Legend Format |
+| --- | --- | --- | --- | --- | --- | --- |
+| Triggered region flush total | `meta_triggered_region_flush_total` | `timeseries` | Triggered region flush total | `prometheus` | `none` | `{{pod}}-{{topic_name}}` |
+| Triggered region checkpoint total | `meta_triggered_region_checkpoint_total` | `timeseries` | Triggered region checkpoint total | `prometheus` | `none` | `{{pod}}-{{topic_name}}` |
+| Topic estimated replay size | `meta_topic_estimated_replay_size` | `timeseries` | Topic estimated max replay size | `prometheus` | `bytes` | `{{pod}}-{{topic_name}}` |
+| Kafka logstore's bytes traffic | `rate(greptime_logstore_kafka_client_bytes_total[$__rate_interval])` | `timeseries` | Kafka logstore's bytes traffic | `prometheus` | `bytes` | `{{pod}}-{{logstore}}` |
 # Metasrv
 | Title | Query | Type | Description | Datasource | Unit | Legend Format |
 | --- | --- | --- | --- | --- | --- | --- |
@@ -103,6 +110,8 @@
 | Meta KV Ops Latency | `histogram_quantile(0.99, sum by(pod, le, op, target) (greptime_meta_kv_request_elapsed_bucket))` | `timeseries` | Gauge of load information of each datanode, collected via heartbeat between datanode and metasrv. This information is for metasrv to schedule workloads. | `prometheus` | `s` | `{{pod}}-{{op}} p99` |
 | Rate of meta KV Ops | `rate(greptime_meta_kv_request_elapsed_count[$__rate_interval])` | `timeseries` | Gauge of load information of each datanode, collected via heartbeat between datanode and metasrv. This information is for metasrv to schedule workloads. | `prometheus` | `none` | `{{pod}}-{{op}} p99` |
 | DDL Latency | `histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_create_tables_bucket))`<br/>`histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_create_table))`<br/>`histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_create_view))`<br/>`histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_create_flow))`<br/>`histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_drop_table))`<br/>`histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_alter_table))` | `timeseries` | Gauge of load information of each datanode, collected via heartbeat between datanode and metasrv. This information is for metasrv to schedule workloads. | `prometheus` | `s` | `CreateLogicalTables-{{step}} p90` |
+| Reconciliation stats | `greptime_meta_reconciliation_stats` | `timeseries` | Reconciliation stats | `prometheus` | `s` | `{{pod}}-{{table_type}}-{{type}}` |
+| Reconciliation steps | `histogram_quantile(0.9, greptime_meta_reconciliation_procedure_bucket)` | `timeseries` | Elapsed of Reconciliation steps | `prometheus` | `s` | `{{procedure_name}}-{{step}}-P90` |
 # Flownode
 | Title | Query | Type | Description | Datasource | Unit | Legend Format |
 | --- | --- | --- | --- | --- | --- | --- |
@@ -802,6 +802,48 @@ groups:
             type: prometheus
             uid: ${metrics}
           legendFormat: '[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]-[{{error}}]'
+  - title: Remote WAL
+    panels:
+      - title: Triggered region flush total
+        type: timeseries
+        description: Triggered region flush total
+        unit: none
+        queries:
+          - expr: meta_triggered_region_flush_total
+            datasource:
+              type: prometheus
+              uid: ${metrics}
+            legendFormat: '{{pod}}-{{topic_name}}'
+      - title: Triggered region checkpoint total
+        type: timeseries
+        description: Triggered region checkpoint total
+        unit: none
+        queries:
+          - expr: meta_triggered_region_checkpoint_total
+            datasource:
+              type: prometheus
+              uid: ${metrics}
+            legendFormat: '{{pod}}-{{topic_name}}'
+      - title: Topic estimated replay size
+        type: timeseries
+        description: Topic estimated max replay size
+        unit: bytes
+        queries:
+          - expr: meta_topic_estimated_replay_size
+            datasource:
+              type: prometheus
+              uid: ${metrics}
+            legendFormat: '{{pod}}-{{topic_name}}'
+      - title: Kafka logstore's bytes traffic
+        type: timeseries
+        description: Kafka logstore's bytes traffic
+        unit: bytes
+        queries:
+          - expr: rate(greptime_logstore_kafka_client_bytes_total[$__rate_interval])
+            datasource:
+              type: prometheus
+              uid: ${metrics}
+            legendFormat: '{{pod}}-{{logstore}}'
   - title: Metasrv
     panels:
       - title: Region migration datanode
@@ -948,6 +990,26 @@ groups:
             type: prometheus
             uid: ${metrics}
           legendFormat: AlterTable-{{step}} p90
+      - title: Reconciliation stats
+        type: timeseries
+        description: Reconciliation stats
+        unit: s
+        queries:
+          - expr: greptime_meta_reconciliation_stats
+            datasource:
+              type: prometheus
+              uid: ${metrics}
+            legendFormat: '{{pod}}-{{table_type}}-{{type}}'
+      - title: Reconciliation steps
+        type: timeseries
+        description: 'Elapsed of Reconciliation steps '
+        unit: s
+        queries:
+          - expr: histogram_quantile(0.9, greptime_meta_reconciliation_procedure_bucket)
+            datasource:
+              type: prometheus
+              uid: ${metrics}
+            legendFormat: '{{procedure_name}}-{{step}}-P90'
   - title: Flownode
     panels:
      - title: Flow Ingest / Output Rate
File diff suppressed because it is too large
@@ -87,6 +87,13 @@

| Other Request P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme, operation) (rate(opendal_operation_duration_seconds_bucket{ operation!~"read\|write\|list\|Writer::write\|Writer::close\|Reader::read"}[$__rate_interval])))` | `timeseries` | Other Request P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
| Opendal traffic | `sum by(instance, pod, scheme, operation) (rate(opendal_operation_bytes_sum{}[$__rate_interval]))` | `timeseries` | Total traffic in bytes by instance and operation | `prometheus` | `decbytes` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
| OpenDAL errors per Instance | `sum by(instance, pod, scheme, operation, error) (rate(opendal_operation_errors_total{ error!="NotFound"}[$__rate_interval]))` | `timeseries` | OpenDAL error counts per Instance. | `prometheus` | -- | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]-[{{error}}]` |

# Remote WAL

| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |
| Triggered region flush total | `meta_triggered_region_flush_total` | `timeseries` | Triggered region flush total | `prometheus` | `none` | `{{pod}}-{{topic_name}}` |
| Triggered region checkpoint total | `meta_triggered_region_checkpoint_total` | `timeseries` | Triggered region checkpoint total | `prometheus` | `none` | `{{pod}}-{{topic_name}}` |
| Topic estimated replay size | `meta_topic_estimated_replay_size` | `timeseries` | Topic estimated max replay size | `prometheus` | `bytes` | `{{pod}}-{{topic_name}}` |
| Kafka logstore's bytes traffic | `rate(greptime_logstore_kafka_client_bytes_total[$__rate_interval])` | `timeseries` | Kafka logstore's bytes traffic | `prometheus` | `bytes` | `{{pod}}-{{logstore}}` |

# Metasrv

| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |

@@ -103,6 +110,8 @@

| Meta KV Ops Latency | `histogram_quantile(0.99, sum by(pod, le, op, target) (greptime_meta_kv_request_elapsed_bucket))` | `timeseries` | P99 latency of metasrv KV operations. | `prometheus` | `s` | `{{pod}}-{{op}} p99` |
| Rate of meta KV Ops | `rate(greptime_meta_kv_request_elapsed_count[$__rate_interval])` | `timeseries` | Rate of metasrv KV operations. | `prometheus` | `none` | `{{pod}}-{{op}} p99` |
| DDL Latency | `histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_create_tables_bucket))`<br/>`histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_create_table))`<br/>`histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_create_view))`<br/>`histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_create_flow))`<br/>`histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_drop_table))`<br/>`histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_alter_table))` | `timeseries` | P90 latency of DDL procedures (create/alter/drop table, view and flow) on metasrv. | `prometheus` | `s` | `CreateLogicalTables-{{step}} p90` |
| Reconciliation stats | `greptime_meta_reconciliation_stats` | `timeseries` | Reconciliation stats | `prometheus` | `s` | `{{pod}}-{{table_type}}-{{type}}` |
| Reconciliation steps | `histogram_quantile(0.9, greptime_meta_reconciliation_procedure_bucket)` | `timeseries` | Elapsed time of reconciliation steps | `prometheus` | `s` | `{{procedure_name}}-{{step}}-P90` |

# Flownode

| Title | Query | Type | Description | Datasource | Unit | Legend Format |
| --- | --- | --- | --- | --- | --- | --- |

@@ -802,6 +802,48 @@ groups:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]-[{{error}}]'
|
||||
- title: Remote WAL
|
||||
panels:
|
||||
- title: Triggered region flush total
|
||||
type: timeseries
|
||||
description: Triggered region flush total
|
||||
unit: none
|
||||
queries:
|
||||
- expr: meta_triggered_region_flush_total
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '{{pod}}-{{topic_name}}'
|
||||
- title: Triggered region checkpoint total
|
||||
type: timeseries
|
||||
description: Triggered region checkpoint total
|
||||
unit: none
|
||||
queries:
|
||||
- expr: meta_triggered_region_checkpoint_total
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '{{pod}}-{{topic_name}}'
|
||||
- title: Topic estimated replay size
|
||||
type: timeseries
|
||||
description: Topic estimated max replay size
|
||||
unit: bytes
|
||||
queries:
|
||||
- expr: meta_topic_estimated_replay_size
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '{{pod}}-{{topic_name}}'
|
||||
- title: Kafka logstore's bytes traffic
|
||||
type: timeseries
|
||||
description: Kafka logstore's bytes traffic
|
||||
unit: bytes
|
||||
queries:
|
||||
- expr: rate(greptime_logstore_kafka_client_bytes_total[$__rate_interval])
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '{{pod}}-{{logstore}}'
|
||||
- title: Metasrv
|
||||
panels:
|
||||
- title: Region migration datanode
|
||||
@@ -948,6 +990,26 @@ groups:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: AlterTable-{{step}} p90
|
||||
- title: Reconciliation stats
|
||||
type: timeseries
|
||||
description: Reconciliation stats
|
||||
unit: s
|
||||
queries:
|
||||
- expr: greptime_meta_reconciliation_stats
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '{{pod}}-{{table_type}}-{{type}}'
|
||||
- title: Reconciliation steps
|
||||
type: timeseries
|
||||
description: Elapsed time of reconciliation steps
|
||||
unit: s
|
||||
queries:
|
||||
- expr: histogram_quantile(0.9, greptime_meta_reconciliation_procedure_bucket)
|
||||
datasource:
|
||||
type: prometheus
|
||||
uid: ${metrics}
|
||||
legendFormat: '{{procedure_name}}-{{step}}-P90'
|
||||
- title: Flownode
|
||||
panels:
|
||||
- title: Flow Ingest / Output Rate
|
||||
|
||||
41
scripts/generate_certs.sh
Executable file
@@ -0,0 +1,41 @@
#!/usr/bin/env bash
set -euo pipefail

CERT_DIR="${1:-$(dirname "$0")/../tests-integration/fixtures/certs}"
DAYS="${2:-365}"

mkdir -p "${CERT_DIR}"
cd "${CERT_DIR}"

echo "Generating CA certificate..."
openssl req -new -x509 -days "${DAYS}" -nodes -text \
  -out root.crt -keyout root.key \
  -subj "/CN=GreptimeDBRootCA"

echo "Generating server certificate..."
openssl req -new -nodes -text \
  -out server.csr -keyout server.key \
  -subj "/CN=greptime"

openssl x509 -req -in server.csr -text -days "${DAYS}" \
  -CA root.crt -CAkey root.key -CAcreateserial \
  -out server.crt \
  -extensions v3_req -extfile <(printf "[v3_req]\nsubjectAltName=DNS:localhost,IP:127.0.0.1")

echo "Generating client certificate..."
# Make sure the client certificate is for the greptimedb user
openssl req -new -nodes -text \
  -out client.csr -keyout client.key \
  -subj "/CN=greptimedb"

openssl x509 -req -in client.csr -CA root.crt -CAkey root.key -CAcreateserial \
  -out client.crt -days "${DAYS}" -extensions v3_req -extfile <(printf "[v3_req]\nsubjectAltName=DNS:localhost")

rm -f *.csr

echo "TLS certificates generated successfully in ${CERT_DIR}"

chmod 644 root.key
chmod 644 client.key
chmod 644 server.key
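# Illustrative usage only (the arguments below are examples, not part of the script):
#   scripts/generate_certs.sh                       # write certs to tests-integration/fixtures/certs, valid 365 days
#   scripts/generate_certs.sh /tmp/gt-certs 730     # custom output directory and validity period
# The keys are intentionally left world-readable (chmod 644), which is presumably
# acceptable only because these are throwaway test fixtures.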
@@ -19,8 +19,8 @@ use common_error::status_code::StatusCode;
|
||||
use common_macro::stack_trace_debug;
|
||||
use common_time::timestamp::TimeUnit;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use snafu::prelude::*;
|
||||
use snafu::Location;
|
||||
use snafu::prelude::*;
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
|
||||
|
||||
@@ -16,15 +16,15 @@ use std::collections::HashSet;
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_base::BitVec;
|
||||
use common_decimal::decimal128::{DECIMAL128_DEFAULT_SCALE, DECIMAL128_MAX_PRECISION};
|
||||
use common_decimal::Decimal128;
|
||||
use common_decimal::decimal128::{DECIMAL128_DEFAULT_SCALE, DECIMAL128_MAX_PRECISION};
|
||||
use common_time::time::Time;
|
||||
use common_time::timestamp::TimeUnit;
|
||||
use common_time::{Date, IntervalDayTime, IntervalMonthDayNano, IntervalYearMonth, Timestamp};
|
||||
use datatypes::prelude::{ConcreteDataType, ValueRef};
|
||||
use datatypes::scalars::ScalarVector;
|
||||
use datatypes::types::{
|
||||
Int16Type, Int8Type, IntervalType, TimeType, TimestampType, UInt16Type, UInt8Type,
|
||||
Int8Type, Int16Type, IntervalType, TimeType, TimestampType, UInt8Type, UInt16Type,
|
||||
};
|
||||
use datatypes::value::{OrderedF32, OrderedF64, Value};
|
||||
use datatypes::vectors::{
|
||||
@@ -295,7 +295,7 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
|
||||
| ConcreteDataType::Struct(_)
|
||||
| ConcreteDataType::Dictionary(_)
|
||||
| ConcreteDataType::Duration(_) => {
|
||||
return error::IntoColumnDataTypeSnafu { from: datatype }.fail()
|
||||
return error::IntoColumnDataTypeSnafu { from: datatype }.fail();
|
||||
}
|
||||
};
|
||||
let datatype_extension = match column_datatype {
|
||||
|
||||
@@ -15,9 +15,9 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use datatypes::schema::{
|
||||
ColumnDefaultConstraint, ColumnSchema, FulltextAnalyzer, FulltextBackend, FulltextOptions,
|
||||
SkippingIndexOptions, SkippingIndexType, COMMENT_KEY, FULLTEXT_KEY, INVERTED_INDEX_KEY,
|
||||
SKIPPING_INDEX_KEY,
|
||||
COMMENT_KEY, ColumnDefaultConstraint, ColumnSchema, FULLTEXT_KEY, FulltextAnalyzer,
|
||||
FulltextBackend, FulltextOptions, INVERTED_INDEX_KEY, SKIPPING_INDEX_KEY, SkippingIndexOptions,
|
||||
SkippingIndexType,
|
||||
};
|
||||
use greptime_proto::v1::{
|
||||
Analyzer, FulltextBackend as PbFulltextBackend, SkippingIndexType as PbSkippingIndexType,
|
||||
|
||||
@@ -17,13 +17,13 @@ use std::sync::Arc;
|
||||
use common_base::secrets::SecretString;
|
||||
use digest::Digest;
|
||||
use sha1::Sha1;
|
||||
use snafu::{ensure, OptionExt};
|
||||
use snafu::{OptionExt, ensure};
|
||||
|
||||
use crate::error::{IllegalParamSnafu, InvalidConfigSnafu, Result, UserPasswordMismatchSnafu};
|
||||
use crate::user_info::DefaultUserInfo;
|
||||
use crate::user_provider::static_user_provider::{StaticUserProvider, STATIC_USER_PROVIDER};
|
||||
use crate::user_provider::static_user_provider::{STATIC_USER_PROVIDER, StaticUserProvider};
|
||||
use crate::user_provider::watch_file_user_provider::{
|
||||
WatchFileUserProvider, WATCH_FILE_USER_PROVIDER,
|
||||
WATCH_FILE_USER_PROVIDER, WatchFileUserProvider,
|
||||
};
|
||||
use crate::{UserInfoRef, UserProviderRef};
|
||||
|
||||
|
||||
@@ -22,13 +22,13 @@ mod user_provider;
|
||||
pub mod tests;
|
||||
|
||||
pub use common::{
|
||||
auth_mysql, static_user_provider_from_option, user_provider_from_option, userinfo_by_name,
|
||||
HashedPassword, Identity, Password,
|
||||
HashedPassword, Identity, Password, auth_mysql, static_user_provider_from_option,
|
||||
user_provider_from_option, userinfo_by_name,
|
||||
};
|
||||
pub use permission::{PermissionChecker, PermissionReq, PermissionResp};
|
||||
pub use permission::{DefaultPermissionChecker, PermissionChecker, PermissionReq, PermissionResp};
|
||||
pub use user_info::UserInfo;
|
||||
pub use user_provider::static_user_provider::StaticUserProvider;
|
||||
pub use user_provider::UserProvider;
|
||||
pub use user_provider::static_user_provider::StaticUserProvider;
|
||||
|
||||
/// pub type alias
|
||||
pub type UserInfoRef = std::sync::Arc<dyn UserInfo>;
|
||||
|
||||
@@ -13,12 +13,15 @@
|
||||
// limitations under the License.
|
||||
|
||||
use std::fmt::Debug;
|
||||
use std::sync::Arc;
|
||||
|
||||
use api::v1::greptime_request::Request;
|
||||
use common_telemetry::debug;
|
||||
use sql::statements::statement::Statement;
|
||||
|
||||
use crate::error::{PermissionDeniedSnafu, Result};
|
||||
use crate::{PermissionCheckerRef, UserInfoRef};
|
||||
use crate::user_info::DefaultUserInfo;
|
||||
use crate::{PermissionCheckerRef, UserInfo, UserInfoRef};
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub enum PermissionReq<'a> {
|
||||
@@ -35,6 +38,32 @@ pub enum PermissionReq<'a> {
|
||||
BulkInsert,
|
||||
}
|
||||
|
||||
impl<'a> PermissionReq<'a> {
|
||||
/// Returns true if the permission request is for read operations.
|
||||
pub fn is_readonly(&self) -> bool {
|
||||
match self {
|
||||
PermissionReq::GrpcRequest(Request::Query(_))
|
||||
| PermissionReq::PromQuery
|
||||
| PermissionReq::LogQuery
|
||||
| PermissionReq::PromStoreRead => true,
|
||||
PermissionReq::SqlStatement(stmt) => stmt.is_readonly(),
|
||||
|
||||
PermissionReq::GrpcRequest(_)
|
||||
| PermissionReq::Opentsdb
|
||||
| PermissionReq::LineProtocol
|
||||
| PermissionReq::PromStoreWrite
|
||||
| PermissionReq::Otlp
|
||||
| PermissionReq::LogWrite
|
||||
| PermissionReq::BulkInsert => false,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns true if the permission request is for write operations.
|
||||
pub fn is_write(&self) -> bool {
|
||||
!self.is_readonly()
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum PermissionResp {
|
||||
Allow,
|
||||
@@ -65,3 +94,106 @@ impl PermissionChecker for Option<&PermissionCheckerRef> {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// The default permission checker implementation.
|
||||
/// It checks the permission mode of [DefaultUserInfo].
|
||||
pub struct DefaultPermissionChecker;
|
||||
|
||||
impl DefaultPermissionChecker {
|
||||
/// Returns a new [PermissionCheckerRef] instance.
|
||||
pub fn arc() -> PermissionCheckerRef {
|
||||
Arc::new(DefaultPermissionChecker)
|
||||
}
|
||||
}
|
||||
|
||||
impl PermissionChecker for DefaultPermissionChecker {
|
||||
fn check_permission(
|
||||
&self,
|
||||
user_info: UserInfoRef,
|
||||
req: PermissionReq,
|
||||
) -> Result<PermissionResp> {
|
||||
if let Some(default_user) = user_info.as_any().downcast_ref::<DefaultUserInfo>() {
|
||||
let permission_mode = default_user.permission_mode();
|
||||
|
||||
if req.is_readonly() && !permission_mode.can_read() {
|
||||
debug!(
|
||||
"Permission denied: read operation not allowed, user = {}, permission = {}",
|
||||
default_user.username(),
|
||||
permission_mode.as_str()
|
||||
);
|
||||
return Ok(PermissionResp::Reject);
|
||||
}
|
||||
|
||||
if req.is_write() && !permission_mode.can_write() {
|
||||
debug!(
|
||||
"Permission denied: write operation not allowed, user = {}, permission = {}",
|
||||
default_user.username(),
|
||||
permission_mode.as_str()
|
||||
);
|
||||
return Ok(PermissionResp::Reject);
|
||||
}
|
||||
}
|
||||
|
||||
// default allow all
|
||||
Ok(PermissionResp::Allow)
|
||||
}
|
||||
}
|
||||
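// Illustrative wiring sketch (the surrounding setup is an assumption, not part of this
// module): any component that already accepts a `PermissionCheckerRef` can plug in the
// default checker and let the user's permission mode drive the decision, e.g.
//
//     let checker: PermissionCheckerRef = DefaultPermissionChecker::arc();
//     // `user_info` comes from a `UserProvider::authenticate` call.
//     match checker.check_permission(user_info, PermissionReq::PromQuery)? {
//         PermissionResp::Allow => { /* proceed with the query */ }
//         PermissionResp::Reject => { /* surface a permission-denied error */ }
//     }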
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::user_info::PermissionMode;
|
||||
|
||||
#[test]
|
||||
fn test_default_permission_checker_allow_all_operations() {
|
||||
let checker = DefaultPermissionChecker;
|
||||
let user_info =
|
||||
DefaultUserInfo::with_name_and_permission("test_user", PermissionMode::ReadWrite);
|
||||
|
||||
let read_req = PermissionReq::PromQuery;
|
||||
let write_req = PermissionReq::PromStoreWrite;
|
||||
|
||||
let read_result = checker
|
||||
.check_permission(user_info.clone(), read_req)
|
||||
.unwrap();
|
||||
let write_result = checker.check_permission(user_info, write_req).unwrap();
|
||||
|
||||
assert!(matches!(read_result, PermissionResp::Allow));
|
||||
assert!(matches!(write_result, PermissionResp::Allow));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_default_permission_checker_readonly_user() {
|
||||
let checker = DefaultPermissionChecker;
|
||||
let user_info =
|
||||
DefaultUserInfo::with_name_and_permission("readonly_user", PermissionMode::ReadOnly);
|
||||
|
||||
let read_req = PermissionReq::PromQuery;
|
||||
let write_req = PermissionReq::PromStoreWrite;
|
||||
|
||||
let read_result = checker
|
||||
.check_permission(user_info.clone(), read_req)
|
||||
.unwrap();
|
||||
let write_result = checker.check_permission(user_info, write_req).unwrap();
|
||||
|
||||
assert!(matches!(read_result, PermissionResp::Allow));
|
||||
assert!(matches!(write_result, PermissionResp::Reject));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_default_permission_checker_writeonly_user() {
|
||||
let checker = DefaultPermissionChecker;
|
||||
let user_info =
|
||||
DefaultUserInfo::with_name_and_permission("writeonly_user", PermissionMode::WriteOnly);
|
||||
|
||||
let read_req = PermissionReq::LogQuery;
|
||||
let write_req = PermissionReq::LogWrite;
|
||||
|
||||
let read_result = checker
|
||||
.check_permission(user_info.clone(), read_req)
|
||||
.unwrap();
|
||||
let write_result = checker.check_permission(user_info, write_req).unwrap();
|
||||
|
||||
assert!(matches!(read_result, PermissionResp::Reject));
|
||||
assert!(matches!(write_result, PermissionResp::Allow));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -21,7 +21,7 @@ use crate::error::{
|
||||
UserPasswordMismatchSnafu,
|
||||
};
|
||||
use crate::user_info::DefaultUserInfo;
|
||||
use crate::{auth_mysql, Identity, Password, UserInfoRef, UserProvider};
|
||||
use crate::{Identity, Password, UserInfoRef, UserProvider, auth_mysql};
|
||||
|
||||
pub struct DatabaseAuthInfo<'a> {
|
||||
pub catalog: &'a str,
|
||||
|
||||
@@ -23,17 +23,86 @@ pub trait UserInfo: Debug + Sync + Send {
|
||||
fn username(&self) -> &str;
|
||||
}
|
||||
|
||||
/// The user permission mode
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
|
||||
pub enum PermissionMode {
|
||||
#[default]
|
||||
ReadWrite,
|
||||
ReadOnly,
|
||||
WriteOnly,
|
||||
}
|
||||
|
||||
impl PermissionMode {
|
||||
/// Parse permission mode from string.
|
||||
/// Supported values are:
|
||||
/// - "rw", "readwrite", "read_write" => ReadWrite
|
||||
/// - "ro", "readonly", "read_only" => ReadOnly
|
||||
/// - "wo", "writeonly", "write_only" => WriteOnly
|
||||
/// Unrecognized values fall back to the default, ReadWrite.
|
||||
pub fn from_str(s: &str) -> Self {
|
||||
match s.to_lowercase().as_str() {
|
||||
"readwrite" | "read_write" | "rw" => PermissionMode::ReadWrite,
|
||||
"readonly" | "read_only" | "ro" => PermissionMode::ReadOnly,
|
||||
"writeonly" | "write_only" | "wo" => PermissionMode::WriteOnly,
|
||||
_ => PermissionMode::ReadWrite,
|
||||
}
|
||||
}
|
||||
|
||||
/// Convert permission mode to string.
|
||||
/// - ReadWrite => "rw"
|
||||
/// - ReadOnly => "ro"
|
||||
/// - WriteOnly => "wo"
|
||||
/// The returned string is a static string slice.
|
||||
pub fn as_str(&self) -> &'static str {
|
||||
match self {
|
||||
PermissionMode::ReadWrite => "rw",
|
||||
PermissionMode::ReadOnly => "ro",
|
||||
PermissionMode::WriteOnly => "wo",
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns true if the permission mode allows read operations.
|
||||
pub fn can_read(&self) -> bool {
|
||||
matches!(self, PermissionMode::ReadWrite | PermissionMode::ReadOnly)
|
||||
}
|
||||
|
||||
/// Returns true if the permission mode allows write operations.
|
||||
pub fn can_write(&self) -> bool {
|
||||
matches!(self, PermissionMode::ReadWrite | PermissionMode::WriteOnly)
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Display for PermissionMode {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "{}", self.as_str())
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub(crate) struct DefaultUserInfo {
|
||||
username: String,
|
||||
permission_mode: PermissionMode,
|
||||
}
|
||||
|
||||
impl DefaultUserInfo {
|
||||
pub(crate) fn with_name(username: impl Into<String>) -> UserInfoRef {
|
||||
Self::with_name_and_permission(username, PermissionMode::default())
|
||||
}
|
||||
|
||||
/// Create a UserInfo with specified permission mode.
|
||||
pub(crate) fn with_name_and_permission(
|
||||
username: impl Into<String>,
|
||||
permission_mode: PermissionMode,
|
||||
) -> UserInfoRef {
|
||||
Arc::new(Self {
|
||||
username: username.into(),
|
||||
permission_mode,
|
||||
})
|
||||
}
|
||||
|
||||
pub(crate) fn permission_mode(&self) -> &PermissionMode {
|
||||
&self.permission_mode
|
||||
}
|
||||
}
|
||||
|
||||
impl UserInfo for DefaultUserInfo {
|
||||
@@ -45,3 +114,120 @@ impl UserInfo for DefaultUserInfo {
|
||||
self.username.as_str()
|
||||
}
|
||||
}
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_permission_mode_from_str() {
|
||||
// Test ReadWrite variants
|
||||
assert_eq!(
|
||||
PermissionMode::from_str("readwrite"),
|
||||
PermissionMode::ReadWrite
|
||||
);
|
||||
assert_eq!(
|
||||
PermissionMode::from_str("read_write"),
|
||||
PermissionMode::ReadWrite
|
||||
);
|
||||
assert_eq!(PermissionMode::from_str("rw"), PermissionMode::ReadWrite);
|
||||
assert_eq!(
|
||||
PermissionMode::from_str("ReadWrite"),
|
||||
PermissionMode::ReadWrite
|
||||
);
|
||||
assert_eq!(PermissionMode::from_str("RW"), PermissionMode::ReadWrite);
|
||||
|
||||
// Test ReadOnly variants
|
||||
assert_eq!(
|
||||
PermissionMode::from_str("readonly"),
|
||||
PermissionMode::ReadOnly
|
||||
);
|
||||
assert_eq!(
|
||||
PermissionMode::from_str("read_only"),
|
||||
PermissionMode::ReadOnly
|
||||
);
|
||||
assert_eq!(PermissionMode::from_str("ro"), PermissionMode::ReadOnly);
|
||||
assert_eq!(
|
||||
PermissionMode::from_str("ReadOnly"),
|
||||
PermissionMode::ReadOnly
|
||||
);
|
||||
assert_eq!(PermissionMode::from_str("RO"), PermissionMode::ReadOnly);
|
||||
|
||||
// Test WriteOnly variants
|
||||
assert_eq!(
|
||||
PermissionMode::from_str("writeonly"),
|
||||
PermissionMode::WriteOnly
|
||||
);
|
||||
assert_eq!(
|
||||
PermissionMode::from_str("write_only"),
|
||||
PermissionMode::WriteOnly
|
||||
);
|
||||
assert_eq!(PermissionMode::from_str("wo"), PermissionMode::WriteOnly);
|
||||
assert_eq!(
|
||||
PermissionMode::from_str("WriteOnly"),
|
||||
PermissionMode::WriteOnly
|
||||
);
|
||||
assert_eq!(PermissionMode::from_str("WO"), PermissionMode::WriteOnly);
|
||||
|
||||
// Test invalid inputs default to ReadWrite
|
||||
assert_eq!(
|
||||
PermissionMode::from_str("invalid"),
|
||||
PermissionMode::ReadWrite
|
||||
);
|
||||
assert_eq!(PermissionMode::from_str(""), PermissionMode::ReadWrite);
|
||||
assert_eq!(PermissionMode::from_str("xyz"), PermissionMode::ReadWrite);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_permission_mode_as_str() {
|
||||
assert_eq!(PermissionMode::ReadWrite.as_str(), "rw");
|
||||
assert_eq!(PermissionMode::ReadOnly.as_str(), "ro");
|
||||
assert_eq!(PermissionMode::WriteOnly.as_str(), "wo");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_permission_mode_default() {
|
||||
assert_eq!(PermissionMode::default(), PermissionMode::ReadWrite);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_permission_mode_round_trip() {
|
||||
let modes = [
|
||||
PermissionMode::ReadWrite,
|
||||
PermissionMode::ReadOnly,
|
||||
PermissionMode::WriteOnly,
|
||||
];
|
||||
|
||||
for mode in modes {
|
||||
let str_repr = mode.as_str();
|
||||
let parsed = PermissionMode::from_str(str_repr);
|
||||
assert_eq!(mode, parsed);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_default_user_info_with_name() {
|
||||
let user_info = DefaultUserInfo::with_name("test_user");
|
||||
assert_eq!(user_info.username(), "test_user");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_default_user_info_with_name_and_permission() {
|
||||
let user_info =
|
||||
DefaultUserInfo::with_name_and_permission("test_user", PermissionMode::ReadOnly);
|
||||
assert_eq!(user_info.username(), "test_user");
|
||||
|
||||
// Cast to DefaultUserInfo to access permission_mode
|
||||
let default_user = user_info
|
||||
.as_any()
|
||||
.downcast_ref::<DefaultUserInfo>()
|
||||
.unwrap();
|
||||
assert_eq!(default_user.permission_mode, PermissionMode::ReadOnly);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_user_info_as_any() {
|
||||
let user_info = DefaultUserInfo::with_name("test_user");
|
||||
let any_ref = user_info.as_any();
|
||||
assert!(any_ref.downcast_ref::<DefaultUserInfo>().is_some());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -22,15 +22,15 @@ use std::io::BufRead;
|
||||
use std::path::Path;
|
||||
|
||||
use common_base::secrets::ExposeSecret;
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
use snafu::{OptionExt, ResultExt, ensure};
|
||||
|
||||
use crate::common::{Identity, Password};
|
||||
use crate::error::{
|
||||
IllegalParamSnafu, InvalidConfigSnafu, IoSnafu, Result, UnsupportedPasswordTypeSnafu,
|
||||
UserNotFoundSnafu, UserPasswordMismatchSnafu,
|
||||
};
|
||||
use crate::user_info::DefaultUserInfo;
|
||||
use crate::{auth_mysql, UserInfoRef};
|
||||
use crate::user_info::{DefaultUserInfo, PermissionMode};
|
||||
use crate::{UserInfoRef, auth_mysql};
|
||||
|
||||
#[async_trait::async_trait]
|
||||
pub trait UserProvider: Send + Sync {
|
||||
@@ -64,11 +64,19 @@ pub trait UserProvider: Send + Sync {
|
||||
}
|
||||
}
|
||||
|
||||
fn load_credential_from_file(filepath: &str) -> Result<Option<HashMap<String, Vec<u8>>>> {
|
||||
/// Type alias for user info map
|
||||
/// Key is username, value is (password, permission_mode)
|
||||
pub type UserInfoMap = HashMap<String, (Vec<u8>, PermissionMode)>;
|
||||
|
||||
fn load_credential_from_file(filepath: &str) -> Result<UserInfoMap> {
|
||||
// check valid path
|
||||
let path = Path::new(filepath);
|
||||
if !path.exists() {
|
||||
return Ok(None);
|
||||
return InvalidConfigSnafu {
|
||||
value: filepath.to_string(),
|
||||
msg: "UserProvider file must exist",
|
||||
}
|
||||
.fail();
|
||||
}
|
||||
|
||||
ensure!(
|
||||
@@ -83,13 +91,19 @@ fn load_credential_from_file(filepath: &str) -> Result<Option<HashMap<String, Ve
|
||||
.lines()
|
||||
.map_while(std::result::Result::ok)
|
||||
.filter_map(|line| {
|
||||
if let Some((k, v)) = line.split_once('=') {
|
||||
Some((k.to_string(), v.as_bytes().to_vec()))
|
||||
} else {
|
||||
None
|
||||
// The line format is:
|
||||
// - `username=password` - Basic user with default permissions
|
||||
// - `username:permission_mode=password` - User with specific permission mode
|
||||
// - Lines starting with '#' are treated as comments and ignored
|
||||
// - Empty lines are ignored
|
||||
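// For illustration (example values, not shipped defaults), a credential file
// following these rules could contain:
//   # ops accounts
//   admin=admin_pw            (read-write, the default)
//   viewer:ro=viewer_pw       (read-only)
//   ingestor:wo=ingestor_pw   (write-only)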
let line = line.trim();
|
||||
if line.is_empty() || line.starts_with('#') {
|
||||
return None;
|
||||
}
|
||||
|
||||
parse_credential_line(line)
|
||||
})
|
||||
.collect::<HashMap<String, Vec<u8>>>();
|
||||
.collect::<HashMap<String, _>>();
|
||||
|
||||
ensure!(
|
||||
!credential.is_empty(),
|
||||
@@ -99,11 +113,31 @@ fn load_credential_from_file(filepath: &str) -> Result<Option<HashMap<String, Ve
|
||||
}
|
||||
);
|
||||
|
||||
Ok(Some(credential))
|
||||
Ok(credential)
|
||||
}
|
||||
|
||||
/// Parse a credential line in the format `username=password` or `username:permission_mode=password`.
|
||||
pub(crate) fn parse_credential_line(line: &str) -> Option<(String, (Vec<u8>, PermissionMode))> {
|
||||
let parts = line.split('=').collect::<Vec<&str>>();
|
||||
if parts.len() != 2 {
|
||||
return None;
|
||||
}
|
||||
|
||||
let (username_part, password) = (parts[0], parts[1]);
|
||||
let (username, permission_mode) = if let Some((user, perm)) = username_part.split_once(':') {
|
||||
(user, PermissionMode::from_str(perm))
|
||||
} else {
|
||||
(username_part, PermissionMode::default())
|
||||
};
|
||||
|
||||
Some((
|
||||
username.to_string(),
|
||||
(password.as_bytes().to_vec(), permission_mode),
|
||||
))
|
||||
}
|
||||
|
||||
fn authenticate_with_credential(
|
||||
users: &HashMap<String, Vec<u8>>,
|
||||
users: &UserInfoMap,
|
||||
input_id: Identity<'_>,
|
||||
input_pwd: Password<'_>,
|
||||
) -> Result<UserInfoRef> {
|
||||
@@ -115,7 +149,7 @@ fn authenticate_with_credential(
|
||||
msg: "blank username"
|
||||
}
|
||||
);
|
||||
let save_pwd = users.get(username).context(UserNotFoundSnafu {
|
||||
let (save_pwd, permission_mode) = users.get(username).context(UserNotFoundSnafu {
|
||||
username: username.to_string(),
|
||||
})?;
|
||||
|
||||
@@ -128,7 +162,10 @@ fn authenticate_with_credential(
|
||||
}
|
||||
);
|
||||
if save_pwd == pwd.expose_secret().as_bytes() {
|
||||
Ok(DefaultUserInfo::with_name(username))
|
||||
Ok(DefaultUserInfo::with_name_and_permission(
|
||||
username,
|
||||
*permission_mode,
|
||||
))
|
||||
} else {
|
||||
UserPasswordMismatchSnafu {
|
||||
username: username.to_string(),
|
||||
@@ -137,8 +174,9 @@ fn authenticate_with_credential(
|
||||
}
|
||||
}
|
||||
Password::MysqlNativePassword(auth_data, salt) => {
|
||||
auth_mysql(auth_data, salt, username, save_pwd)
|
||||
.map(|_| DefaultUserInfo::with_name(username))
|
||||
auth_mysql(auth_data, salt, username, save_pwd).map(|_| {
|
||||
DefaultUserInfo::with_name_and_permission(username, *permission_mode)
|
||||
})
|
||||
}
|
||||
Password::PgMD5(_, _) => UnsupportedPasswordTypeSnafu {
|
||||
password_type: "pg_md5",
|
||||
@@ -148,3 +186,108 @@ fn authenticate_with_credential(
|
||||
}
|
||||
}
|
||||
}
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_parse_credential_line() {
|
||||
// Basic username=password format
|
||||
let result = parse_credential_line("admin=password123");
|
||||
assert_eq!(
|
||||
result,
|
||||
Some((
|
||||
"admin".to_string(),
|
||||
("password123".as_bytes().to_vec(), PermissionMode::default())
|
||||
))
|
||||
);
|
||||
|
||||
// Username with permission mode
|
||||
let result = parse_credential_line("user:ReadOnly=secret");
|
||||
assert_eq!(
|
||||
result,
|
||||
Some((
|
||||
"user".to_string(),
|
||||
("secret".as_bytes().to_vec(), PermissionMode::ReadOnly)
|
||||
))
|
||||
);
|
||||
let result = parse_credential_line("user:ro=secret");
|
||||
assert_eq!(
|
||||
result,
|
||||
Some((
|
||||
"user".to_string(),
|
||||
("secret".as_bytes().to_vec(), PermissionMode::ReadOnly)
|
||||
))
|
||||
);
|
||||
// Username with WriteOnly permission mode
|
||||
let result = parse_credential_line("writer:WriteOnly=mypass");
|
||||
assert_eq!(
|
||||
result,
|
||||
Some((
|
||||
"writer".to_string(),
|
||||
("mypass".as_bytes().to_vec(), PermissionMode::WriteOnly)
|
||||
))
|
||||
);
|
||||
|
||||
// Username with 'wo' as WriteOnly permission shorthand
|
||||
let result = parse_credential_line("writer:wo=mypass");
|
||||
assert_eq!(
|
||||
result,
|
||||
Some((
|
||||
"writer".to_string(),
|
||||
("mypass".as_bytes().to_vec(), PermissionMode::WriteOnly)
|
||||
))
|
||||
);
|
||||
|
||||
// Username with complex password containing special characters
|
||||
let result = parse_credential_line("admin:rw=p@ssw0rd!123");
|
||||
assert_eq!(
|
||||
result,
|
||||
Some((
|
||||
"admin".to_string(),
|
||||
(
|
||||
"p@ssw0rd!123".as_bytes().to_vec(),
|
||||
PermissionMode::ReadWrite
|
||||
)
|
||||
))
|
||||
);
|
||||
|
||||
// Username with spaces should be preserved
|
||||
let result = parse_credential_line("user name:WriteOnly=password");
|
||||
assert_eq!(
|
||||
result,
|
||||
Some((
|
||||
"user name".to_string(),
|
||||
("password".as_bytes().to_vec(), PermissionMode::WriteOnly)
|
||||
))
|
||||
);
|
||||
|
||||
// Invalid format - no equals sign
|
||||
let result = parse_credential_line("invalid_line");
|
||||
assert_eq!(result, None);
|
||||
|
||||
// Invalid format - multiple equals signs
|
||||
let result = parse_credential_line("user=pass=word");
|
||||
assert_eq!(result, None);
|
||||
|
||||
// Empty password
|
||||
let result = parse_credential_line("user=");
|
||||
assert_eq!(
|
||||
result,
|
||||
Some((
|
||||
"user".to_string(),
|
||||
("".as_bytes().to_vec(), PermissionMode::default())
|
||||
))
|
||||
);
|
||||
|
||||
// Empty username
|
||||
let result = parse_credential_line("=password");
|
||||
assert_eq!(
|
||||
result,
|
||||
Some((
|
||||
"".to_string(),
|
||||
("password".as_bytes().to_vec(), PermissionMode::default())
|
||||
))
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -12,19 +12,19 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::HashMap;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
|
||||
use crate::error::{FromUtf8Snafu, InvalidConfigSnafu, Result};
|
||||
use crate::user_provider::{authenticate_with_credential, load_credential_from_file};
|
||||
use crate::user_provider::{
|
||||
UserInfoMap, authenticate_with_credential, load_credential_from_file, parse_credential_line,
|
||||
};
|
||||
use crate::{Identity, Password, UserInfoRef, UserProvider};
|
||||
|
||||
pub(crate) const STATIC_USER_PROVIDER: &str = "static_user_provider";
|
||||
|
||||
pub struct StaticUserProvider {
|
||||
users: HashMap<String, Vec<u8>>,
|
||||
users: UserInfoMap,
|
||||
}
|
||||
|
||||
impl StaticUserProvider {
|
||||
@@ -35,23 +35,18 @@ impl StaticUserProvider {
|
||||
})?;
|
||||
match mode {
|
||||
"file" => {
|
||||
let users = load_credential_from_file(content)?
|
||||
.context(InvalidConfigSnafu {
|
||||
value: content.to_string(),
|
||||
msg: "StaticFileUserProvider must be a valid file path",
|
||||
})?;
|
||||
let users = load_credential_from_file(content)?;
|
||||
Ok(StaticUserProvider { users })
|
||||
}
|
||||
"cmd" => content
|
||||
.split(',')
|
||||
.map(|kv| {
|
||||
let (k, v) = kv.split_once('=').context(InvalidConfigSnafu {
|
||||
parse_credential_line(kv).context(InvalidConfigSnafu {
|
||||
value: kv.to_string(),
|
||||
msg: "StaticUserProviderOption cmd values must be in format `user=pwd[,user=pwd]`",
|
||||
})?;
|
||||
Ok((k.to_string(), v.as_bytes().to_vec()))
|
||||
})
|
||||
})
|
||||
.collect::<Result<HashMap<String, Vec<u8>>>>()
|
||||
.collect::<Result<UserInfoMap>>()
|
||||
.map(|users| StaticUserProvider { users }),
|
||||
_ => InvalidConfigSnafu {
|
||||
value: mode.to_string(),
|
||||
@@ -69,7 +64,7 @@ impl StaticUserProvider {
|
||||
msg: "Expect at least one pair of username and password",
|
||||
})?;
|
||||
let username = kv.0;
|
||||
let pwd = String::from_utf8(kv.1.clone()).context(FromUtf8Snafu)?;
|
||||
let pwd = String::from_utf8(kv.1.0.clone()).context(FromUtf8Snafu)?;
|
||||
Ok((username.clone(), pwd))
|
||||
}
|
||||
}
|
||||
@@ -102,10 +97,10 @@ pub mod test {
|
||||
|
||||
use common_test_util::temp_dir::create_temp_dir;
|
||||
|
||||
use crate::UserProvider;
|
||||
use crate::user_info::DefaultUserInfo;
|
||||
use crate::user_provider::static_user_provider::StaticUserProvider;
|
||||
use crate::user_provider::{Identity, Password};
|
||||
use crate::UserProvider;
|
||||
|
||||
async fn test_authenticate(provider: &dyn UserProvider, username: &str, password: &str) {
|
||||
let re = provider
|
||||
@@ -143,12 +138,13 @@ pub mod test {
|
||||
let file = File::create(&file_path);
|
||||
let file = file.unwrap();
|
||||
let mut lw = LineWriter::new(file);
|
||||
assert!(lw
|
||||
.write_all(
|
||||
assert!(
|
||||
lw.write_all(
|
||||
b"root=123456
|
||||
admin=654321",
|
||||
)
|
||||
.is_ok());
|
||||
.is_ok()
|
||||
);
|
||||
lw.flush().unwrap();
|
||||
}
|
||||
|
||||
|
||||
@@ -12,7 +12,6 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::path::Path;
|
||||
use std::sync::mpsc::channel;
|
||||
use std::sync::{Arc, Mutex};
|
||||
@@ -20,20 +19,20 @@ use std::sync::{Arc, Mutex};
|
||||
use async_trait::async_trait;
|
||||
use common_telemetry::{info, warn};
|
||||
use notify::{EventKind, RecursiveMode, Watcher};
|
||||
use snafu::{ensure, ResultExt};
|
||||
use snafu::{ResultExt, ensure};
|
||||
|
||||
use crate::error::{FileWatchSnafu, InvalidConfigSnafu, Result};
|
||||
use crate::user_info::DefaultUserInfo;
|
||||
use crate::user_provider::{authenticate_with_credential, load_credential_from_file};
|
||||
use crate::user_provider::{UserInfoMap, authenticate_with_credential, load_credential_from_file};
|
||||
use crate::{Identity, Password, UserInfoRef, UserProvider};
|
||||
|
||||
pub(crate) const WATCH_FILE_USER_PROVIDER: &str = "watch_file_user_provider";
|
||||
|
||||
type WatchedCredentialRef = Arc<Mutex<Option<HashMap<String, Vec<u8>>>>>;
|
||||
type WatchedCredentialRef = Arc<Mutex<UserInfoMap>>;
|
||||
|
||||
/// A user provider that reads user credential from a file and watches the file for changes.
|
||||
///
|
||||
/// Empty file is invalid; but file not exist means every user can be authenticated.
|
||||
/// Both empty file and non-existent file are invalid and will cause initialization to fail.
|
||||
#[derive(Debug)]
|
||||
pub(crate) struct WatchFileUserProvider {
|
||||
users: WatchedCredentialRef,
|
||||
}
|
||||
@@ -108,16 +107,7 @@ impl UserProvider for WatchFileUserProvider {
|
||||
|
||||
async fn authenticate(&self, id: Identity<'_>, password: Password<'_>) -> Result<UserInfoRef> {
|
||||
let users = self.users.lock().expect("users credential must be valid");
|
||||
if let Some(users) = users.as_ref() {
|
||||
authenticate_with_credential(users, id, password)
|
||||
} else {
|
||||
match id {
|
||||
Identity::UserId(id, _) => {
|
||||
warn!(id, "User provider file not exist, allow all users");
|
||||
Ok(DefaultUserInfo::with_name(id))
|
||||
}
|
||||
}
|
||||
}
|
||||
authenticate_with_credential(&users, id, password)
|
||||
}
|
||||
|
||||
async fn authorize(&self, _: &str, _: &str, _: &UserInfoRef) -> Result<()> {
|
||||
@@ -133,9 +123,9 @@ pub mod test {
|
||||
use common_test_util::temp_dir::create_temp_dir;
|
||||
use tokio::time::sleep;
|
||||
|
||||
use crate::UserProvider;
|
||||
use crate::user_provider::watch_file_user_provider::WatchFileUserProvider;
|
||||
use crate::user_provider::{Identity, Password};
|
||||
use crate::UserProvider;
|
||||
|
||||
async fn test_authenticate(
|
||||
provider: &dyn UserProvider,
|
||||
@@ -178,6 +168,21 @@ pub mod test {
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_file_provider_initialization_with_missing_file() {
|
||||
common_telemetry::init_default_ut_logging();
|
||||
|
||||
let dir = create_temp_dir("test_missing_file");
|
||||
let file_path = format!("{}/non_existent_file", dir.path().to_str().unwrap());
|
||||
|
||||
// Try to create provider with non-existent file should fail
|
||||
let result = WatchFileUserProvider::new(file_path.as_str());
|
||||
assert!(result.is_err());
|
||||
|
||||
let error = result.unwrap_err();
|
||||
assert!(error.to_string().contains("UserProvider file must exist"));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_file_provider() {
|
||||
common_telemetry::init_default_ut_logging();
|
||||
@@ -202,9 +207,10 @@ pub mod test {
|
||||
|
||||
// remove the tmp file
|
||||
assert!(std::fs::remove_file(&file_path).is_ok());
|
||||
test_authenticate(&provider, "root", "123456", true, Some(timeout)).await;
|
||||
// When file is deleted during runtime, keep the last known good credentials
|
||||
test_authenticate(&provider, "root", "654321", true, Some(timeout)).await;
|
||||
test_authenticate(&provider, "admin", "654321", true, Some(timeout)).await;
|
||||
test_authenticate(&provider, "root", "123456", false, Some(timeout)).await;
|
||||
test_authenticate(&provider, "admin", "654321", false, Some(timeout)).await;
|
||||
|
||||
// recreate the tmp file
|
||||
assert!(std::fs::write(&file_path, "root=123456\n").is_ok());
|
||||
|
||||
6
src/cache/src/lib.rs
vendored
@@ -19,9 +19,9 @@ use std::time::Duration;
|
||||
|
||||
use catalog::kvbackend::new_table_cache;
|
||||
use common_meta::cache::{
|
||||
new_schema_cache, new_table_flownode_set_cache, new_table_info_cache, new_table_name_cache,
|
||||
new_table_route_cache, new_table_schema_cache, new_view_info_cache, CacheRegistry,
|
||||
CacheRegistryBuilder, LayeredCacheRegistryBuilder,
|
||||
CacheRegistry, CacheRegistryBuilder, LayeredCacheRegistryBuilder, new_schema_cache,
|
||||
new_table_flownode_set_cache, new_table_info_cache, new_table_name_cache,
|
||||
new_table_route_cache, new_table_schema_cache, new_view_info_cache,
|
||||
};
|
||||
use common_meta::kv_backend::KvBackendRef;
|
||||
use moka::future::CacheBuilder;
|
||||
|
||||
@@ -32,8 +32,10 @@ common-runtime.workspace = true
|
||||
common-telemetry.workspace = true
|
||||
common-time.workspace = true
|
||||
common-version.workspace = true
|
||||
common-workload.workspace = true
|
||||
dashmap.workspace = true
|
||||
datafusion.workspace = true
|
||||
datafusion-pg-catalog.workspace = true
|
||||
datatypes.workspace = true
|
||||
futures.workspace = true
|
||||
futures-util.workspace = true
|
||||
@@ -47,7 +49,7 @@ paste.workspace = true
|
||||
prometheus.workspace = true
|
||||
promql-parser.workspace = true
|
||||
rand.workspace = true
|
||||
rustc-hash.workspace = true
|
||||
serde.workspace = true
|
||||
serde_json.workspace = true
|
||||
session.workspace = true
|
||||
snafu.workspace = true
|
||||
|
||||
@@ -297,6 +297,20 @@ pub enum Error {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to handle query"))]
|
||||
HandleQuery {
|
||||
source: common_meta::error::Error,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to project schema"))]
|
||||
ProjectSchema {
|
||||
source: datatypes::error::Error,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
}
|
||||
|
||||
impl Error {
|
||||
@@ -369,6 +383,8 @@ impl ErrorExt for Error {
|
||||
Error::FrontendNotFound { .. } | Error::MetaClientMissing { .. } => {
|
||||
StatusCode::Unexpected
|
||||
}
|
||||
Error::HandleQuery { source, .. } => source.status_code(),
|
||||
Error::ProjectSchema { source, .. } => source.status_code(),
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -14,25 +14,34 @@
|
||||
|
||||
use api::v1::meta::ProcedureStatus;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_meta::cluster::{ClusterInfo, NodeInfo};
|
||||
use common_meta::cluster::{ClusterInfo, NodeInfo, Role};
|
||||
use common_meta::datanode::RegionStat;
|
||||
use common_meta::key::flow::flow_state::FlowStat;
|
||||
use common_meta::node_manager::DatanodeManagerRef;
|
||||
use common_meta::procedure_executor::{ExecutorContext, ProcedureExecutor};
|
||||
use common_meta::rpc::procedure;
|
||||
use common_procedure::{ProcedureInfo, ProcedureState};
|
||||
use common_query::request::QueryRequest;
|
||||
use common_recordbatch::SendableRecordBatchStream;
|
||||
use common_recordbatch::util::ChainedRecordBatchStream;
|
||||
use meta_client::MetaClientRef;
|
||||
use snafu::ResultExt;
|
||||
use store_api::storage::RegionId;
|
||||
|
||||
use crate::error;
|
||||
use crate::information_schema::InformationExtension;
|
||||
use crate::information_schema::{DatanodeInspectRequest, InformationExtension};
|
||||
|
||||
pub struct DistributedInformationExtension {
|
||||
meta_client: MetaClientRef,
|
||||
datanode_manager: DatanodeManagerRef,
|
||||
}
|
||||
|
||||
impl DistributedInformationExtension {
|
||||
pub fn new(meta_client: MetaClientRef) -> Self {
|
||||
Self { meta_client }
|
||||
pub fn new(meta_client: MetaClientRef, datanode_manager: DatanodeManagerRef) -> Self {
|
||||
Self {
|
||||
meta_client,
|
||||
datanode_manager,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -98,4 +107,39 @@ impl InformationExtension for DistributedInformationExtension {
|
||||
.map_err(BoxedError::new)
|
||||
.context(crate::error::ListFlowStatsSnafu)
|
||||
}
|
||||
|
||||
async fn inspect_datanode(
|
||||
&self,
|
||||
request: DatanodeInspectRequest,
|
||||
) -> std::result::Result<SendableRecordBatchStream, Self::Error> {
|
||||
// Aggregate results from all datanodes
|
||||
let nodes = self
|
||||
.meta_client
|
||||
.list_nodes(Some(Role::Datanode))
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(crate::error::ListNodesSnafu)?;
|
||||
|
||||
let plan = request
|
||||
.build_plan()
|
||||
.context(crate::error::DatafusionSnafu)?;
|
||||
|
||||
let mut streams = Vec::with_capacity(nodes.len());
|
||||
for node in nodes {
|
||||
let client = self.datanode_manager.datanode(&node.peer).await;
|
||||
let stream = client
|
||||
.handle_query(QueryRequest {
|
||||
plan: plan.clone(),
|
||||
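// Assumption from context: the default RegionId marks the request as not bound to a
// particular region, so each datanode evaluates the inspection plan node-wide.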
region_id: RegionId::default(),
|
||||
header: None,
|
||||
})
|
||||
.await
|
||||
.context(crate::error::HandleQuerySnafu)?;
|
||||
streams.push(stream);
|
||||
}
|
||||
|
||||
let chained =
|
||||
ChainedRecordBatchStream::new(streams).context(crate::error::CreateRecordBatchSnafu)?;
|
||||
Ok(Box::pin(chained))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -21,4 +21,4 @@ mod table_cache;
|
||||
|
||||
pub use builder::KvBackendCatalogManagerBuilder;
|
||||
pub use manager::KvBackendCatalogManager;
|
||||
pub use table_cache::{new_table_cache, TableCache, TableCacheRef};
|
||||
pub use table_cache::{TableCache, TableCacheRef, new_table_cache};
|
||||
|
||||
@@ -16,8 +16,8 @@ use std::sync::Arc;
|
||||
|
||||
use common_catalog::consts::DEFAULT_CATALOG_NAME;
|
||||
use common_meta::cache::LayeredCacheRegistryRef;
|
||||
use common_meta::key::flow::FlowMetadataManager;
|
||||
use common_meta::key::TableMetadataManager;
|
||||
use common_meta::key::flow::FlowMetadataManager;
|
||||
use common_meta::kv_backend::KvBackendRef;
|
||||
use common_procedure::ProcedureManagerRef;
|
||||
use moka::sync::Cache;
|
||||
@@ -26,8 +26,8 @@ use partition::manager::PartitionRuleManager;
|
||||
#[cfg(feature = "enterprise")]
|
||||
use crate::information_schema::InformationSchemaTableFactoryRef;
|
||||
use crate::information_schema::{InformationExtensionRef, InformationSchemaProvider};
|
||||
use crate::kvbackend::manager::{SystemCatalog, CATALOG_CACHE_MAX_CAPACITY};
|
||||
use crate::kvbackend::KvBackendCatalogManager;
|
||||
use crate::kvbackend::manager::{CATALOG_CACHE_MAX_CAPACITY, SystemCatalog};
|
||||
use crate::process_manager::ProcessManagerRef;
|
||||
use crate::system_schema::pg_catalog::PGCatalogProvider;
|
||||
|
||||
|
||||
@@ -24,12 +24,12 @@ use common_meta::error::Error::CacheNotGet;
|
||||
use common_meta::error::{CacheNotGetSnafu, Error, ExternalSnafu, GetKvCacheSnafu, Result};
|
||||
use common_meta::kv_backend::txn::{Txn, TxnResponse};
|
||||
use common_meta::kv_backend::{KvBackend, KvBackendRef, TxnService};
|
||||
use common_meta::rpc::KeyValue;
|
||||
use common_meta::rpc::store::{
|
||||
BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse, BatchPutRequest,
|
||||
BatchPutResponse, CompareAndPutRequest, CompareAndPutResponse, DeleteRangeRequest,
|
||||
DeleteRangeResponse, PutRequest, PutResponse, RangeRequest, RangeResponse,
|
||||
};
|
||||
use common_meta::rpc::KeyValue;
|
||||
use common_telemetry::debug;
|
||||
use meta_client::client::MetaClient;
|
||||
use moka::future::{Cache, CacheBuilder};
|
||||
@@ -461,17 +461,17 @@ impl KvBackend for MetaKvBackend {
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::any::Any;
|
||||
use std::sync::atomic::{AtomicU32, Ordering};
|
||||
use std::sync::Arc;
|
||||
use std::sync::atomic::{AtomicU32, Ordering};
|
||||
|
||||
use async_trait::async_trait;
|
||||
use common_meta::kv_backend::{KvBackend, TxnService};
|
||||
use common_meta::rpc::KeyValue;
|
||||
use common_meta::rpc::store::{
|
||||
BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse,
|
||||
BatchPutRequest, BatchPutResponse, DeleteRangeRequest, DeleteRangeResponse, PutRequest,
|
||||
PutResponse, RangeRequest, RangeResponse,
|
||||
};
|
||||
use common_meta::rpc::KeyValue;
|
||||
use dashmap::DashMap;
|
||||
|
||||
use super::CachedKvBackend;
|
||||
|
||||
@@ -26,12 +26,12 @@ use common_meta::cache::{
|
||||
LayeredCacheRegistryRef, TableInfoCacheRef, TableNameCacheRef, TableRoute, TableRouteCacheRef,
|
||||
ViewInfoCacheRef,
|
||||
};
|
||||
use common_meta::key::TableMetadataManagerRef;
|
||||
use common_meta::key::catalog_name::CatalogNameKey;
|
||||
use common_meta::key::flow::FlowMetadataManager;
|
||||
use common_meta::key::schema_name::SchemaNameKey;
|
||||
use common_meta::key::table_info::{TableInfoManager, TableInfoValue};
|
||||
use common_meta::key::table_name::TableNameKey;
|
||||
use common_meta::key::TableMetadataManagerRef;
|
||||
use common_meta::kv_backend::KvBackendRef;
|
||||
use common_procedure::ProcedureManagerRef;
|
||||
use futures_util::stream::BoxStream;
|
||||
@@ -41,15 +41,16 @@ use partition::manager::PartitionRuleManagerRef;
|
||||
use session::context::{Channel, QueryContext};
|
||||
use snafu::prelude::*;
|
||||
use store_api::metric_engine_consts::METRIC_ENGINE_NAME;
|
||||
use table::TableRef;
|
||||
use table::dist_table::DistTable;
|
||||
use table::metadata::{TableId, TableInfoRef};
|
||||
use table::table::numbers::{NumbersTable, NUMBERS_TABLE_NAME};
|
||||
use table::table::PartitionRules;
|
||||
use table::table::numbers::{NUMBERS_TABLE_NAME, NumbersTable};
|
||||
use table::table_name::TableName;
|
||||
use table::TableRef;
|
||||
use tokio::sync::Semaphore;
|
||||
use tokio_stream::wrappers::ReceiverStream;
|
||||
|
||||
use crate::CatalogManager;
|
||||
use crate::error::{
|
||||
CacheNotFoundSnafu, GetTableCacheSnafu, InvalidTableInfoInCatalogSnafu, ListCatalogsSnafu,
|
||||
ListSchemasSnafu, ListTablesSnafu, Result, TableMetadataManagerSnafu,
|
||||
@@ -59,9 +60,8 @@ use crate::information_schema::InformationSchemaTableFactoryRef;
|
||||
use crate::information_schema::{InformationExtensionRef, InformationSchemaProvider};
|
||||
use crate::kvbackend::TableCacheRef;
|
||||
use crate::process_manager::ProcessManagerRef;
|
||||
use crate::system_schema::pg_catalog::PGCatalogProvider;
|
||||
use crate::system_schema::SystemSchemaProvider;
|
||||
use crate::CatalogManager;
|
||||
use crate::system_schema::pg_catalog::PGCatalogProvider;
|
||||
|
||||
/// Access all existing catalog, schema and tables.
|
||||
///
|
||||
|
||||
@@ -20,9 +20,9 @@ use common_meta::instruction::CacheIdent;
|
||||
use futures::future::BoxFuture;
|
||||
use moka::future::Cache;
|
||||
use snafu::OptionExt;
|
||||
use table::TableRef;
|
||||
use table::dist_table::DistTable;
|
||||
use table::table_name::TableName;
|
||||
use table::TableRef;
|
||||
|
||||
pub type TableCacheRef = Arc<TableCache>;
|
||||
|
||||
|
||||
@@ -25,8 +25,8 @@ use common_catalog::consts::{INFORMATION_SCHEMA_NAME, PG_CATALOG_NAME};
|
||||
use futures::future::BoxFuture;
|
||||
use futures_util::stream::BoxStream;
|
||||
use session::context::QueryContext;
|
||||
use table::metadata::{TableId, TableInfoRef};
|
||||
use table::TableRef;
|
||||
use table::metadata::{TableId, TableInfoRef};
|
||||
|
||||
use crate::error::Result;
|
||||
|
||||
|
||||
@@ -14,4 +14,4 @@
|
||||
|
||||
pub mod manager;
|
||||
|
||||
pub use manager::{new_memory_catalog_manager, MemoryCatalogManager};
|
||||
pub use manager::{MemoryCatalogManager, new_memory_catalog_manager};
|
||||
|
||||
@@ -28,8 +28,8 @@ use common_meta::kv_backend::memory::MemoryKvBackend;
|
||||
use futures_util::stream::BoxStream;
|
||||
use session::context::QueryContext;
|
||||
use snafu::OptionExt;
|
||||
use table::metadata::{TableId, TableInfoRef};
|
||||
use table::TableRef;
|
||||
use table::metadata::{TableId, TableInfoRef};
|
||||
|
||||
use crate::error::{CatalogNotFoundSnafu, Result, SchemaNotFoundSnafu, TableExistsSnafu};
|
||||
use crate::information_schema::InformationSchemaProvider;
|
||||
@@ -419,7 +419,7 @@ pub fn new_memory_catalog_manager() -> Result<Arc<MemoryCatalogManager>> {
|
||||
mod tests {
|
||||
use common_catalog::consts::*;
|
||||
use futures_util::TryStreamExt;
|
||||
use table::table::numbers::{NumbersTable, NUMBERS_TABLE_NAME};
|
||||
use table::table::numbers::{NUMBERS_TABLE_NAME, NumbersTable};
|
||||
|
||||
use super::*;
|
||||
|
||||
@@ -454,16 +454,18 @@ mod tests {
|
||||
tables[0].table_info().table_id()
|
||||
);
|
||||
|
||||
assert!(catalog_list
|
||||
.table(
|
||||
DEFAULT_CATALOG_NAME,
|
||||
DEFAULT_SCHEMA_NAME,
|
||||
"not_exists",
|
||||
None
|
||||
)
|
||||
.await
|
||||
.unwrap()
|
||||
.is_none());
|
||||
assert!(
|
||||
catalog_list
|
||||
.table(
|
||||
DEFAULT_CATALOG_NAME,
|
||||
DEFAULT_SCHEMA_NAME,
|
||||
"not_exists",
|
||||
None
|
||||
)
|
||||
.await
|
||||
.unwrap()
|
||||
.is_none()
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -486,11 +488,13 @@ mod tests {
|
||||
table: NumbersTable::table(2333),
|
||||
};
|
||||
catalog.register_table_sync(register_table_req).unwrap();
|
||||
assert!(catalog
|
||||
.table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, table_name, None)
|
||||
.await
|
||||
.unwrap()
|
||||
.is_some());
|
||||
assert!(
|
||||
catalog
|
||||
.table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, table_name, None)
|
||||
.await
|
||||
.unwrap()
|
||||
.is_some()
|
||||
);
|
||||
|
||||
let deregister_table_req = DeregisterTableRequest {
|
||||
catalog: DEFAULT_CATALOG_NAME.to_string(),
|
||||
@@ -498,10 +502,12 @@ mod tests {
|
||||
table_name: table_name.to_string(),
|
||||
};
|
||||
catalog.deregister_table_sync(deregister_table_req).unwrap();
|
||||
assert!(catalog
|
||||
.table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, table_name, None)
|
||||
.await
|
||||
.unwrap()
|
||||
.is_none());
|
||||
assert!(
|
||||
catalog
|
||||
.table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, table_name, None)
|
||||
.await
|
||||
.unwrap()
|
||||
.is_none()
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -12,8 +12,8 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::hash_map::Entry;
|
||||
use std::collections::HashMap;
|
||||
use std::collections::hash_map::Entry;
|
||||
use std::fmt::{Debug, Display, Formatter};
|
||||
use std::sync::atomic::{AtomicU32, Ordering};
|
||||
use std::sync::{Arc, RwLock};
|
||||
@@ -30,7 +30,7 @@ use common_time::util::current_time_millis;
|
||||
use meta_client::MetaClientRef;
|
||||
use promql_parser::parser::EvalStmt;
|
||||
use rand::random;
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
use snafu::{OptionExt, ResultExt, ensure};
|
||||
use sql::statements::statement::Statement;
|
||||
|
||||
use crate::error;
|
||||
|
||||
@@ -137,21 +137,24 @@ impl DataSource for SystemTableDataSource {
|
||||
&self,
|
||||
request: ScanRequest,
|
||||
) -> std::result::Result<SendableRecordBatchStream, BoxedError> {
|
||||
let projection = request.projection.clone();
|
||||
let projected_schema = match &projection {
|
||||
let projected_schema = match &request.projection {
|
||||
Some(projection) => self.try_project(projection)?,
|
||||
None => self.table.schema(),
|
||||
};
|
||||
|
||||
let projection = request.projection.clone();
|
||||
let stream = self
|
||||
.table
|
||||
.to_stream(request)
|
||||
.map_err(BoxedError::new)
|
||||
.context(TablesRecordBatchSnafu)
|
||||
.map_err(BoxedError::new)?
|
||||
.map(move |batch| match &projection {
|
||||
Some(p) => batch.and_then(|b| b.try_project(p)),
|
||||
None => batch,
|
||||
.map(move |batch| match (&projection, batch) {
|
||||
// Some tables (e.g., inspect tables) already honor projection in their inner stream;
|
||||
// others ignore it and return full rows. We will only apply projection here if the
|
||||
// inner batch width doesn't match the projection size.
|
||||
(Some(p), Ok(b)) if b.num_columns() != p.len() => b.try_project(p),
|
||||
(_, res) => res,
|
||||
});
|
||||
|
||||
let stream = RecordBatchStreamWrapper {
|
||||
|
@@ -24,6 +24,7 @@ pub mod region_peers;
mod region_statistics;
mod runtime_metrics;
pub mod schemata;
mod ssts;
mod table_constraints;
mod table_names;
pub mod tables;
@@ -36,22 +37,26 @@ use common_catalog::consts::{self, DEFAULT_CATALOG_NAME, INFORMATION_SCHEMA_NAME
use common_error::ext::ErrorExt;
use common_meta::cluster::NodeInfo;
use common_meta::datanode::RegionStat;
use common_meta::key::flow::flow_state::FlowStat;
use common_meta::key::flow::FlowMetadataManager;
use common_meta::key::flow::flow_state::FlowStat;
use common_meta::kv_backend::KvBackendRef;
use common_procedure::ProcedureInfo;
use common_recordbatch::SendableRecordBatchStream;
use datafusion::error::DataFusionError;
use datafusion::logical_expr::LogicalPlan;
use datatypes::schema::SchemaRef;
use lazy_static::lazy_static;
use paste::paste;
use process_list::InformationSchemaProcessList;
use store_api::sst_entry::{ManifestSstEntry, StorageSstEntry};
use store_api::storage::{ScanRequest, TableId};
use table::metadata::TableType;
use table::TableRef;
use table::metadata::TableType;
pub use table_names::*;
use views::InformationSchemaViews;

use self::columns::InformationSchemaColumns;
use crate::CatalogManager;
use crate::error::{Error, Result};
use crate::process_manager::ProcessManagerRef;
use crate::system_schema::information_schema::cluster_info::InformationSchemaClusterInfo;
@@ -62,6 +67,9 @@ use crate::system_schema::information_schema::partitions::InformationSchemaParti
use crate::system_schema::information_schema::region_peers::InformationSchemaRegionPeers;
use crate::system_schema::information_schema::runtime_metrics::InformationSchemaMetrics;
use crate::system_schema::information_schema::schemata::InformationSchemaSchemata;
use crate::system_schema::information_schema::ssts::{
InformationSchemaSstsManifest, InformationSchemaSstsStorage,
};
use crate::system_schema::information_schema::table_constraints::InformationSchemaTableConstraints;
use crate::system_schema::information_schema::tables::InformationSchemaTables;
use crate::system_schema::memory_table::MemoryTable;
@@ -69,7 +77,6 @@ pub(crate) use crate::system_schema::predicate::Predicates;
use crate::system_schema::{
SystemSchemaProvider, SystemSchemaProviderInner, SystemTable, SystemTableRef,
};
use crate::CatalogManager;

lazy_static! {
// Memory tables in `information_schema`.
@@ -250,6 +257,12 @@ impl SystemSchemaProviderInner for InformationSchemaProvider {
.process_manager
.as_ref()
.map(|p| Arc::new(InformationSchemaProcessList::new(p.clone())) as _),
SSTS_MANIFEST => Some(Arc::new(InformationSchemaSstsManifest::new(
self.catalog_manager.clone(),
)) as _),
SSTS_STORAGE => Some(Arc::new(InformationSchemaSstsStorage::new(
self.catalog_manager.clone(),
)) as _),
_ => None,
}
}
@@ -321,6 +334,14 @@ impl InformationSchemaProvider {
REGION_STATISTICS.to_string(),
self.build_table(REGION_STATISTICS).unwrap(),
);
tables.insert(
SSTS_MANIFEST.to_string(),
self.build_table(SSTS_MANIFEST).unwrap(),
);
tables.insert(
SSTS_STORAGE.to_string(),
self.build_table(SSTS_STORAGE).unwrap(),
);
}

tables.insert(TABLES.to_string(), self.build_table(TABLES).unwrap());
@@ -409,8 +430,43 @@ pub trait InformationExtension {

/// Get the flow statistics. If no flownode is available, return `None`.
async fn flow_stats(&self) -> std::result::Result<Option<FlowStat>, Self::Error>;

/// Inspects the datanode.
async fn inspect_datanode(
&self,
request: DatanodeInspectRequest,
) -> std::result::Result<SendableRecordBatchStream, Self::Error>;
}

/// The request to inspect the datanode.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct DatanodeInspectRequest {
/// Kind to fetch from datanode.
pub kind: DatanodeInspectKind,

/// Pushdown scan configuration (projection/predicate/limit) for the returned stream.
/// This allows server-side filtering to reduce I/O and network costs.
pub scan: ScanRequest,
}

/// The kind of the datanode inspect request.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum DatanodeInspectKind {
/// List SST entries recorded in manifest
SstManifest,
/// List SST entries discovered in storage layer
SstStorage,
}

impl DatanodeInspectRequest {
/// Builds a logical plan for the datanode inspect request.
pub fn build_plan(self) -> std::result::Result<LogicalPlan, DataFusionError> {
match self.kind {
DatanodeInspectKind::SstManifest => ManifestSstEntry::build_plan(self.scan),
DatanodeInspectKind::SstStorage => StorageSstEntry::build_plan(self.scan),
}
}
}
pub struct NoopInformationExtension;

#[async_trait::async_trait]
@@ -432,4 +488,11 @@ impl InformationExtension for NoopInformationExtension {
async fn flow_stats(&self) -> std::result::Result<Option<FlowStat>, Self::Error> {
Ok(None)
}

async fn inspect_datanode(
&self,
_request: DatanodeInspectRequest,
) -> std::result::Result<SendableRecordBatchStream, Self::Error> {
Ok(common_recordbatch::RecordBatches::empty().as_stream())
}
}

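A rough usage sketch of the DatanodeInspectRequest dispatch introduced above, written against stand-in types (InspectKind, Scan, and Plan are made up here) rather than the real ScanRequest/LogicalPlan APIs; only the shape of build_plan mirrors the code.

    #[derive(Debug, Clone, Copy)]
    enum InspectKind {
        SstManifest,
        SstStorage,
    }

    // Stand-in for the pushed-down scan configuration (projection/predicate/limit).
    #[derive(Debug, Default)]
    struct Scan {
        limit: Option<usize>,
    }

    #[derive(Debug)]
    struct Plan(String);

    struct InspectRequest {
        kind: InspectKind,
        scan: Scan,
    }

    impl InspectRequest {
        // One plan builder per kind, as in DatanodeInspectRequest::build_plan.
        fn build_plan(self) -> Plan {
            match self.kind {
                InspectKind::SstManifest => Plan(format!("scan manifest SSTs, limit={:?}", self.scan.limit)),
                InspectKind::SstStorage => Plan(format!("scan storage SSTs, limit={:?}", self.scan.limit)),
            }
        }
    }

    fn main() {
        let req = InspectRequest { kind: InspectKind::SstManifest, scan: Scan { limit: Some(10) } };
        println!("{:?}", req.build_plan());
    }
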
@@ -18,37 +18,46 @@ use std::time::Duration;
use arrow_schema::SchemaRef as ArrowSchemaRef;
use common_catalog::consts::INFORMATION_SCHEMA_CLUSTER_INFO_TABLE_ID;
use common_error::ext::BoxedError;
use common_meta::cluster::NodeInfo;
use common_meta::cluster::{DatanodeStatus, NodeInfo, NodeStatus};
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
use common_time::timestamp::Timestamp;
use common_workload::DatanodeWorkloadType;
use datafusion::execution::TaskContext;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
use datatypes::prelude::{ConcreteDataType, ScalarVectorBuilder, VectorRef};
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use datatypes::timestamp::TimestampMillisecond;
use datatypes::value::Value;
use datatypes::vectors::{
Int64VectorBuilder, StringVectorBuilder, TimestampMillisecondVectorBuilder,
UInt32VectorBuilder, UInt64VectorBuilder,
};
use serde::Serialize;
use snafu::ResultExt;
use store_api::storage::{ScanRequest, TableId};

use crate::error::{CreateRecordBatchSnafu, InternalSnafu, Result};
use crate::system_schema::information_schema::{InformationTable, Predicates, CLUSTER_INFO};
use crate::system_schema::utils;
use crate::CatalogManager;
use crate::error::{CreateRecordBatchSnafu, InternalSnafu, Result};
use crate::system_schema::information_schema::{CLUSTER_INFO, InformationTable, Predicates};
use crate::system_schema::utils;

const PEER_TYPE_FRONTEND: &str = "FRONTEND";
const PEER_TYPE_METASRV: &str = "METASRV";

const PEER_ID: &str = "peer_id";
const PEER_TYPE: &str = "peer_type";
const PEER_ADDR: &str = "peer_addr";
const CPUS: &str = "cpus";
const MEMORY_BYTES: &str = "memory_bytes";
const VERSION: &str = "version";
const GIT_COMMIT: &str = "git_commit";
const START_TIME: &str = "start_time";
const UPTIME: &str = "uptime";
const ACTIVE_TIME: &str = "active_time";
const NODE_STATUS: &str = "node_status";

const INIT_CAPACITY: usize = 42;

@@ -57,11 +66,14 @@ const INIT_CAPACITY: usize = 42;
/// - `peer_id`: the peer server id.
/// - `peer_type`: the peer type, such as `datanode`, `frontend`, `metasrv` etc.
/// - `peer_addr`: the peer gRPC address.
/// - `cpus`: the number of CPUs of the peer.
/// - `memory_bytes`: the memory bytes of the peer.
/// - `version`: the build package version of the peer.
/// - `git_commit`: the build git commit hash of the peer.
/// - `start_time`: the starting time of the peer.
/// - `uptime`: the uptime of the peer.
/// - `active_time`: the time since the last activity of the peer.
/// - `node_status`: the status info of the peer.
///
#[derive(Debug)]
pub(super) struct InformationSchemaClusterInfo {
@@ -82,6 +94,8 @@ impl InformationSchemaClusterInfo {
ColumnSchema::new(PEER_ID, ConcreteDataType::int64_datatype(), false),
ColumnSchema::new(PEER_TYPE, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(PEER_ADDR, ConcreteDataType::string_datatype(), true),
ColumnSchema::new(CPUS, ConcreteDataType::uint32_datatype(), false),
ColumnSchema::new(MEMORY_BYTES, ConcreteDataType::uint64_datatype(), false),
ColumnSchema::new(VERSION, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(GIT_COMMIT, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(
@@ -91,6 +105,7 @@ impl InformationSchemaClusterInfo {
),
ColumnSchema::new(UPTIME, ConcreteDataType::string_datatype(), true),
ColumnSchema::new(ACTIVE_TIME, ConcreteDataType::string_datatype(), true),
ColumnSchema::new(NODE_STATUS, ConcreteDataType::string_datatype(), true),
]))
}

@@ -140,11 +155,14 @@ struct InformationSchemaClusterInfoBuilder {
peer_ids: Int64VectorBuilder,
peer_types: StringVectorBuilder,
peer_addrs: StringVectorBuilder,
cpus: UInt32VectorBuilder,
memory_bytes: UInt64VectorBuilder,
versions: StringVectorBuilder,
git_commits: StringVectorBuilder,
start_times: TimestampMillisecondVectorBuilder,
uptimes: StringVectorBuilder,
active_times: StringVectorBuilder,
node_status: StringVectorBuilder,
}

impl InformationSchemaClusterInfoBuilder {
@@ -155,11 +173,14 @@ impl InformationSchemaClusterInfoBuilder {
peer_ids: Int64VectorBuilder::with_capacity(INIT_CAPACITY),
peer_types: StringVectorBuilder::with_capacity(INIT_CAPACITY),
peer_addrs: StringVectorBuilder::with_capacity(INIT_CAPACITY),
cpus: UInt32VectorBuilder::with_capacity(INIT_CAPACITY),
memory_bytes: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
versions: StringVectorBuilder::with_capacity(INIT_CAPACITY),
git_commits: StringVectorBuilder::with_capacity(INIT_CAPACITY),
start_times: TimestampMillisecondVectorBuilder::with_capacity(INIT_CAPACITY),
uptimes: StringVectorBuilder::with_capacity(INIT_CAPACITY),
active_times: StringVectorBuilder::with_capacity(INIT_CAPACITY),
node_status: StringVectorBuilder::with_capacity(INIT_CAPACITY),
}
}

@@ -176,9 +197,10 @@ impl InformationSchemaClusterInfoBuilder {

fn add_node_info(&mut self, predicates: &Predicates, node_info: NodeInfo) {
let peer_type = node_info.status.role_name();
let peer_id = peer_id(peer_type, node_info.peer.id);

let row = [
(PEER_ID, &Value::from(node_info.peer.id)),
(PEER_ID, &Value::from(peer_id)),
(PEER_TYPE, &Value::from(peer_type)),
(PEER_ADDR, &Value::from(node_info.peer.addr.as_str())),
(VERSION, &Value::from(node_info.version.as_str())),
@@ -189,13 +211,7 @@ impl InformationSchemaClusterInfoBuilder {
return;
}

if peer_type == "FRONTEND" || peer_type == "METASRV" {
// Always set peer_id to be -1 for frontends and metasrvs
self.peer_ids.push(Some(-1));
} else {
self.peer_ids.push(Some(node_info.peer.id as i64));
}

self.peer_ids.push(Some(peer_id));
self.peer_types.push(Some(peer_type));
self.peer_addrs.push(Some(&node_info.peer.addr));
self.versions.push(Some(&node_info.version));
@@ -212,6 +228,8 @@ impl InformationSchemaClusterInfoBuilder {
self.start_times.push(None);
self.uptimes.push(None);
}
self.cpus.push(Some(node_info.cpus));
self.memory_bytes.push(Some(node_info.memory_bytes));

if node_info.last_activity_ts > 0 {
self.active_times.push(Some(
@@ -220,6 +238,8 @@ impl InformationSchemaClusterInfoBuilder {
} else {
self.active_times.push(None);
}
self.node_status
.push(format_node_status(&node_info).as_deref());
}

fn format_duration_since(ts: u64) -> String {
@@ -233,11 +253,14 @@ impl InformationSchemaClusterInfoBuilder {
Arc::new(self.peer_ids.finish()),
Arc::new(self.peer_types.finish()),
Arc::new(self.peer_addrs.finish()),
Arc::new(self.cpus.finish()),
Arc::new(self.memory_bytes.finish()),
Arc::new(self.versions.finish()),
Arc::new(self.git_commits.finish()),
Arc::new(self.start_times.finish()),
Arc::new(self.uptimes.finish()),
Arc::new(self.active_times.finish()),
Arc::new(self.node_status.finish()),
];
RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
}
@@ -263,3 +286,56 @@ impl DfPartitionStream for InformationSchemaClusterInfo {
))
}
}

fn peer_id(peer_type: &str, peer_id: u64) -> i64 {
if peer_type == PEER_TYPE_FRONTEND || peer_type == PEER_TYPE_METASRV {
-1
} else {
peer_id as i64
}
}

#[derive(Serialize)]
struct DisplayMetasrvStatus {
is_leader: bool,
}

#[derive(Serialize)]
struct DisplayDatanodeStatus {
workloads: Vec<DatanodeWorkloadType>,
leader_regions: usize,
follower_regions: usize,
}

impl From<&DatanodeStatus> for DisplayDatanodeStatus {
fn from(status: &DatanodeStatus) -> Self {
Self {
workloads: status
.workloads
.types
.iter()
.flat_map(|w| DatanodeWorkloadType::from_i32(*w))
.collect(),
leader_regions: status.leader_regions,
follower_regions: status.follower_regions,
}
}
}

fn format_node_status(node_info: &NodeInfo) -> Option<String> {
match &node_info.status {
NodeStatus::Datanode(datanode_status) => {
serde_json::to_string(&DisplayDatanodeStatus::from(datanode_status)).ok()
}
NodeStatus::Frontend(_) => None,
NodeStatus::Flownode(_) => None,
NodeStatus::Metasrv(metasrv_status) => {
if metasrv_status.is_leader {
serde_json::to_string(&DisplayMetasrvStatus { is_leader: true }).ok()
} else {
None
}
}
NodeStatus::Standalone => None,
}
}

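A self-contained restatement of the peer_id rule above, useful for checking the intended behavior: frontends and metasrvs report -1, every other role keeps its numeric id.

    fn peer_id(peer_type: &str, id: u64) -> i64 {
        if peer_type == "FRONTEND" || peer_type == "METASRV" {
            -1
        } else {
            id as i64
        }
    }

    fn main() {
        assert_eq!(peer_id("FRONTEND", 7), -1);
        assert_eq!(peer_id("METASRV", 7), -1);
        assert_eq!(peer_id("DATANODE", 7), 7);
    }
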
@@ -23,9 +23,9 @@ use common_error::ext::BoxedError;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
use datafusion::execution::TaskContext;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
use datatypes::prelude::{ConcreteDataType, DataType, MutableVector};
use datatypes::scalars::ScalarVectorBuilder;
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
@@ -38,12 +38,12 @@ use snafu::{OptionExt, ResultExt};
use sql::statements;
use store_api::storage::{ScanRequest, TableId};

use crate::CatalogManager;
use crate::error::{
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
};
use crate::information_schema::Predicates;
use crate::system_schema::information_schema::{InformationTable, COLUMNS};
use crate::CatalogManager;
use crate::system_schema::information_schema::{COLUMNS, InformationTable};

#[derive(Debug)]
pub(super) struct InformationSchemaColumns {

@@ -16,10 +16,10 @@ use std::sync::{Arc, Weak};

use common_catalog::consts::INFORMATION_SCHEMA_FLOW_TABLE_ID;
use common_error::ext::BoxedError;
use common_meta::key::FlowId;
use common_meta::key::flow::FlowMetadataManager;
use common_meta::key::flow::flow_info::FlowInfoValue;
use common_meta::key::flow::flow_state::FlowStat;
use common_meta::key::flow::FlowMetadataManager;
use common_meta::key::FlowId;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{DfSendableRecordBatchStream, RecordBatch, SendableRecordBatchStream};
use datafusion::execution::TaskContext;
@@ -38,14 +38,14 @@ use futures::TryStreamExt;
use snafu::{OptionExt, ResultExt};
use store_api::storage::{ScanRequest, TableId};

use crate::CatalogManager;
use crate::error::{
CreateRecordBatchSnafu, FlowInfoNotFoundSnafu, InternalSnafu, JsonSnafu, ListFlowsSnafu,
Result, UpgradeWeakCatalogManagerRefSnafu,
};
use crate::information_schema::{Predicates, FLOWS};
use crate::information_schema::{FLOWS, Predicates};
use crate::system_schema::information_schema::InformationTable;
use crate::system_schema::utils;
use crate::CatalogManager;

const INIT_CAPACITY: usize = 42;


@@ -89,9 +89,9 @@ pub(super) fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>
vec![
Arc::new(StringVector::from(vec![build_info.branch.to_string()])),
Arc::new(StringVector::from(vec![build_info.commit.to_string()])),
Arc::new(StringVector::from(vec![build_info
.commit_short
.to_string()])),
Arc::new(StringVector::from(vec![
build_info.commit_short.to_string(),
])),
Arc::new(StringVector::from(vec![build_info.clean.to_string()])),
Arc::new(StringVector::from(vec![build_info.version.to_string()])),
],
@@ -369,17 +369,9 @@ pub(super) fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>
TRIGGERS => (
vec![
string_column("TRIGGER_NAME"),
ColumnSchema::new(
"trigger_id",
ConcreteDataType::uint64_datatype(),
false,
),
ColumnSchema::new("trigger_id", ConcreteDataType::uint64_datatype(), false),
string_column("TRIGGER_DEFINITION"),
ColumnSchema::new(
"flownode_id",
ConcreteDataType::uint64_datatype(),
true,
),
ColumnSchema::new("flownode_id", ConcreteDataType::uint64_datatype(), true),
],
vec![],
),

@@ -20,9 +20,9 @@ use common_error::ext::BoxedError;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
use datafusion::execution::TaskContext;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
use datatypes::prelude::{ConcreteDataType, MutableVector, ScalarVectorBuilder, VectorRef};
use datatypes::schema::{ColumnSchema, FulltextBackend, Schema, SchemaRef};
use datatypes::value::Value;
@@ -31,11 +31,11 @@ use futures_util::TryStreamExt;
use snafu::{OptionExt, ResultExt};
use store_api::storage::{ScanRequest, TableId};

use crate::CatalogManager;
use crate::error::{
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
};
use crate::system_schema::information_schema::{InformationTable, Predicates, KEY_COLUMN_USAGE};
use crate::CatalogManager;
use crate::system_schema::information_schema::{InformationTable, KEY_COLUMN_USAGE, Predicates};

pub const CONSTRAINT_SCHEMA: &str = "constraint_schema";
pub const CONSTRAINT_NAME: &str = "constraint_name";
@@ -277,15 +277,15 @@ impl InformationSchemaKeyColumnUsageBuilder {
constraints.push(CONSTRAINT_NAME_INVERTED_INDEX);
greptime_index_type.push(INDEX_TYPE_INVERTED_INDEX);
}
if let Ok(Some(options)) = column.fulltext_options() {
if options.enable {
constraints.push(CONSTRAINT_NAME_FULLTEXT_INDEX);
let index_type = match options.backend {
FulltextBackend::Bloom => INDEX_TYPE_FULLTEXT_BLOOM,
FulltextBackend::Tantivy => INDEX_TYPE_FULLTEXT_TANTIVY,
};
greptime_index_type.push(index_type);
}
if let Ok(Some(options)) = column.fulltext_options()
&& options.enable
{
constraints.push(CONSTRAINT_NAME_FULLTEXT_INDEX);
let index_type = match options.backend {
FulltextBackend::Bloom => INDEX_TYPE_FULLTEXT_BLOOM,
FulltextBackend::Tantivy => INDEX_TYPE_FULLTEXT_TANTIVY,
};
greptime_index_type.push(index_type);
}
if column.is_skipping_indexed() {
constraints.push(CONSTRAINT_NAME_SKIPPING_INDEX);

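A standalone illustration (made-up function, not project code) of the rewrite in the hunk above: the nested `if let` plus inner `if` collapses into a single let-chain, the form used throughout this 2024-edition reformatting.

    fn positive(input: Result<Option<i32>, ()>) -> Option<i32> {
        // Old shape:
        // if let Ok(Some(v)) = input {
        //     if v > 0 { return Some(v); }
        // }
        // New shape, as a let-chain:
        if let Ok(Some(v)) = input
            && v > 0
        {
            return Some(v);
        }
        None
    }

    fn main() {
        assert_eq!(positive(Ok(Some(3))), Some(3));
        assert_eq!(positive(Ok(Some(-1))), None);
        assert_eq!(positive(Err(())), None);
    }
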
@@ -21,17 +21,16 @@ use common_error::ext::BoxedError;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
use datafusion::execution::TaskContext;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
use datatypes::prelude::{ConcreteDataType, ScalarVectorBuilder, VectorRef};
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use datatypes::timestamp::TimestampMicrosecond;
use datatypes::timestamp::TimestampSecond;
use datatypes::value::Value;
use datatypes::vectors::{
ConstantVector, Int64Vector, Int64VectorBuilder, MutableVector, StringVector,
StringVectorBuilder, TimestampMicrosecondVector, TimestampMicrosecondVectorBuilder,
UInt64VectorBuilder,
StringVectorBuilder, TimestampSecondVector, TimestampSecondVectorBuilder, UInt64VectorBuilder,
};
use futures::{StreamExt, TryStreamExt};
use partition::manager::PartitionInfo;
@@ -39,13 +38,13 @@ use snafu::{OptionExt, ResultExt};
use store_api::storage::{ScanRequest, TableId};
use table::metadata::{TableInfo, TableType};

use crate::CatalogManager;
use crate::error::{
CreateRecordBatchSnafu, FindPartitionsSnafu, InternalSnafu, PartitionManagerNotFoundSnafu,
Result, UpgradeWeakCatalogManagerRefSnafu,
};
use crate::kvbackend::KvBackendCatalogManager;
use crate::system_schema::information_schema::{InformationTable, Predicates, PARTITIONS};
use crate::CatalogManager;
use crate::system_schema::information_schema::{InformationTable, PARTITIONS, Predicates};

const TABLE_CATALOG: &str = "table_catalog";
const TABLE_SCHEMA: &str = "table_schema";
@@ -129,17 +128,17 @@ impl InformationSchemaPartitions {
ColumnSchema::new("data_free", ConcreteDataType::int64_datatype(), true),
ColumnSchema::new(
"create_time",
ConcreteDataType::timestamp_microsecond_datatype(),
ConcreteDataType::timestamp_second_datatype(),
true,
),
ColumnSchema::new(
"update_time",
ConcreteDataType::timestamp_microsecond_datatype(),
ConcreteDataType::timestamp_second_datatype(),
true,
),
ColumnSchema::new(
"check_time",
ConcreteDataType::timestamp_microsecond_datatype(),
ConcreteDataType::timestamp_second_datatype(),
true,
),
ColumnSchema::new("checksum", ConcreteDataType::int64_datatype(), true),
@@ -212,7 +211,7 @@ struct InformationSchemaPartitionsBuilder {
partition_names: StringVectorBuilder,
partition_ordinal_positions: Int64VectorBuilder,
partition_expressions: StringVectorBuilder,
create_times: TimestampMicrosecondVectorBuilder,
create_times: TimestampSecondVectorBuilder,
partition_ids: UInt64VectorBuilder,
}

@@ -232,7 +231,7 @@ impl InformationSchemaPartitionsBuilder {
partition_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
partition_ordinal_positions: Int64VectorBuilder::with_capacity(INIT_CAPACITY),
partition_expressions: StringVectorBuilder::with_capacity(INIT_CAPACITY),
create_times: TimestampMicrosecondVectorBuilder::with_capacity(INIT_CAPACITY),
create_times: TimestampSecondVectorBuilder::with_capacity(INIT_CAPACITY),
partition_ids: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
}
}
@@ -331,8 +330,8 @@ impl InformationSchemaPartitionsBuilder {
.push(Some((index + 1) as i64));
let expression = partition.partition_expr.as_ref().map(|e| e.to_string());
self.partition_expressions.push(expression.as_deref());
self.create_times.push(Some(TimestampMicrosecond::from(
table_info.meta.created_on.timestamp_millis(),
self.create_times.push(Some(TimestampSecond::from(
table_info.meta.created_on.timestamp(),
)));
self.partition_ids.push(Some(partition.id.as_u64()));
}
@@ -349,8 +348,8 @@ impl InformationSchemaPartitionsBuilder {
Arc::new(Int64Vector::from(vec![None])),
rows_num,
));
let null_timestampmicrosecond_vector = Arc::new(ConstantVector::new(
Arc::new(TimestampMicrosecondVector::from(vec![None])),
let null_timestamp_second_vector = Arc::new(ConstantVector::new(
Arc::new(TimestampSecondVector::from(vec![None])),
rows_num,
));
let partition_methods = Arc::new(ConstantVector::new(
@@ -380,8 +379,8 @@ impl InformationSchemaPartitionsBuilder {
null_i64_vector.clone(),
Arc::new(self.create_times.finish()),
// TODO(dennis): supports update_time
null_timestampmicrosecond_vector.clone(),
null_timestampmicrosecond_vector,
null_timestamp_second_vector.clone(),
null_timestamp_second_vector,
null_i64_vector,
null_string_vector.clone(),
null_string_vector.clone(),

@@ -22,9 +22,9 @@ use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
use common_time::timestamp::Timestamp;
use datafusion::execution::TaskContext;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
use datatypes::prelude::{ConcreteDataType, ScalarVectorBuilder, VectorRef};
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use datatypes::timestamp::TimestampMillisecond;
@@ -33,10 +33,10 @@ use datatypes::vectors::{StringVectorBuilder, TimestampMillisecondVectorBuilder}
use snafu::ResultExt;
use store_api::storage::{ScanRequest, TableId};

use crate::error::{CreateRecordBatchSnafu, InternalSnafu, Result};
use crate::system_schema::information_schema::{InformationTable, Predicates, PROCEDURE_INFO};
use crate::system_schema::utils;
use crate::CatalogManager;
use crate::error::{CreateRecordBatchSnafu, InternalSnafu, Result};
use crate::system_schema::information_schema::{InformationTable, PROCEDURE_INFO, Predicates};
use crate::system_schema::utils;

const PROCEDURE_ID: &str = "procedure_id";
const PROCEDURE_TYPE: &str = "procedure_type";

@@ -23,9 +23,9 @@ use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
use datafusion::common::HashMap;
use datafusion::execution::TaskContext;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
use datatypes::prelude::{ConcreteDataType, ScalarVectorBuilder, VectorRef};
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use datatypes::value::Value;
@@ -35,13 +35,13 @@ use snafu::{OptionExt, ResultExt};
use store_api::storage::{RegionId, ScanRequest, TableId};
use table::metadata::TableType;

use crate::CatalogManager;
use crate::error::{
CreateRecordBatchSnafu, FindRegionRoutesSnafu, InternalSnafu, Result,
UpgradeWeakCatalogManagerRefSnafu,
};
use crate::kvbackend::KvBackendCatalogManager;
use crate::system_schema::information_schema::{InformationTable, Predicates, REGION_PEERS};
use crate::CatalogManager;

pub const TABLE_CATALOG: &str = "table_catalog";
pub const TABLE_SCHEMA: &str = "table_schema";

@@ -30,16 +30,17 @@ use datatypes::vectors::{StringVectorBuilder, UInt32VectorBuilder, UInt64VectorB
use snafu::ResultExt;
use store_api::storage::{ScanRequest, TableId};

use crate::CatalogManager;
use crate::error::{CreateRecordBatchSnafu, InternalSnafu, Result};
use crate::information_schema::Predicates;
use crate::system_schema::information_schema::{InformationTable, REGION_STATISTICS};
use crate::system_schema::utils;
use crate::CatalogManager;

const REGION_ID: &str = "region_id";
const TABLE_ID: &str = "table_id";
const REGION_NUMBER: &str = "region_number";
const REGION_ROWS: &str = "region_rows";
const WRITTEN_BYTES: &str = "written_bytes_since_open";
const DISK_SIZE: &str = "disk_size";
const MEMTABLE_SIZE: &str = "memtable_size";
const MANIFEST_SIZE: &str = "manifest_size";
@@ -57,6 +58,7 @@ const INIT_CAPACITY: usize = 42;
/// - `table_id`: The table id.
/// - `region_number`: The region number.
/// - `region_rows`: The number of rows in region.
/// - `written_bytes_since_open`: The total bytes written of the region since region opened.
/// - `memtable_size`: The memtable size in bytes.
/// - `disk_size`: The approximate disk size in bytes.
/// - `manifest_size`: The manifest size in bytes.
@@ -84,6 +86,7 @@ impl InformationSchemaRegionStatistics {
ColumnSchema::new(TABLE_ID, ConcreteDataType::uint32_datatype(), false),
ColumnSchema::new(REGION_NUMBER, ConcreteDataType::uint32_datatype(), false),
ColumnSchema::new(REGION_ROWS, ConcreteDataType::uint64_datatype(), true),
ColumnSchema::new(WRITTEN_BYTES, ConcreteDataType::uint64_datatype(), true),
ColumnSchema::new(DISK_SIZE, ConcreteDataType::uint64_datatype(), true),
ColumnSchema::new(MEMTABLE_SIZE, ConcreteDataType::uint64_datatype(), true),
ColumnSchema::new(MANIFEST_SIZE, ConcreteDataType::uint64_datatype(), true),
@@ -147,6 +150,7 @@ struct InformationSchemaRegionStatisticsBuilder {
table_ids: UInt32VectorBuilder,
region_numbers: UInt32VectorBuilder,
region_rows: UInt64VectorBuilder,
written_bytes: UInt64VectorBuilder,
disk_sizes: UInt64VectorBuilder,
memtable_sizes: UInt64VectorBuilder,
manifest_sizes: UInt64VectorBuilder,
@@ -166,6 +170,7 @@ impl InformationSchemaRegionStatisticsBuilder {
table_ids: UInt32VectorBuilder::with_capacity(INIT_CAPACITY),
region_numbers: UInt32VectorBuilder::with_capacity(INIT_CAPACITY),
region_rows: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
written_bytes: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
disk_sizes: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
memtable_sizes: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
manifest_sizes: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
@@ -197,6 +202,7 @@ impl InformationSchemaRegionStatisticsBuilder {
(TABLE_ID, &Value::from(region_stat.id.table_id())),
(REGION_NUMBER, &Value::from(region_stat.id.region_number())),
(REGION_ROWS, &Value::from(region_stat.num_rows)),
(WRITTEN_BYTES, &Value::from(region_stat.written_bytes)),
(DISK_SIZE, &Value::from(region_stat.approximate_bytes)),
(MEMTABLE_SIZE, &Value::from(region_stat.memtable_size)),
(MANIFEST_SIZE, &Value::from(region_stat.manifest_size)),
@@ -216,6 +222,7 @@ impl InformationSchemaRegionStatisticsBuilder {
self.region_numbers
.push(Some(region_stat.id.region_number()));
self.region_rows.push(Some(region_stat.num_rows));
self.written_bytes.push(Some(region_stat.written_bytes));
self.disk_sizes.push(Some(region_stat.approximate_bytes));
self.memtable_sizes.push(Some(region_stat.memtable_size));
self.manifest_sizes.push(Some(region_stat.manifest_size));
@@ -232,6 +239,7 @@ impl InformationSchemaRegionStatisticsBuilder {
Arc::new(self.table_ids.finish()),
Arc::new(self.region_numbers.finish()),
Arc::new(self.region_rows.finish()),
Arc::new(self.written_bytes.finish()),
Arc::new(self.disk_sizes.finish()),
Arc::new(self.memtable_sizes.finish()),
Arc::new(self.manifest_sizes.finish()),

@@ -21,9 +21,9 @@ use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
use common_time::util::current_time_millis;
use datafusion::execution::TaskContext;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
use datatypes::prelude::{ConcreteDataType, MutableVector};
use datatypes::scalars::ScalarVectorBuilder;
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};

@@ -21,9 +21,9 @@ use common_meta::key::schema_name::SchemaNameKey;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
use datafusion::execution::TaskContext;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
use datatypes::prelude::{ConcreteDataType, ScalarVectorBuilder, VectorRef};
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use datatypes::value::Value;
@@ -31,13 +31,13 @@ use datatypes::vectors::StringVectorBuilder;
use snafu::{OptionExt, ResultExt};
use store_api::storage::{ScanRequest, TableId};

use crate::CatalogManager;
use crate::error::{
CreateRecordBatchSnafu, InternalSnafu, Result, TableMetadataManagerSnafu,
UpgradeWeakCatalogManagerRefSnafu,
};
use crate::system_schema::information_schema::{InformationTable, Predicates, SCHEMATA};
use crate::system_schema::utils;
use crate::CatalogManager;

pub const CATALOG_NAME: &str = "catalog_name";
pub const SCHEMA_NAME: &str = "schema_name";

142 src/catalog/src/system_schema/information_schema/ssts.rs (new file)
@@ -0,0 +1,142 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::{Arc, Weak};

use common_catalog::consts::{
INFORMATION_SCHEMA_SSTS_MANIFEST_TABLE_ID, INFORMATION_SCHEMA_SSTS_STORAGE_TABLE_ID,
};
use common_error::ext::BoxedError;
use common_recordbatch::SendableRecordBatchStream;
use common_recordbatch::adapter::AsyncRecordBatchStreamAdapter;
use datatypes::schema::SchemaRef;
use snafu::ResultExt;
use store_api::sst_entry::{ManifestSstEntry, StorageSstEntry};
use store_api::storage::{ScanRequest, TableId};

use crate::CatalogManager;
use crate::error::{ProjectSchemaSnafu, Result};
use crate::information_schema::{
DatanodeInspectKind, DatanodeInspectRequest, InformationTable, SSTS_MANIFEST, SSTS_STORAGE,
};
use crate::system_schema::utils;

/// Information schema table for sst manifest.
pub struct InformationSchemaSstsManifest {
schema: SchemaRef,
catalog_manager: Weak<dyn CatalogManager>,
}

impl InformationSchemaSstsManifest {
pub(super) fn new(catalog_manager: Weak<dyn CatalogManager>) -> Self {
Self {
schema: ManifestSstEntry::schema(),
catalog_manager,
}
}
}

impl InformationTable for InformationSchemaSstsManifest {
fn table_id(&self) -> TableId {
INFORMATION_SCHEMA_SSTS_MANIFEST_TABLE_ID
}

fn table_name(&self) -> &'static str {
SSTS_MANIFEST
}

fn schema(&self) -> SchemaRef {
self.schema.clone()
}

fn to_stream(&self, request: ScanRequest) -> Result<SendableRecordBatchStream> {
let schema = if let Some(p) = &request.projection {
Arc::new(self.schema.try_project(p).context(ProjectSchemaSnafu)?)
} else {
self.schema.clone()
};
let info_ext = utils::information_extension(&self.catalog_manager)?;
let req = DatanodeInspectRequest {
kind: DatanodeInspectKind::SstManifest,
scan: request,
};

let future = async move {
info_ext
.inspect_datanode(req)
.await
.map_err(BoxedError::new)
.context(common_recordbatch::error::ExternalSnafu)
};
Ok(Box::pin(AsyncRecordBatchStreamAdapter::new(
schema,
Box::pin(future),
)))
}
}

/// Information schema table for sst storage.
pub struct InformationSchemaSstsStorage {
schema: SchemaRef,
catalog_manager: Weak<dyn CatalogManager>,
}

impl InformationSchemaSstsStorage {
pub(super) fn new(catalog_manager: Weak<dyn CatalogManager>) -> Self {
Self {
schema: StorageSstEntry::schema(),
catalog_manager,
}
}
}

impl InformationTable for InformationSchemaSstsStorage {
fn table_id(&self) -> TableId {
INFORMATION_SCHEMA_SSTS_STORAGE_TABLE_ID
}

fn table_name(&self) -> &'static str {
SSTS_STORAGE
}

fn schema(&self) -> SchemaRef {
self.schema.clone()
}

fn to_stream(&self, request: ScanRequest) -> Result<SendableRecordBatchStream> {
let schema = if let Some(p) = &request.projection {
Arc::new(self.schema.try_project(p).context(ProjectSchemaSnafu)?)
} else {
self.schema.clone()
};

let info_ext = utils::information_extension(&self.catalog_manager)?;
let req = DatanodeInspectRequest {
kind: DatanodeInspectKind::SstStorage,
scan: request,
};

let future = async move {
info_ext
.inspect_datanode(req)
.await
.map_err(BoxedError::new)
.context(common_recordbatch::error::ExternalSnafu)
};
Ok(Box::pin(AsyncRecordBatchStreamAdapter::new(
schema,
Box::pin(future),
)))
}
}
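
A small standalone sketch of the schema-projection step both ssts tables above perform before delegating to inspect_datanode; the column names here are invented for illustration, not ManifestSstEntry's real schema.

    #[derive(Debug, Clone, PartialEq)]
    struct Schema(Vec<&'static str>);

    impl Schema {
        fn try_project(&self, projection: &[usize]) -> Schema {
            Schema(projection.iter().map(|&i| self.0[i]).collect())
        }
    }

    // If the scan carries a projection, expose the projected schema; otherwise the full one.
    fn output_schema(full: &Schema, projection: Option<&[usize]>) -> Schema {
        match projection {
            Some(p) => full.try_project(p),
            None => full.clone(),
        }
    }

    fn main() {
        let full = Schema(vec!["table_id", "region_id", "file_path"]);
        assert_eq!(output_schema(&full, Some(&[2])), Schema(vec!["file_path"]));
        assert_eq!(output_schema(&full, None), full);
    }
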
@@ -20,9 +20,9 @@ use common_error::ext::BoxedError;
|
||||
use common_recordbatch::adapter::RecordBatchStreamAdapter;
|
||||
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
|
||||
use datafusion::execution::TaskContext;
|
||||
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
|
||||
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
|
||||
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
|
||||
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
|
||||
use datatypes::prelude::{ConcreteDataType, MutableVector};
|
||||
use datatypes::scalars::ScalarVectorBuilder;
|
||||
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
|
||||
@@ -32,15 +32,15 @@ use futures::TryStreamExt;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use store_api::storage::{ScanRequest, TableId};
|
||||
|
||||
use crate::CatalogManager;
|
||||
use crate::error::{
|
||||
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
|
||||
};
|
||||
use crate::information_schema::Predicates;
|
||||
use crate::information_schema::key_column_usage::{
|
||||
CONSTRAINT_NAME_PRI, CONSTRAINT_NAME_TIME_INDEX,
|
||||
};
|
||||
use crate::information_schema::Predicates;
|
||||
use crate::system_schema::information_schema::{InformationTable, TABLE_CONSTRAINTS};
|
||||
use crate::CatalogManager;
|
||||
|
||||
/// The `TABLE_CONSTRAINTS` table describes which tables have constraints.
|
||||
#[derive(Debug)]
|
||||
|
||||
@@ -48,3 +48,5 @@ pub const FLOWS: &str = "flows";
|
||||
pub const PROCEDURE_INFO: &str = "procedure_info";
|
||||
pub const REGION_STATISTICS: &str = "region_statistics";
|
||||
pub const PROCESS_LIST: &str = "process_list";
|
||||
pub const SSTS_MANIFEST: &str = "ssts_manifest";
|
||||
pub const SSTS_STORAGE: &str = "ssts_storage";
|
||||
|
||||
@@ -23,9 +23,9 @@ use common_recordbatch::adapter::RecordBatchStreamAdapter;
|
||||
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
|
||||
use common_telemetry::error;
|
||||
use datafusion::execution::TaskContext;
|
||||
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
|
||||
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
|
||||
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
|
||||
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
|
||||
use datatypes::prelude::{ConcreteDataType, ScalarVectorBuilder, VectorRef};
|
||||
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
|
||||
use datatypes::value::Value;
|
||||
@@ -37,12 +37,12 @@ use snafu::{OptionExt, ResultExt};
|
||||
use store_api::storage::{RegionId, ScanRequest, TableId};
|
||||
use table::metadata::{TableInfo, TableType};
|
||||
|
||||
use crate::CatalogManager;
|
||||
use crate::error::{
|
||||
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
|
||||
};
|
||||
use crate::system_schema::information_schema::{InformationTable, Predicates, TABLES};
|
||||
use crate::system_schema::utils;
|
||||
use crate::CatalogManager;
|
||||
|
||||
pub const TABLE_CATALOG: &str = "table_catalog";
|
||||
pub const TABLE_SCHEMA: &str = "table_schema";
|
||||
|
||||
@@ -20,9 +20,9 @@ use common_error::ext::BoxedError;
|
||||
use common_recordbatch::adapter::RecordBatchStreamAdapter;
|
||||
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
|
||||
use datafusion::execution::TaskContext;
|
||||
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
|
||||
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
|
||||
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
|
||||
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
|
||||
use datatypes::prelude::{ConcreteDataType, ScalarVectorBuilder, VectorRef};
|
||||
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
|
||||
use datatypes::value::Value;
|
||||
@@ -32,13 +32,13 @@ use snafu::{OptionExt, ResultExt};
|
||||
use store_api::storage::{ScanRequest, TableId};
|
||||
use table::metadata::TableType;
|
||||
|
||||
use crate::CatalogManager;
|
||||
use crate::error::{
|
||||
CastManagerSnafu, CreateRecordBatchSnafu, GetViewCacheSnafu, InternalSnafu, Result,
|
||||
UpgradeWeakCatalogManagerRefSnafu, ViewInfoNotFoundSnafu,
|
||||
};
|
||||
use crate::kvbackend::KvBackendCatalogManager;
|
||||
use crate::system_schema::information_schema::{InformationTable, Predicates, VIEWS};
|
||||
use crate::CatalogManager;
|
||||
const INIT_CAPACITY: usize = 42;
|
||||
|
||||
pub const TABLE_CATALOG: &str = "table_catalog";
|
||||
|
||||
@@ -21,9 +21,9 @@ use common_error::ext::BoxedError;
|
||||
use common_recordbatch::adapter::RecordBatchStreamAdapter;
|
||||
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
|
||||
use datafusion::execution::TaskContext;
|
||||
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
|
||||
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
|
||||
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
|
||||
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
|
||||
use datatypes::schema::SchemaRef;
|
||||
use datatypes::vectors::VectorRef;
|
||||
use snafu::ResultExt;
|
||||
|
||||
@@ -12,53 +12,41 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
mod pg_catalog_memory_table;
|
||||
mod pg_class;
|
||||
mod pg_database;
|
||||
mod pg_namespace;
|
||||
mod table_names;
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::sync::{Arc, LazyLock, Weak};
|
||||
use std::sync::{Arc, Weak};
|
||||
|
||||
use common_catalog::consts::{self, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, PG_CATALOG_NAME};
|
||||
use datatypes::schema::ColumnSchema;
|
||||
use lazy_static::lazy_static;
|
||||
use paste::paste;
|
||||
use pg_catalog_memory_table::get_schema_columns;
|
||||
use pg_class::PGClass;
|
||||
use pg_database::PGDatabase;
|
||||
use pg_namespace::PGNamespace;
|
||||
use session::context::{Channel, QueryContext};
|
||||
use arrow_schema::SchemaRef;
|
||||
use async_trait::async_trait;
|
||||
use common_catalog::consts::{DEFAULT_CATALOG_NAME, PG_CATALOG_NAME, PG_CATALOG_TABLE_ID_START};
|
||||
use common_error::ext::BoxedError;
|
||||
use common_recordbatch::SendableRecordBatchStream;
|
||||
use common_recordbatch::adapter::RecordBatchStreamAdapter;
|
||||
use common_telemetry::warn;
|
||||
use datafusion::datasource::TableType;
|
||||
use datafusion::error::DataFusionError;
|
||||
use datafusion::execution::TaskContext;
|
||||
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
|
||||
use datafusion_pg_catalog::pg_catalog::catalog_info::CatalogInfo;
|
||||
use datafusion_pg_catalog::pg_catalog::{
|
||||
PG_CATALOG_TABLES, PgCatalogSchemaProvider, PgCatalogStaticTables, PgCatalogTable,
|
||||
};
|
||||
use snafu::ResultExt;
|
||||
use store_api::storage::ScanRequest;
|
||||
use table::TableRef;
|
||||
pub use table_names::*;
|
||||
use table::metadata::TableId;
|
||||
|
||||
use self::pg_namespace::oid_map::{PGNamespaceOidMap, PGNamespaceOidMapRef};
|
||||
use crate::system_schema::memory_table::MemoryTable;
|
||||
use crate::system_schema::utils::tables::u32_column;
|
||||
use crate::system_schema::{SystemSchemaProvider, SystemSchemaProviderInner, SystemTableRef};
|
||||
use crate::CatalogManager;
|
||||
|
||||
lazy_static! {
|
||||
static ref MEMORY_TABLES: &'static [&'static str] = &[table_names::PG_TYPE];
|
||||
}
|
||||
|
||||
/// The column name for the OID column.
|
||||
/// The OID column is a unique identifier of type u32 for each object in the database.
|
||||
const OID_COLUMN_NAME: &str = "oid";
|
||||
|
||||
fn oid_column() -> ColumnSchema {
|
||||
u32_column(OID_COLUMN_NAME)
|
||||
}
|
||||
use crate::error::{InternalSnafu, ProjectSchemaSnafu, Result};
|
||||
use crate::system_schema::{
|
||||
SystemSchemaProvider, SystemSchemaProviderInner, SystemTable, SystemTableRef,
|
||||
};
|
||||
|
||||
/// [`PGCatalogProvider`] is the provider for a schema named `pg_catalog`, it is not a catalog.
|
||||
pub struct PGCatalogProvider {
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
inner: PgCatalogSchemaProvider<CatalogManagerWrapper>,
|
||||
tables: HashMap<String, TableRef>,
|
||||
|
||||
// Workaround to store mapping of schema_name to a numeric id
|
||||
namespace_oid_map: PGNamespaceOidMapRef,
|
||||
table_ids: HashMap<&'static str, u32>,
|
||||
}
|
||||
|
||||
impl SystemSchemaProvider for PGCatalogProvider {
|
||||
@@ -69,30 +57,33 @@ impl SystemSchemaProvider for PGCatalogProvider {
|
||||
}
|
||||
}
|
||||
|
||||
// TODO(j0hn50n133): Not sure whether to avoid duplication with `information_schema` or not.
|
||||
macro_rules! setup_memory_table {
|
||||
($name: expr) => {
|
||||
paste! {
|
||||
{
|
||||
let (schema, columns) = get_schema_columns($name);
|
||||
Some(Arc::new(MemoryTable::new(
|
||||
consts::[<PG_CATALOG_ $name _TABLE_ID>],
|
||||
$name,
|
||||
schema,
|
||||
columns
|
||||
)) as _)
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
impl PGCatalogProvider {
|
||||
pub fn new(catalog_name: String, catalog_manager: Weak<dyn CatalogManager>) -> Self {
|
||||
// safe to expect/unwrap because it contains only schema read, this can
|
||||
// be ensured by sqlness tests
|
||||
let static_tables =
|
||||
PgCatalogStaticTables::try_new().expect("Failed to initialize static tables");
|
||||
let inner = PgCatalogSchemaProvider::try_new(
|
||||
CatalogManagerWrapper {
|
||||
catalog_name: catalog_name.clone(),
|
||||
catalog_manager,
|
||||
},
|
||||
Arc::new(static_tables),
|
||||
)
|
||||
.expect("Failed to initialize PgCatalogSchemaProvider");
|
||||
|
||||
let mut table_ids = HashMap::new();
|
||||
let mut table_id = PG_CATALOG_TABLE_ID_START;
|
||||
for name in PG_CATALOG_TABLES {
|
||||
table_ids.insert(*name, table_id);
|
||||
table_id += 1;
|
||||
}
|
||||
|
||||
let mut provider = Self {
|
||||
catalog_name,
|
||||
catalog_manager,
|
||||
inner,
|
||||
tables: HashMap::new(),
|
||||
namespace_oid_map: Arc::new(PGNamespaceOidMap::new()),
|
||||
table_ids,
|
||||
};
|
||||
provider.build_tables();
|
||||
provider
|
||||
@@ -102,23 +93,13 @@ impl PGCatalogProvider {
|
||||
// SECURITY NOTE:
|
||||
// Must follow the same security rules as [`InformationSchemaProvider::build_tables`].
|
||||
let mut tables = HashMap::new();
|
||||
// TODO(J0HN50N133): modeling the table_name as a enum type to get rid of expect/unwrap here
|
||||
// It's safe to unwrap here because we are sure that the constants have been handle correctly inside system_table.
|
||||
for name in MEMORY_TABLES.iter() {
|
||||
tables.insert(name.to_string(), self.build_table(name).expect(name));
|
||||
for name in PG_CATALOG_TABLES {
|
||||
if let Some(table) = self.build_table(name) {
|
||||
tables.insert(name.to_string(), table);
|
||||
}
|
||||
}
|
||||
tables.insert(
|
||||
PG_NAMESPACE.to_string(),
|
||||
self.build_table(PG_NAMESPACE).expect(PG_NAMESPACE),
|
||||
);
|
||||
tables.insert(
|
||||
PG_CLASS.to_string(),
|
||||
self.build_table(PG_CLASS).expect(PG_NAMESPACE),
|
||||
);
|
||||
tables.insert(
|
||||
PG_DATABASE.to_string(),
|
||||
self.build_table(PG_DATABASE).expect(PG_DATABASE),
|
||||
);
|
||||
|
||||
self.tables = tables;
|
||||
}
|
||||
}
|
||||
@@ -129,24 +110,26 @@ impl SystemSchemaProviderInner for PGCatalogProvider {
|
||||
}
|
||||
|
||||
fn system_table(&self, name: &str) -> Option<SystemTableRef> {
|
||||
match name {
|
||||
table_names::PG_TYPE => setup_memory_table!(PG_TYPE),
|
||||
table_names::PG_NAMESPACE => Some(Arc::new(PGNamespace::new(
|
||||
self.catalog_name.clone(),
|
||||
self.catalog_manager.clone(),
|
||||
self.namespace_oid_map.clone(),
|
||||
))),
|
||||
table_names::PG_CLASS => Some(Arc::new(PGClass::new(
|
||||
self.catalog_name.clone(),
|
||||
self.catalog_manager.clone(),
|
||||
self.namespace_oid_map.clone(),
|
||||
))),
|
||||
table_names::PG_DATABASE => Some(Arc::new(PGDatabase::new(
|
||||
self.catalog_name.clone(),
|
||||
self.catalog_manager.clone(),
|
||||
self.namespace_oid_map.clone(),
|
||||
))),
|
||||
_ => None,
|
||||
if let Some((table_name, table_id)) = self.table_ids.get_key_value(name) {
|
||||
let table = self.inner.build_table_by_name(name).expect(name);
|
||||
|
||||
if let Some(table) = table {
|
||||
if let Ok(system_table) = DFTableProviderAsSystemTable::try_new(
|
||||
*table_id,
|
||||
table_name,
|
||||
table::metadata::TableType::Temporary,
|
||||
table,
|
||||
) {
|
||||
Some(Arc::new(system_table))
|
||||
} else {
|
||||
warn!("failed to create pg_catalog system table {}", name);
|
||||
None
|
||||
}
|
||||
} else {
|
||||
None
|
||||
}
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
@@ -155,11 +138,177 @@ impl SystemSchemaProviderInner for PGCatalogProvider {
|
||||
}
|
||||
}
|
||||
|
||||
/// Provide query context to call the [`CatalogManager`]'s method.
|
||||
static PG_QUERY_CTX: LazyLock<QueryContext> = LazyLock::new(|| {
|
||||
QueryContext::with_channel(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, Channel::Postgres)
|
||||
});
|
||||
|
||||
fn query_ctx() -> Option<&'static QueryContext> {
|
||||
Some(&PG_QUERY_CTX)
|
||||
#[derive(Clone)]
|
||||
pub struct CatalogManagerWrapper {
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
}
|
||||
|
||||
impl CatalogManagerWrapper {
|
||||
fn catalog_manager(&self) -> std::result::Result<Arc<dyn CatalogManager>, DataFusionError> {
|
||||
self.catalog_manager.upgrade().ok_or_else(|| {
|
||||
DataFusionError::Internal("Failed to access catalog manager".to_string())
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Debug for CatalogManagerWrapper {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
f.debug_struct("CatalogManagerWrapper").finish()
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl CatalogInfo for CatalogManagerWrapper {
|
||||
async fn catalog_names(&self) -> std::result::Result<Vec<String>, DataFusionError> {
|
||||
if self.catalog_name == DEFAULT_CATALOG_NAME {
|
||||
CatalogManager::catalog_names(self.catalog_manager()?.as_ref())
|
||||
.await
|
||||
.map_err(|e| DataFusionError::External(Box::new(e)))
|
||||
} else {
|
||||
Ok(vec![self.catalog_name.to_string()])
|
||||
}
|
||||
}
|
||||
|
||||
async fn schema_names(
|
||||
&self,
|
||||
catalog_name: &str,
|
||||
) -> std::result::Result<Option<Vec<String>>, DataFusionError> {
|
||||
self.catalog_manager()?
|
||||
.schema_names(catalog_name, None)
|
||||
.await
|
||||
.map(Some)
|
||||
.map_err(|e| DataFusionError::External(Box::new(e)))
|
||||
}
|
||||
|
||||
async fn table_names(
|
||||
&self,
|
||||
catalog_name: &str,
|
||||
schema_name: &str,
|
||||
) -> std::result::Result<Option<Vec<String>>, DataFusionError> {
|
||||
self.catalog_manager()?
|
||||
.table_names(catalog_name, schema_name, None)
|
||||
.await
|
||||
.map(Some)
|
||||
.map_err(|e| DataFusionError::External(Box::new(e)))
|
||||
}
|
||||
|
||||
async fn table_schema(
|
||||
&self,
|
||||
catalog_name: &str,
|
||||
schema_name: &str,
|
||||
table_name: &str,
|
||||
) -> std::result::Result<Option<SchemaRef>, DataFusionError> {
|
||||
let table = self
|
||||
.catalog_manager()?
|
||||
.table(catalog_name, schema_name, table_name, None)
|
||||
.await
|
||||
.map_err(|e| DataFusionError::External(Box::new(e)))?;
|
||||
|
||||
Ok(table.map(|t| t.schema().arrow_schema().clone()))
|
||||
}
|
||||
|
||||
async fn table_type(
|
||||
&self,
|
||||
catalog_name: &str,
|
||||
schema_name: &str,
|
||||
table_name: &str,
|
||||
) -> std::result::Result<Option<TableType>, DataFusionError> {
|
||||
let table = self
|
||||
.catalog_manager()?
|
||||
.table(catalog_name, schema_name, table_name, None)
|
||||
.await
|
||||
.map_err(|e| DataFusionError::External(Box::new(e)))?;
|
||||
|
||||
Ok(table.map(|t| t.table_type().into()))
|
||||
}
|
||||
}
|
||||
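The wrapper above only forwards to the underlying `CatalogManager`. A minimal sketch of how a caller might use it follows; the construction of the wrapper, the `greptime`/`public` names, and the import path are assumptions, not taken from this diff:

```rust
// Sketch only: assumes a `CatalogManagerWrapper` value and the `CatalogInfo`
// trait (as implemented above) are already in scope.
use datafusion::error::DataFusionError;

async fn list_public_tables(
    wrapper: &CatalogManagerWrapper, // adapter defined above
) -> Result<Vec<String>, DataFusionError> {
    // Delegates to CatalogManager::table_names through the Weak reference.
    Ok(wrapper
        .table_names("greptime", "public") // example catalog/schema names
        .await?
        .unwrap_or_default())
}
```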
|
||||
struct DFTableProviderAsSystemTable {
|
||||
pub table_id: TableId,
|
||||
pub table_name: &'static str,
|
||||
pub table_type: table::metadata::TableType,
|
||||
pub schema: Arc<datatypes::schema::Schema>,
|
||||
pub table_provider: PgCatalogTable,
|
||||
}
|
||||
|
||||
impl DFTableProviderAsSystemTable {
|
||||
pub fn try_new(
|
||||
table_id: TableId,
|
||||
table_name: &'static str,
|
||||
table_type: table::metadata::TableType,
|
||||
table_provider: PgCatalogTable,
|
||||
) -> Result<Self> {
|
||||
let arrow_schema = table_provider.schema();
|
||||
let schema = Arc::new(arrow_schema.try_into().context(ProjectSchemaSnafu)?);
|
||||
Ok(Self {
|
||||
table_id,
|
||||
table_name,
|
||||
table_type,
|
||||
schema,
|
||||
table_provider,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl SystemTable for DFTableProviderAsSystemTable {
|
||||
fn table_id(&self) -> TableId {
|
||||
self.table_id
|
||||
}
|
||||
|
||||
fn table_name(&self) -> &'static str {
|
||||
self.table_name
|
||||
}
|
||||
|
||||
fn schema(&self) -> Arc<datatypes::schema::Schema> {
|
||||
self.schema.clone()
|
||||
}
|
||||
|
||||
fn table_type(&self) -> table::metadata::TableType {
|
||||
self.table_type
|
||||
}
|
||||
|
||||
fn to_stream(&self, _request: ScanRequest) -> Result<SendableRecordBatchStream> {
|
||||
match &self.table_provider {
|
||||
PgCatalogTable::Static(table) => {
|
||||
let schema = self.schema.arrow_schema().clone();
|
||||
let data = table
|
||||
.data()
|
||||
.iter()
|
||||
.map(|rb| Ok(rb.clone()))
|
||||
.collect::<Vec<_>>();
|
||||
let stream = Box::pin(DfRecordBatchStreamAdapter::new(
|
||||
schema,
|
||||
futures::stream::iter(data),
|
||||
));
|
||||
Ok(Box::pin(
|
||||
RecordBatchStreamAdapter::try_new(stream)
|
||||
.map_err(BoxedError::new)
|
||||
.context(InternalSnafu)?,
|
||||
))
|
||||
}
|
||||
|
||||
PgCatalogTable::Dynamic(table) => {
|
||||
let stream = table.execute(Arc::new(TaskContext::default()));
|
||||
Ok(Box::pin(
|
||||
RecordBatchStreamAdapter::try_new(stream)
|
||||
.map_err(BoxedError::new)
|
||||
.context(InternalSnafu)?,
|
||||
))
|
||||
}
|
||||
|
||||
PgCatalogTable::Empty(_) => {
|
||||
let schema = self.schema.arrow_schema().clone();
|
||||
let stream = Box::pin(DfRecordBatchStreamAdapter::new(
|
||||
schema,
|
||||
futures::stream::iter(vec![]),
|
||||
));
|
||||
Ok(Box::pin(
|
||||
RecordBatchStreamAdapter::try_new(stream)
|
||||
.map_err(BoxedError::new)
|
||||
.context(InternalSnafu)?,
|
||||
))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,69 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
|
||||
use datatypes::vectors::{Int16Vector, StringVector, UInt32Vector, VectorRef};
|
||||
|
||||
use crate::memory_table_cols;
|
||||
use crate::system_schema::pg_catalog::oid_column;
|
||||
use crate::system_schema::pg_catalog::table_names::PG_TYPE;
|
||||
use crate::system_schema::utils::tables::{i16_column, string_column};
|
||||
|
||||
fn pg_type_schema_columns() -> (Vec<ColumnSchema>, Vec<VectorRef>) {
|
||||
// TODO(j0hn50n133): acquire this information from `DataType` instead of hardcoding it to avoid regression.
|
||||
memory_table_cols!(
|
||||
[oid, typname, typlen],
|
||||
[
|
||||
(1, "String", -1),
|
||||
(2, "Binary", -1),
|
||||
(3, "Int8", 1),
|
||||
(4, "Int16", 2),
|
||||
(5, "Int32", 4),
|
||||
(6, "Int64", 8),
|
||||
(7, "UInt8", 1),
|
||||
(8, "UInt16", 2),
|
||||
(9, "UInt32", 4),
|
||||
(10, "UInt64", 8),
|
||||
(11, "Float32", 4),
|
||||
(12, "Float64", 8),
|
||||
(13, "Decimal", 16),
|
||||
(14, "Date", 4),
|
||||
(15, "DateTime", 8),
|
||||
(16, "Timestamp", 8),
|
||||
(17, "Time", 8),
|
||||
(18, "Duration", 8),
|
||||
(19, "Interval", 16),
|
||||
(20, "List", -1),
|
||||
]
|
||||
);
|
||||
(
|
||||
// not quite identical to pg; we only follow the definition in pg
|
||||
vec![oid_column(), string_column("typname"), i16_column("typlen")],
|
||||
vec![
|
||||
Arc::new(UInt32Vector::from_vec(oid)), // oid
|
||||
Arc::new(StringVector::from(typname)),
|
||||
Arc::new(Int16Vector::from_vec(typlen)), // typlen in bytes
|
||||
],
|
||||
)
|
||||
}
|
||||
|
||||
pub(super) fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>) {
|
||||
let (column_schemas, columns): (_, Vec<VectorRef>) = match table_name {
|
||||
PG_TYPE => pg_type_schema_columns(),
|
||||
_ => unreachable!("Unknown table in pg_catalog: {}", table_name),
|
||||
};
|
||||
(Arc::new(Schema::new(column_schemas)), columns)
|
||||
}
|
||||
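For readers unfamiliar with `memory_table_cols!`: conceptually it turns the column list and the row tuples into one `Vec` per column, which the code above then wraps into `UInt32Vector`, `StringVector`, and `Int16Vector`. A rough, hand-written equivalent for the first three rows (a sketch, not the macro's literal expansion):

```rust
// Hand-rolled approximation of what `memory_table_cols!([oid, typname, typlen], ...)`
// yields for the first few rows above; types chosen to match the vector constructors.
fn main() {
    let rows = [(1u32, "String", -1i16), (2, "Binary", -1), (3, "Int8", 1)];

    let oid: Vec<u32> = rows.iter().map(|r| r.0).collect();
    let typname: Vec<&str> = rows.iter().map(|r| r.1).collect();
    let typlen: Vec<i16> = rows.iter().map(|r| r.2).collect();

    assert_eq!(oid, vec![1, 2, 3]);
    assert_eq!(typname, vec!["String", "Binary", "Int8"]);
    assert_eq!(typlen, vec![-1, -1, 1]);
}
```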
@@ -1,276 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::fmt;
|
||||
use std::sync::{Arc, Weak};
|
||||
|
||||
use arrow_schema::SchemaRef as ArrowSchemaRef;
|
||||
use common_catalog::consts::PG_CATALOG_PG_CLASS_TABLE_ID;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_recordbatch::adapter::RecordBatchStreamAdapter;
|
||||
use common_recordbatch::{DfSendableRecordBatchStream, RecordBatch};
|
||||
use datafusion::execution::TaskContext;
|
||||
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
|
||||
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
|
||||
use datatypes::scalars::ScalarVectorBuilder;
|
||||
use datatypes::schema::{Schema, SchemaRef};
|
||||
use datatypes::value::Value;
|
||||
use datatypes::vectors::{StringVectorBuilder, UInt32VectorBuilder, VectorRef};
|
||||
use futures::TryStreamExt;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use store_api::storage::ScanRequest;
|
||||
use table::metadata::TableType;
|
||||
|
||||
use crate::error::{
|
||||
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
|
||||
};
|
||||
use crate::information_schema::Predicates;
|
||||
use crate::system_schema::pg_catalog::pg_namespace::oid_map::PGNamespaceOidMapRef;
|
||||
use crate::system_schema::pg_catalog::{query_ctx, OID_COLUMN_NAME, PG_CLASS};
|
||||
use crate::system_schema::utils::tables::{string_column, u32_column};
|
||||
use crate::system_schema::SystemTable;
|
||||
use crate::CatalogManager;
|
||||
|
||||
// === column name ===
|
||||
pub const RELNAME: &str = "relname";
|
||||
pub const RELNAMESPACE: &str = "relnamespace";
|
||||
pub const RELKIND: &str = "relkind";
|
||||
pub const RELOWNER: &str = "relowner";
|
||||
|
||||
// === enum value of relkind ===
|
||||
pub const RELKIND_TABLE: &str = "r";
|
||||
pub const RELKIND_VIEW: &str = "v";
|
||||
|
||||
/// The initial capacity of the vector builders.
|
||||
const INIT_CAPACITY: usize = 42;
|
||||
/// The dummy owner id used for `relowner`.
|
||||
const DUMMY_OWNER_ID: u32 = 0;
|
||||
|
||||
/// The `pg_catalog.pg_class` table implementation.
|
||||
pub(super) struct PGClass {
|
||||
schema: SchemaRef,
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
|
||||
// Workaround to convert schema_name to a numeric id
|
||||
namespace_oid_map: PGNamespaceOidMapRef,
|
||||
}
|
||||
|
||||
impl PGClass {
|
||||
pub(super) fn new(
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
namespace_oid_map: PGNamespaceOidMapRef,
|
||||
) -> Self {
|
||||
Self {
|
||||
schema: Self::schema(),
|
||||
catalog_name,
|
||||
catalog_manager,
|
||||
namespace_oid_map,
|
||||
}
|
||||
}
|
||||
|
||||
fn schema() -> SchemaRef {
|
||||
Arc::new(Schema::new(vec![
|
||||
u32_column(OID_COLUMN_NAME),
|
||||
string_column(RELNAME),
|
||||
u32_column(RELNAMESPACE),
|
||||
string_column(RELKIND),
|
||||
u32_column(RELOWNER),
|
||||
]))
|
||||
}
|
||||
|
||||
fn builder(&self) -> PGClassBuilder {
|
||||
PGClassBuilder::new(
|
||||
self.schema.clone(),
|
||||
self.catalog_name.clone(),
|
||||
self.catalog_manager.clone(),
|
||||
self.namespace_oid_map.clone(),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Debug for PGClass {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.debug_struct("PGClass")
|
||||
.field("schema", &self.schema)
|
||||
.field("catalog_name", &self.catalog_name)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl SystemTable for PGClass {
|
||||
fn table_id(&self) -> table::metadata::TableId {
|
||||
PG_CATALOG_PG_CLASS_TABLE_ID
|
||||
}
|
||||
|
||||
fn table_name(&self) -> &'static str {
|
||||
PG_CLASS
|
||||
}
|
||||
|
||||
fn schema(&self) -> SchemaRef {
|
||||
self.schema.clone()
|
||||
}
|
||||
|
||||
fn to_stream(
|
||||
&self,
|
||||
request: ScanRequest,
|
||||
) -> Result<common_recordbatch::SendableRecordBatchStream> {
|
||||
let schema = self.schema.arrow_schema().clone();
|
||||
let mut builder = self.builder();
|
||||
let stream = Box::pin(DfRecordBatchStreamAdapter::new(
|
||||
schema,
|
||||
futures::stream::once(async move {
|
||||
builder
|
||||
.make_class(Some(request))
|
||||
.await
|
||||
.map(|x| x.into_df_record_batch())
|
||||
.map_err(Into::into)
|
||||
}),
|
||||
));
|
||||
Ok(Box::pin(
|
||||
RecordBatchStreamAdapter::try_new(stream)
|
||||
.map_err(BoxedError::new)
|
||||
.context(InternalSnafu)?,
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
impl DfPartitionStream for PGClass {
|
||||
fn schema(&self) -> &ArrowSchemaRef {
|
||||
self.schema.arrow_schema()
|
||||
}
|
||||
|
||||
fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
|
||||
let schema = self.schema.arrow_schema().clone();
|
||||
let mut builder = self.builder();
|
||||
Box::pin(DfRecordBatchStreamAdapter::new(
|
||||
schema,
|
||||
futures::stream::once(async move {
|
||||
builder
|
||||
.make_class(None)
|
||||
.await
|
||||
.map(|x| x.into_df_record_batch())
|
||||
.map_err(Into::into)
|
||||
}),
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
/// Builds the `pg_catalog.pg_class` table row by row
|
||||
/// TODO(J0HN50N133): `relowner` is always the [`DUMMY_OWNER_ID`] because we don't have users.
|
||||
/// Once we have a user system, make it the actual owner of the table.
|
||||
struct PGClassBuilder {
|
||||
schema: SchemaRef,
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
namespace_oid_map: PGNamespaceOidMapRef,
|
||||
|
||||
oid: UInt32VectorBuilder,
|
||||
relname: StringVectorBuilder,
|
||||
relnamespace: UInt32VectorBuilder,
|
||||
relkind: StringVectorBuilder,
|
||||
relowner: UInt32VectorBuilder,
|
||||
}
|
||||
|
||||
impl PGClassBuilder {
|
||||
fn new(
|
||||
schema: SchemaRef,
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
namespace_oid_map: PGNamespaceOidMapRef,
|
||||
) -> Self {
|
||||
Self {
|
||||
schema,
|
||||
catalog_name,
|
||||
catalog_manager,
|
||||
namespace_oid_map,
|
||||
|
||||
oid: UInt32VectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
relname: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
relnamespace: UInt32VectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
relkind: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
relowner: UInt32VectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
}
|
||||
}
|
||||
|
||||
async fn make_class(&mut self, request: Option<ScanRequest>) -> Result<RecordBatch> {
|
||||
let catalog_name = self.catalog_name.clone();
|
||||
let catalog_manager = self
|
||||
.catalog_manager
|
||||
.upgrade()
|
||||
.context(UpgradeWeakCatalogManagerRefSnafu)?;
|
||||
let predicates = Predicates::from_scan_request(&request);
|
||||
for schema_name in catalog_manager
|
||||
.schema_names(&catalog_name, query_ctx())
|
||||
.await?
|
||||
{
|
||||
let mut stream = catalog_manager.tables(&catalog_name, &schema_name, query_ctx());
|
||||
while let Some(table) = stream.try_next().await? {
|
||||
let table_info = table.table_info();
|
||||
self.add_class(
|
||||
&predicates,
|
||||
table_info.table_id(),
|
||||
&schema_name,
|
||||
&table_info.name,
|
||||
if table_info.table_type == TableType::View {
|
||||
RELKIND_VIEW
|
||||
} else {
|
||||
RELKIND_TABLE
|
||||
},
|
||||
);
|
||||
}
|
||||
}
|
||||
self.finish()
|
||||
}
|
||||
|
||||
fn add_class(
|
||||
&mut self,
|
||||
predicates: &Predicates,
|
||||
oid: u32,
|
||||
schema: &str,
|
||||
table: &str,
|
||||
kind: &str,
|
||||
) {
|
||||
let namespace_oid = self.namespace_oid_map.get_oid(schema);
|
||||
let row = [
|
||||
(OID_COLUMN_NAME, &Value::from(oid)),
|
||||
(RELNAMESPACE, &Value::from(schema)),
|
||||
(RELNAME, &Value::from(table)),
|
||||
(RELKIND, &Value::from(kind)),
|
||||
(RELOWNER, &Value::from(DUMMY_OWNER_ID)),
|
||||
];
|
||||
|
||||
if !predicates.eval(&row) {
|
||||
return;
|
||||
}
|
||||
|
||||
self.oid.push(Some(oid));
|
||||
self.relnamespace.push(Some(namespace_oid));
|
||||
self.relname.push(Some(table));
|
||||
self.relkind.push(Some(kind));
|
||||
self.relowner.push(Some(DUMMY_OWNER_ID));
|
||||
}
|
||||
|
||||
fn finish(&mut self) -> Result<RecordBatch> {
|
||||
let columns: Vec<VectorRef> = vec![
|
||||
Arc::new(self.oid.finish()),
|
||||
Arc::new(self.relname.finish()),
|
||||
Arc::new(self.relnamespace.finish()),
|
||||
Arc::new(self.relkind.finish()),
|
||||
Arc::new(self.relowner.finish()),
|
||||
];
|
||||
RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
|
||||
}
|
||||
}
|
||||
@@ -1,223 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::{Arc, Weak};
|
||||
|
||||
use arrow_schema::SchemaRef as ArrowSchemaRef;
|
||||
use common_catalog::consts::PG_CATALOG_PG_DATABASE_TABLE_ID;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_recordbatch::adapter::RecordBatchStreamAdapter;
|
||||
use common_recordbatch::{DfSendableRecordBatchStream, RecordBatch};
|
||||
use datafusion::execution::TaskContext;
|
||||
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
|
||||
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
|
||||
use datatypes::scalars::ScalarVectorBuilder;
|
||||
use datatypes::schema::{Schema, SchemaRef};
|
||||
use datatypes::value::Value;
|
||||
use datatypes::vectors::{StringVectorBuilder, UInt32VectorBuilder, VectorRef};
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use store_api::storage::ScanRequest;
|
||||
|
||||
use crate::error::{
|
||||
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
|
||||
};
|
||||
use crate::information_schema::Predicates;
|
||||
use crate::system_schema::pg_catalog::pg_namespace::oid_map::PGNamespaceOidMapRef;
|
||||
use crate::system_schema::pg_catalog::{query_ctx, OID_COLUMN_NAME, PG_DATABASE};
|
||||
use crate::system_schema::utils::tables::{string_column, u32_column};
|
||||
use crate::system_schema::SystemTable;
|
||||
use crate::CatalogManager;
|
||||
|
||||
// === column name ===
|
||||
pub const DATNAME: &str = "datname";
|
||||
|
||||
/// The initial capacity of the vector builders.
|
||||
const INIT_CAPACITY: usize = 42;
|
||||
|
||||
/// The `pg_catalog.pg_database` table implementation.
|
||||
pub(super) struct PGDatabase {
|
||||
schema: SchemaRef,
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
|
||||
// Workaround to convert schema_name to a numeric id
|
||||
namespace_oid_map: PGNamespaceOidMapRef,
|
||||
}
|
||||
|
||||
impl std::fmt::Debug for PGDatabase {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
f.debug_struct("PGDatabase")
|
||||
.field("schema", &self.schema)
|
||||
.field("catalog_name", &self.catalog_name)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl PGDatabase {
|
||||
pub(super) fn new(
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
namespace_oid_map: PGNamespaceOidMapRef,
|
||||
) -> Self {
|
||||
Self {
|
||||
schema: Self::schema(),
|
||||
catalog_name,
|
||||
catalog_manager,
|
||||
namespace_oid_map,
|
||||
}
|
||||
}
|
||||
|
||||
fn schema() -> SchemaRef {
|
||||
Arc::new(Schema::new(vec![
|
||||
u32_column(OID_COLUMN_NAME),
|
||||
string_column(DATNAME),
|
||||
]))
|
||||
}
|
||||
|
||||
fn builder(&self) -> PGCDatabaseBuilder {
|
||||
PGCDatabaseBuilder::new(
|
||||
self.schema.clone(),
|
||||
self.catalog_name.clone(),
|
||||
self.catalog_manager.clone(),
|
||||
self.namespace_oid_map.clone(),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl DfPartitionStream for PGDatabase {
|
||||
fn schema(&self) -> &ArrowSchemaRef {
|
||||
self.schema.arrow_schema()
|
||||
}
|
||||
|
||||
fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
|
||||
let schema = self.schema.arrow_schema().clone();
|
||||
let mut builder = self.builder();
|
||||
Box::pin(DfRecordBatchStreamAdapter::new(
|
||||
schema,
|
||||
futures::stream::once(async move {
|
||||
builder
|
||||
.make_database(None)
|
||||
.await
|
||||
.map(|x| x.into_df_record_batch())
|
||||
.map_err(Into::into)
|
||||
}),
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
impl SystemTable for PGDatabase {
|
||||
fn table_id(&self) -> table::metadata::TableId {
|
||||
PG_CATALOG_PG_DATABASE_TABLE_ID
|
||||
}
|
||||
|
||||
fn table_name(&self) -> &'static str {
|
||||
PG_DATABASE
|
||||
}
|
||||
|
||||
fn schema(&self) -> SchemaRef {
|
||||
self.schema.clone()
|
||||
}
|
||||
|
||||
fn to_stream(
|
||||
&self,
|
||||
request: ScanRequest,
|
||||
) -> Result<common_recordbatch::SendableRecordBatchStream> {
|
||||
let schema = self.schema.arrow_schema().clone();
|
||||
let mut builder = self.builder();
|
||||
let stream = Box::pin(DfRecordBatchStreamAdapter::new(
|
||||
schema,
|
||||
futures::stream::once(async move {
|
||||
builder
|
||||
.make_database(Some(request))
|
||||
.await
|
||||
.map(|x| x.into_df_record_batch())
|
||||
.map_err(Into::into)
|
||||
}),
|
||||
));
|
||||
Ok(Box::pin(
|
||||
RecordBatchStreamAdapter::try_new(stream)
|
||||
.map_err(BoxedError::new)
|
||||
.context(InternalSnafu)?,
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
/// Builds the `pg_catalog.pg_database` table row by row
|
||||
/// `oid` uses the schema name as a workaround since we don't have a numeric schema id.
|
||||
/// `datname` is the schema name.
|
||||
struct PGCDatabaseBuilder {
|
||||
schema: SchemaRef,
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
namespace_oid_map: PGNamespaceOidMapRef,
|
||||
|
||||
oid: UInt32VectorBuilder,
|
||||
datname: StringVectorBuilder,
|
||||
}
|
||||
|
||||
impl PGCDatabaseBuilder {
|
||||
fn new(
|
||||
schema: SchemaRef,
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
namespace_oid_map: PGNamespaceOidMapRef,
|
||||
) -> Self {
|
||||
Self {
|
||||
schema,
|
||||
catalog_name,
|
||||
catalog_manager,
|
||||
namespace_oid_map,
|
||||
|
||||
oid: UInt32VectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
datname: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
}
|
||||
}
|
||||
|
||||
async fn make_database(&mut self, request: Option<ScanRequest>) -> Result<RecordBatch> {
|
||||
let catalog_name = self.catalog_name.clone();
|
||||
let catalog_manager = self
|
||||
.catalog_manager
|
||||
.upgrade()
|
||||
.context(UpgradeWeakCatalogManagerRefSnafu)?;
|
||||
let predicates = Predicates::from_scan_request(&request);
|
||||
for schema_name in catalog_manager
|
||||
.schema_names(&catalog_name, query_ctx())
|
||||
.await?
|
||||
{
|
||||
self.add_database(&predicates, &schema_name);
|
||||
}
|
||||
self.finish()
|
||||
}
|
||||
|
||||
fn add_database(&mut self, predicates: &Predicates, schema_name: &str) {
|
||||
let oid = self.namespace_oid_map.get_oid(schema_name);
|
||||
let row: [(&str, &Value); 2] = [
|
||||
(OID_COLUMN_NAME, &Value::from(oid)),
|
||||
(DATNAME, &Value::from(schema_name)),
|
||||
];
|
||||
|
||||
if !predicates.eval(&row) {
|
||||
return;
|
||||
}
|
||||
|
||||
self.oid.push(Some(oid));
|
||||
self.datname.push(Some(schema_name));
|
||||
}
|
||||
|
||||
fn finish(&mut self) -> Result<RecordBatch> {
|
||||
let columns: Vec<VectorRef> =
|
||||
vec![Arc::new(self.oid.finish()), Arc::new(self.datname.finish())];
|
||||
RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
|
||||
}
|
||||
}
|
||||
@@ -1,221 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//! The `pg_catalog.pg_namespace` table implementation.
|
||||
//! A namespace corresponds to a schema in GreptimeDB.
|
||||
|
||||
pub(super) mod oid_map;
|
||||
|
||||
use std::fmt;
|
||||
use std::sync::{Arc, Weak};
|
||||
|
||||
use arrow_schema::SchemaRef as ArrowSchemaRef;
|
||||
use common_catalog::consts::PG_CATALOG_PG_NAMESPACE_TABLE_ID;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_recordbatch::adapter::RecordBatchStreamAdapter;
|
||||
use common_recordbatch::{DfSendableRecordBatchStream, RecordBatch, SendableRecordBatchStream};
|
||||
use datafusion::execution::TaskContext;
|
||||
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
|
||||
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
|
||||
use datatypes::scalars::ScalarVectorBuilder;
|
||||
use datatypes::schema::{Schema, SchemaRef};
|
||||
use datatypes::value::Value;
|
||||
use datatypes::vectors::{StringVectorBuilder, UInt32VectorBuilder, VectorRef};
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use store_api::storage::ScanRequest;
|
||||
|
||||
use crate::error::{
|
||||
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
|
||||
};
|
||||
use crate::information_schema::Predicates;
|
||||
use crate::system_schema::pg_catalog::{
|
||||
query_ctx, PGNamespaceOidMapRef, OID_COLUMN_NAME, PG_NAMESPACE,
|
||||
};
|
||||
use crate::system_schema::utils::tables::{string_column, u32_column};
|
||||
use crate::system_schema::SystemTable;
|
||||
use crate::CatalogManager;
|
||||
|
||||
const NSPNAME: &str = "nspname";
|
||||
const INIT_CAPACITY: usize = 42;
|
||||
|
||||
pub(super) struct PGNamespace {
|
||||
schema: SchemaRef,
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
|
||||
// Workaround to convert schema_name to a numeric id
|
||||
oid_map: PGNamespaceOidMapRef,
|
||||
}
|
||||
|
||||
impl PGNamespace {
|
||||
pub(super) fn new(
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
oid_map: PGNamespaceOidMapRef,
|
||||
) -> Self {
|
||||
Self {
|
||||
schema: Self::schema(),
|
||||
catalog_name,
|
||||
catalog_manager,
|
||||
oid_map,
|
||||
}
|
||||
}
|
||||
|
||||
fn schema() -> SchemaRef {
|
||||
Arc::new(Schema::new(vec![
|
||||
// TODO(J0HN50N133): we do not have a numeric schema id, use schema name as a workaround. Use a proper schema id once we have it.
|
||||
u32_column(OID_COLUMN_NAME),
|
||||
string_column(NSPNAME),
|
||||
]))
|
||||
}
|
||||
|
||||
fn builder(&self) -> PGNamespaceBuilder {
|
||||
PGNamespaceBuilder::new(
|
||||
self.schema.clone(),
|
||||
self.catalog_name.clone(),
|
||||
self.catalog_manager.clone(),
|
||||
self.oid_map.clone(),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Debug for PGNamespace {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.debug_struct("PGNamespace")
|
||||
.field("schema", &self.schema)
|
||||
.field("catalog_name", &self.catalog_name)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl SystemTable for PGNamespace {
|
||||
fn schema(&self) -> SchemaRef {
|
||||
self.schema.clone()
|
||||
}
|
||||
|
||||
fn table_id(&self) -> table::metadata::TableId {
|
||||
PG_CATALOG_PG_NAMESPACE_TABLE_ID
|
||||
}
|
||||
|
||||
fn table_name(&self) -> &'static str {
|
||||
PG_NAMESPACE
|
||||
}
|
||||
|
||||
fn to_stream(&self, request: ScanRequest) -> Result<SendableRecordBatchStream> {
|
||||
let schema = self.schema.arrow_schema().clone();
|
||||
let mut builder = self.builder();
|
||||
let stream = Box::pin(DfRecordBatchStreamAdapter::new(
|
||||
schema,
|
||||
futures::stream::once(async move {
|
||||
builder
|
||||
.make_namespace(Some(request))
|
||||
.await
|
||||
.map(|x| x.into_df_record_batch())
|
||||
.map_err(Into::into)
|
||||
}),
|
||||
));
|
||||
Ok(Box::pin(
|
||||
RecordBatchStreamAdapter::try_new(stream)
|
||||
.map_err(BoxedError::new)
|
||||
.context(InternalSnafu)?,
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
impl DfPartitionStream for PGNamespace {
|
||||
fn schema(&self) -> &ArrowSchemaRef {
|
||||
self.schema.arrow_schema()
|
||||
}
|
||||
|
||||
fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
|
||||
let schema = self.schema.arrow_schema().clone();
|
||||
let mut builder = self.builder();
|
||||
Box::pin(DfRecordBatchStreamAdapter::new(
|
||||
schema,
|
||||
futures::stream::once(async move {
|
||||
builder
|
||||
.make_namespace(None)
|
||||
.await
|
||||
.map(|x| x.into_df_record_batch())
|
||||
.map_err(Into::into)
|
||||
}),
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
/// Builds the `pg_catalog.pg_namespace` table row by row
|
||||
/// `oid` uses the schema name as a workaround since we don't have a numeric schema id.
|
||||
/// `nspname` is the schema name.
|
||||
struct PGNamespaceBuilder {
|
||||
schema: SchemaRef,
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
namespace_oid_map: PGNamespaceOidMapRef,
|
||||
|
||||
oid: UInt32VectorBuilder,
|
||||
nspname: StringVectorBuilder,
|
||||
}
|
||||
|
||||
impl PGNamespaceBuilder {
|
||||
fn new(
|
||||
schema: SchemaRef,
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
namespace_oid_map: PGNamespaceOidMapRef,
|
||||
) -> Self {
|
||||
Self {
|
||||
schema,
|
||||
catalog_name,
|
||||
catalog_manager,
|
||||
namespace_oid_map,
|
||||
oid: UInt32VectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
nspname: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
}
|
||||
}
|
||||
|
||||
/// Constructs the `pg_catalog.pg_namespace` virtual table.
|
||||
async fn make_namespace(&mut self, request: Option<ScanRequest>) -> Result<RecordBatch> {
|
||||
let catalog_name = self.catalog_name.clone();
|
||||
let catalog_manager = self
|
||||
.catalog_manager
|
||||
.upgrade()
|
||||
.context(UpgradeWeakCatalogManagerRefSnafu)?;
|
||||
let predicates = Predicates::from_scan_request(&request);
|
||||
for schema_name in catalog_manager
|
||||
.schema_names(&catalog_name, query_ctx())
|
||||
.await?
|
||||
{
|
||||
self.add_namespace(&predicates, &schema_name);
|
||||
}
|
||||
self.finish()
|
||||
}
|
||||
fn finish(&mut self) -> Result<RecordBatch> {
|
||||
let columns: Vec<VectorRef> =
|
||||
vec![Arc::new(self.oid.finish()), Arc::new(self.nspname.finish())];
|
||||
RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
|
||||
}
|
||||
|
||||
fn add_namespace(&mut self, predicates: &Predicates, schema_name: &str) {
|
||||
let oid = self.namespace_oid_map.get_oid(schema_name);
|
||||
let row = [
|
||||
(OID_COLUMN_NAME, &Value::from(oid)),
|
||||
(NSPNAME, &Value::from(schema_name)),
|
||||
];
|
||||
if !predicates.eval(&row) {
|
||||
return;
|
||||
}
|
||||
self.oid.push(Some(oid));
|
||||
self.nspname.push(Some(schema_name));
|
||||
}
|
||||
}
|
||||
@@ -1,94 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::hash::BuildHasher;
|
||||
use std::sync::Arc;
|
||||
|
||||
use dashmap::DashMap;
|
||||
use rustc_hash::FxSeededState;
|
||||
|
||||
pub type PGNamespaceOidMapRef = Arc<PGNamespaceOidMap>;
|
||||
// Workaround to convert schema_name to a numeric id,
|
||||
// remove this when we have numeric schema id in greptime
|
||||
pub struct PGNamespaceOidMap {
|
||||
oid_map: DashMap<String, u32>,
|
||||
|
||||
// Rust uses SipHasher by default, which provides resistance against DoS attacks.
|
||||
// It produces different hash values on each greptime instance, which would make
|
||||
// the sqlness tests fail. We need a deterministic hash here to provide the same
|
||||
// oid for the same schema name on a best-effort basis; DoS attacks aren't a concern here.
|
||||
hasher: FxSeededState,
|
||||
}
|
||||
|
||||
impl PGNamespaceOidMap {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
oid_map: DashMap::new(),
|
||||
hasher: FxSeededState::with_seed(0), // PLEASE DO NOT MODIFY THIS SEED VALUE!!!
|
||||
}
|
||||
}
|
||||
|
||||
fn oid_is_used(&self, oid: u32) -> bool {
|
||||
self.oid_map.iter().any(|e| *e.value() == oid)
|
||||
}
|
||||
|
||||
pub fn get_oid(&self, schema_name: &str) -> u32 {
|
||||
if let Some(oid) = self.oid_map.get(schema_name) {
|
||||
*oid
|
||||
} else {
|
||||
let mut oid = self.hasher.hash_one(schema_name) as u32;
|
||||
while self.oid_is_used(oid) {
|
||||
oid = self.hasher.hash_one(oid) as u32;
|
||||
}
|
||||
self.oid_map.insert(schema_name.to_string(), oid);
|
||||
oid
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn oid_is_stable() {
|
||||
let oid_map_1 = PGNamespaceOidMap::new();
|
||||
let oid_map_2 = PGNamespaceOidMap::new();
|
||||
|
||||
let schema = "schema";
|
||||
let oid = oid_map_1.get_oid(schema);
|
||||
|
||||
// the oid stays stable within the same instance
|
||||
assert_eq!(oid, oid_map_1.get_oid(schema));
|
||||
|
||||
// the oid stays stable across different instances
|
||||
assert_eq!(oid, oid_map_2.get_oid(schema));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn oid_collision() {
|
||||
let oid_map = PGNamespaceOidMap::new();
|
||||
|
||||
let key1 = "3178510";
|
||||
let key2 = "4215648";
|
||||
|
||||
// insert them into oid_map
|
||||
let oid1 = oid_map.get_oid(key1);
|
||||
let oid2 = oid_map.get_oid(key2);
|
||||
|
||||
// they should get different ids
|
||||
assert_ne!(oid1, oid2);
|
||||
}
|
||||
}
|
||||
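The comments and tests above hinge on one property: a fixed-seed hasher gives the same oid for the same schema name in every process. A standalone sketch of that idea, assuming `rustc-hash` with seeded-state support; the collision probing done by `oid_is_used` in the real code is omitted here:

```rust
// Minimal illustration of the deterministic-oid idea; the real PGNamespaceOidMap
// above also probes for collisions before inserting into the DashMap.
use std::hash::BuildHasher;

use rustc_hash::FxSeededState;

fn stable_oid(schema_name: &str) -> u32 {
    // Same seed => same hash in every GreptimeDB instance and test run,
    // unlike the default SipHasher with its per-process random keys.
    FxSeededState::with_seed(0).hash_one(schema_name) as u32
}

fn main() {
    assert_eq!(stable_oid("public"), stable_oid("public"));
    println!("oid(public) = {}", stable_oid("public"));
}
```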
@@ -339,18 +339,22 @@ mod tests {
|
||||
assert!(!p.eval(&wrong_row).unwrap());
|
||||
assert!(p.eval(&[]).is_none());
|
||||
assert!(p.eval(&[("c", &a_value)]).is_none());
|
||||
assert!(!p
|
||||
.eval(&[(a_col.as_str(), &b_value), (b_col.as_str(), &a_value)])
|
||||
.unwrap());
|
||||
assert!(!p
|
||||
.eval(&[(a_col.as_str(), &b_value), (b_col.as_str(), &b_value)])
|
||||
.unwrap());
|
||||
assert!(p
|
||||
.eval(&[(a_col.as_ref(), &a_value), ("c", &a_value)])
|
||||
.is_none());
|
||||
assert!(!p
|
||||
.eval(&[(a_col.as_ref(), &b_value), ("c", &a_value)])
|
||||
.unwrap());
|
||||
assert!(
|
||||
!p.eval(&[(a_col.as_str(), &b_value), (b_col.as_str(), &a_value)])
|
||||
.unwrap()
|
||||
);
|
||||
assert!(
|
||||
!p.eval(&[(a_col.as_str(), &b_value), (b_col.as_str(), &b_value)])
|
||||
.unwrap()
|
||||
);
|
||||
assert!(
|
||||
p.eval(&[(a_col.as_ref(), &a_value), ("c", &a_value)])
|
||||
.is_none()
|
||||
);
|
||||
assert!(
|
||||
!p.eval(&[(a_col.as_ref(), &b_value), ("c", &a_value)])
|
||||
.unwrap()
|
||||
);
|
||||
|
||||
//Predicate::Or
|
||||
let p = Predicate::Or(Box::new(p1), Box::new(p2));
|
||||
@@ -358,18 +362,22 @@ mod tests {
|
||||
assert!(p.eval(&wrong_row).unwrap());
|
||||
assert!(p.eval(&[]).is_none());
|
||||
assert!(p.eval(&[("c", &a_value)]).is_none());
|
||||
assert!(!p
|
||||
.eval(&[(a_col.as_str(), &b_value), (b_col.as_str(), &a_value)])
|
||||
.unwrap());
|
||||
assert!(p
|
||||
.eval(&[(a_col.as_str(), &b_value), (b_col.as_str(), &b_value)])
|
||||
.unwrap());
|
||||
assert!(p
|
||||
.eval(&[(a_col.as_ref(), &a_value), ("c", &a_value)])
|
||||
.unwrap());
|
||||
assert!(p
|
||||
.eval(&[(a_col.as_ref(), &b_value), ("c", &a_value)])
|
||||
.is_none());
|
||||
assert!(
|
||||
!p.eval(&[(a_col.as_str(), &b_value), (b_col.as_str(), &a_value)])
|
||||
.unwrap()
|
||||
);
|
||||
assert!(
|
||||
p.eval(&[(a_col.as_str(), &b_value), (b_col.as_str(), &b_value)])
|
||||
.unwrap()
|
||||
);
|
||||
assert!(
|
||||
p.eval(&[(a_col.as_ref(), &a_value), ("c", &a_value)])
|
||||
.unwrap()
|
||||
);
|
||||
assert!(
|
||||
p.eval(&[(a_col.as_ref(), &b_value), ("c", &a_value)])
|
||||
.is_none()
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
||||
@@ -17,10 +17,10 @@ use std::sync::Weak;
|
||||
use common_meta::key::TableMetadataManagerRef;
|
||||
use snafu::OptionExt;
|
||||
|
||||
use crate::CatalogManager;
|
||||
use crate::error::{GetInformationExtensionSnafu, Result, UpgradeWeakCatalogManagerRefSnafu};
|
||||
use crate::information_schema::InformationExtensionRef;
|
||||
use crate::kvbackend::KvBackendCatalogManager;
|
||||
use crate::CatalogManager;
|
||||
|
||||
pub mod tables;
|
||||
|
||||
|
||||
@@ -27,22 +27,6 @@ pub fn string_column(name: &str) -> ColumnSchema {
|
||||
)
|
||||
}
|
||||
|
||||
pub fn u32_column(name: &str) -> ColumnSchema {
|
||||
ColumnSchema::new(
|
||||
str::to_lowercase(name),
|
||||
ConcreteDataType::uint32_datatype(),
|
||||
false,
|
||||
)
|
||||
}
|
||||
|
||||
pub fn i16_column(name: &str) -> ColumnSchema {
|
||||
ColumnSchema::new(
|
||||
str::to_lowercase(name),
|
||||
ConcreteDataType::int16_datatype(),
|
||||
false,
|
||||
)
|
||||
}
|
||||
|
||||
pub fn bigint_column(name: &str) -> ColumnSchema {
|
||||
ColumnSchema::new(
|
||||
str::to_lowercase(name),
|
||||
|
||||
@@ -17,27 +17,27 @@ use std::sync::Arc;
|
||||
|
||||
use bytes::Bytes;
|
||||
use common_catalog::format_full_table_name;
|
||||
use common_query::logical_plan::{rename_logical_plan_columns, SubstraitPlanDecoderRef};
|
||||
use common_query::logical_plan::{SubstraitPlanDecoderRef, rename_logical_plan_columns};
|
||||
use datafusion::common::{ResolvedTableReference, TableReference};
|
||||
use datafusion::datasource::view::ViewTable;
|
||||
use datafusion::datasource::{provider_as_source, TableProvider};
|
||||
use datafusion::datasource::{TableProvider, provider_as_source};
|
||||
use datafusion::logical_expr::TableSource;
|
||||
use itertools::Itertools;
|
||||
use session::context::QueryContextRef;
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
use snafu::{OptionExt, ResultExt, ensure};
|
||||
use table::metadata::TableType;
|
||||
use table::table::adapter::DfTableProviderAdapter;
|
||||
pub mod dummy_catalog;
|
||||
use dummy_catalog::DummyCatalogList;
|
||||
use table::TableRef;
|
||||
|
||||
use crate::CatalogManagerRef;
|
||||
use crate::error::{
|
||||
CastManagerSnafu, DecodePlanSnafu, GetViewCacheSnafu, ProjectViewColumnsSnafu,
|
||||
QueryAccessDeniedSnafu, Result, TableNotExistSnafu, ViewInfoNotFoundSnafu,
|
||||
ViewPlanColumnsChangedSnafu,
|
||||
};
|
||||
use crate::kvbackend::KvBackendCatalogManager;
|
||||
use crate::CatalogManagerRef;
|
||||
|
||||
pub struct DfTableSourceProvider {
|
||||
catalog_manager: CatalogManagerRef,
|
||||
@@ -272,7 +272,7 @@ mod tests {
|
||||
use common_query::logical_plan::SubstraitPlanDecoder;
|
||||
use datafusion::catalog::CatalogProviderList;
|
||||
use datafusion::logical_expr::builder::LogicalTableSource;
|
||||
use datafusion::logical_expr::{col, lit, LogicalPlan, LogicalPlanBuilder};
|
||||
use datafusion::logical_expr::{LogicalPlan, LogicalPlanBuilder, col, lit};
|
||||
|
||||
use crate::information_schema::NoopInformationExtension;
|
||||
|
||||
|
||||
@@ -25,8 +25,8 @@ use datafusion::datasource::TableProvider;
|
||||
use snafu::OptionExt;
|
||||
use table::table::adapter::DfTableProviderAdapter;
|
||||
|
||||
use crate::error::TableNotExistSnafu;
|
||||
use crate::CatalogManagerRef;
|
||||
use crate::error::TableNotExistSnafu;
|
||||
|
||||
/// Delegate the resolving requests to the `[CatalogManager]` unconditionally.
|
||||
#[derive(Clone)]
|
||||
|
||||
@@ -51,6 +51,7 @@ meta-srv.workspace = true
|
||||
nu-ansi-term = "0.46"
|
||||
object-store.workspace = true
|
||||
operator.workspace = true
|
||||
paste.workspace = true
|
||||
query.workspace = true
|
||||
rand.workspace = true
|
||||
reqwest.workspace = true
|
||||
|
||||
@@ -14,8 +14,8 @@
|
||||
|
||||
use std::time::Instant;
|
||||
|
||||
use common_meta::key::table_route::TableRouteValue;
|
||||
use common_meta::key::TableMetadataManagerRef;
|
||||
use common_meta::key::table_route::TableRouteValue;
|
||||
use table::table_name::TableName;
|
||||
|
||||
use crate::bench::{
|
||||
|
||||
@@ -12,11 +12,8 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// https://www.postgresql.org/docs/current/catalog-pg-database.html
|
||||
pub const PG_DATABASE: &str = "pg_database";
|
||||
// https://www.postgresql.org/docs/current/catalog-pg-namespace.html
|
||||
pub const PG_NAMESPACE: &str = "pg_namespace";
|
||||
// https://www.postgresql.org/docs/current/catalog-pg-class.html
|
||||
pub const PG_CLASS: &str = "pg_class";
|
||||
// https://www.postgresql.org/docs/current/catalog-pg-type.html
|
||||
pub const PG_TYPE: &str = "pg_type";
|
||||
mod object_store;
|
||||
mod store;
|
||||
|
||||
pub use object_store::{ObjectStoreConfig, new_fs_object_store};
|
||||
pub use store::StoreConfig;
|
||||
224
src/cli/src/common/object_store.rs
Normal file
@@ -0,0 +1,224 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use common_base::secrets::SecretString;
|
||||
use common_error::ext::BoxedError;
|
||||
use object_store::services::{Azblob, Fs, Gcs, Oss, S3};
|
||||
use object_store::util::{with_instrument_layers, with_retry_layers};
|
||||
use object_store::{AzblobConnection, GcsConnection, ObjectStore, OssConnection, S3Connection};
|
||||
use paste::paste;
|
||||
use snafu::ResultExt;
|
||||
|
||||
use crate::error::{self};
|
||||
|
||||
macro_rules! wrap_with_clap_prefix {
|
||||
(
|
||||
$new_name:ident, $prefix:literal, $base:ty, {
|
||||
$( $( #[doc = $doc:expr] )? $( #[alias = $alias:literal] )? $field:ident : $type:ty $( = $default:expr )? ),* $(,)?
|
||||
}
|
||||
) => {
|
||||
paste!{
|
||||
#[derive(clap::Parser, Debug, Clone, PartialEq, Default)]
|
||||
pub struct $new_name {
|
||||
$(
|
||||
$( #[doc = $doc] )?
|
||||
$( #[clap(alias = $alias)] )?
|
||||
#[clap(long $(, default_value_t = $default )? )]
|
||||
[<$prefix $field>]: $type,
|
||||
)*
|
||||
}
|
||||
|
||||
impl From<$new_name> for $base {
|
||||
fn from(w: $new_name) -> Self {
|
||||
Self {
|
||||
$( $field: w.[<$prefix $field>] ),*
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
wrap_with_clap_prefix! {
|
||||
PrefixedAzblobConnection,
|
||||
"azblob-",
|
||||
AzblobConnection,
|
||||
{
|
||||
#[doc = "The container of the object store."]
|
||||
container: String = Default::default(),
|
||||
#[doc = "The root of the object store."]
|
||||
root: String = Default::default(),
|
||||
#[doc = "The account name of the object store."]
|
||||
account_name: SecretString = Default::default(),
|
||||
#[doc = "The account key of the object store."]
|
||||
account_key: SecretString = Default::default(),
|
||||
#[doc = "The endpoint of the object store."]
|
||||
endpoint: String = Default::default(),
|
||||
#[doc = "The SAS token of the object store."]
|
||||
sas_token: Option<String>,
|
||||
}
|
||||
}
|
||||
|
||||
wrap_with_clap_prefix! {
|
||||
PrefixedS3Connection,
|
||||
"s3-",
|
||||
S3Connection,
|
||||
{
|
||||
#[doc = "The bucket of the object store."]
|
||||
bucket: String = Default::default(),
|
||||
#[doc = "The root of the object store."]
|
||||
root: String = Default::default(),
|
||||
#[doc = "The access key ID of the object store."]
|
||||
access_key_id: SecretString = Default::default(),
|
||||
#[doc = "The secret access key of the object store."]
|
||||
secret_access_key: SecretString = Default::default(),
|
||||
#[doc = "The endpoint of the object store."]
|
||||
endpoint: Option<String>,
|
||||
#[doc = "The region of the object store."]
|
||||
region: Option<String>,
|
||||
#[doc = "Enable virtual host style for the object store."]
|
||||
enable_virtual_host_style: bool = Default::default(),
|
||||
}
|
||||
}
|
||||
|
||||
wrap_with_clap_prefix! {
|
||||
PrefixedOssConnection,
|
||||
"oss-",
|
||||
OssConnection,
|
||||
{
|
||||
#[doc = "The bucket of the object store."]
|
||||
bucket: String = Default::default(),
|
||||
#[doc = "The root of the object store."]
|
||||
root: String = Default::default(),
|
||||
#[doc = "The access key ID of the object store."]
|
||||
access_key_id: SecretString = Default::default(),
|
||||
#[doc = "The access key secret of the object store."]
|
||||
access_key_secret: SecretString = Default::default(),
|
||||
#[doc = "The endpoint of the object store."]
|
||||
endpoint: String = Default::default(),
|
||||
}
|
||||
}
|
||||
|
||||
wrap_with_clap_prefix! {
|
||||
PrefixedGcsConnection,
|
||||
"gcs-",
|
||||
GcsConnection,
|
||||
{
|
||||
#[doc = "The root of the object store."]
|
||||
root: String = Default::default(),
|
||||
#[doc = "The bucket of the object store."]
|
||||
bucket: String = Default::default(),
|
||||
#[doc = "The scope of the object store."]
|
||||
scope: String = Default::default(),
|
||||
#[doc = "The credential path of the object store."]
|
||||
credential_path: SecretString = Default::default(),
|
||||
#[doc = "The credential of the object store."]
|
||||
credential: SecretString = Default::default(),
|
||||
#[doc = "The endpoint of the object store."]
|
||||
endpoint: String = Default::default(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Common config for building an object store.
|
||||
#[derive(clap::Parser, Debug, Clone, PartialEq, Default)]
|
||||
pub struct ObjectStoreConfig {
|
||||
/// Whether to use S3 object store.
|
||||
#[clap(long, alias = "s3")]
|
||||
pub enable_s3: bool,
|
||||
|
||||
#[clap(flatten)]
|
||||
pub s3: PrefixedS3Connection,
|
||||
|
||||
/// Whether to use OSS.
|
||||
#[clap(long, alias = "oss")]
|
||||
pub enable_oss: bool,
|
||||
|
||||
#[clap(flatten)]
|
||||
pub oss: PrefixedOssConnection,
|
||||
|
||||
/// Whether to use GCS.
|
||||
#[clap(long, alias = "gcs")]
|
||||
pub enable_gcs: bool,
|
||||
|
||||
#[clap(flatten)]
|
||||
pub gcs: PrefixedGcsConnection,
|
||||
|
||||
/// Whether to use Azure Blob.
|
||||
#[clap(long, alias = "azblob")]
|
||||
pub enable_azblob: bool,
|
||||
|
||||
#[clap(flatten)]
|
||||
pub azblob: PrefixedAzblobConnection,
|
||||
}
|
||||
|
||||
/// Creates a new file system object store.
|
||||
pub fn new_fs_object_store(root: &str) -> std::result::Result<ObjectStore, BoxedError> {
|
||||
let builder = Fs::default().root(root);
|
||||
let object_store = ObjectStore::new(builder)
|
||||
.context(error::InitBackendSnafu)
|
||||
.map_err(BoxedError::new)?
|
||||
.finish();
|
||||
|
||||
Ok(with_instrument_layers(object_store, false))
|
||||
}
|
||||
|
||||
impl ObjectStoreConfig {
|
||||
/// Builds the object store from the config.
|
||||
pub fn build(&self) -> Result<Option<ObjectStore>, BoxedError> {
|
||||
let object_store = if self.enable_s3 {
|
||||
let s3 = S3Connection::from(self.s3.clone());
|
||||
common_telemetry::info!("Building object store with s3: {:?}", s3);
|
||||
Some(
|
||||
ObjectStore::new(S3::from(&s3))
|
||||
.context(error::InitBackendSnafu)
|
||||
.map_err(BoxedError::new)?
|
||||
.finish(),
|
||||
)
|
||||
} else if self.enable_oss {
|
||||
let oss = OssConnection::from(self.oss.clone());
|
||||
common_telemetry::info!("Building object store with oss: {:?}", oss);
|
||||
Some(
|
||||
ObjectStore::new(Oss::from(&oss))
|
||||
.context(error::InitBackendSnafu)
|
||||
.map_err(BoxedError::new)?
|
||||
.finish(),
|
||||
)
|
||||
} else if self.enable_gcs {
|
||||
let gcs = GcsConnection::from(self.gcs.clone());
|
||||
common_telemetry::info!("Building object store with gcs: {:?}", gcs);
|
||||
Some(
|
||||
ObjectStore::new(Gcs::from(&gcs))
|
||||
.context(error::InitBackendSnafu)
|
||||
.map_err(BoxedError::new)?
|
||||
.finish(),
|
||||
)
|
||||
} else if self.enable_azblob {
|
||||
let azblob = AzblobConnection::from(self.azblob.clone());
|
||||
common_telemetry::info!("Building object store with azblob: {:?}", azblob);
|
||||
Some(
|
||||
ObjectStore::new(Azblob::from(&azblob))
|
||||
.context(error::InitBackendSnafu)
|
||||
.map_err(BoxedError::new)?
|
||||
.finish(),
|
||||
)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let object_store = object_store
|
||||
.map(|object_store| with_instrument_layers(with_retry_layers(object_store), false));
|
||||
|
||||
Ok(object_store)
|
||||
}
|
||||
}
|
||||
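A hedged sketch of how a CLI tool might consume the config introduced above; the `cli::` re-export path, the example flags, and the fallback root directory are assumptions, not taken from this diff:

```rust
// Assumed usage of ObjectStoreConfig/new_fs_object_store as exposed above;
// `cli::...` paths and the example flag values are illustrative guesses.
use clap::Parser;
use cli::{new_fs_object_store, ObjectStoreConfig};

#[derive(Parser)]
struct ExportArgs {
    /// Shared object-store flags (e.g. `--enable-s3` plus the `s3-`-prefixed options).
    #[clap(flatten)]
    object_store: ObjectStoreConfig,
}

fn main() {
    let args = ExportArgs::parse();

    // `build()` returns Ok(None) when no remote backend is enabled,
    // so fall back to a local filesystem store in that case.
    let store = match args.object_store.build().expect("invalid object store config") {
        Some(remote) => remote,
        None => new_fs_object_store("/tmp/export").expect("failed to create fs store"),
    };

    println!("object store ready: {:?}", store);
}
```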
@@ -16,17 +16,17 @@ use std::sync::Arc;
|
||||
|
||||
use clap::Parser;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_meta::kv_backend::KvBackendRef;
|
||||
use common_meta::kv_backend::chroot::ChrootKvBackend;
|
||||
use common_meta::kv_backend::etcd::EtcdStore;
|
||||
use common_meta::kv_backend::KvBackendRef;
|
||||
use meta_srv::bootstrap::create_etcd_client_with_tls;
|
||||
use meta_srv::metasrv::BackendImpl;
|
||||
use meta_srv::utils::etcd::create_etcd_client_with_tls;
|
||||
use servers::tls::{TlsMode, TlsOption};
|
||||
|
||||
use crate::error::{EmptyStoreAddrsSnafu, UnsupportedMemoryBackendSnafu};
|
||||
use crate::error::EmptyStoreAddrsSnafu;
|
||||
|
||||
#[derive(Debug, Default, Parser)]
|
||||
pub(crate) struct StoreConfig {
|
||||
pub struct StoreConfig {
|
||||
/// The endpoint of the store: one of etcd, postgres, or mysql.
|
||||
///
|
||||
/// For postgres store, the format is:
|
||||
@@ -38,51 +38,65 @@ pub(crate) struct StoreConfig {
|
||||
/// For mysql store, the format is:
|
||||
/// "mysql://user:password@ip:port/dbname"
|
||||
#[clap(long, alias = "store-addr", value_delimiter = ',', num_args = 1..)]
|
||||
store_addrs: Vec<String>,
|
||||
pub store_addrs: Vec<String>,
|
||||
|
||||
/// The maximum number of operations in a transaction. Only used when using [etcd-store].
|
||||
#[clap(long, default_value = "128")]
|
||||
max_txn_ops: usize,
|
||||
pub max_txn_ops: usize,
|
||||
|
||||
/// The metadata store backend.
|
||||
#[clap(long, value_enum, default_value = "etcd-store")]
|
||||
backend: BackendImpl,
|
||||
pub backend: BackendImpl,
|
||||
|
||||
/// The key prefix of the metadata store.
|
||||
#[clap(long, default_value = "")]
|
||||
store_key_prefix: String,
|
||||
pub store_key_prefix: String,
|
||||
|
||||
/// The table name in RDS to store metadata. Only used when using [postgres-store] or [mysql-store].
|
||||
#[cfg(any(feature = "pg_kvbackend", feature = "mysql_kvbackend"))]
|
||||
#[clap(long, default_value = common_meta::kv_backend::DEFAULT_META_TABLE_NAME)]
|
||||
meta_table_name: String,
|
||||
pub meta_table_name: String,
|
||||
|
||||
/// Optional PostgreSQL schema for metadata table (defaults to current search_path if unset).
|
||||
#[cfg(feature = "pg_kvbackend")]
|
||||
#[clap(long)]
|
||||
meta_schema_name: Option<String>,
|
||||
pub meta_schema_name: Option<String>,
|
||||
/// TLS mode for backend store connections (etcd, PostgreSQL, MySQL)
|
||||
#[clap(long = "backend-tls-mode", value_enum, default_value = "disable")]
|
||||
backend_tls_mode: TlsMode,
|
||||
pub backend_tls_mode: TlsMode,
|
||||
|
||||
/// Path to TLS certificate file for backend store connections
|
||||
#[clap(long = "backend-tls-cert-path", default_value = "")]
|
||||
backend_tls_cert_path: String,
|
||||
pub backend_tls_cert_path: String,
|
||||
|
||||
/// Path to TLS private key file for backend store connections
|
||||
#[clap(long = "backend-tls-key-path", default_value = "")]
|
||||
backend_tls_key_path: String,
|
||||
pub backend_tls_key_path: String,
|
||||
|
||||
/// Path to TLS CA certificate file for backend store connections
|
||||
#[clap(long = "backend-tls-ca-cert-path", default_value = "")]
|
||||
backend_tls_ca_cert_path: String,
|
||||
pub backend_tls_ca_cert_path: String,
|
||||
|
||||
/// Enable watching TLS certificate files for changes
|
||||
#[clap(long = "backend-tls-watch")]
|
||||
backend_tls_watch: bool,
|
||||
pub backend_tls_watch: bool,
|
||||
}
|
||||
|
||||
impl StoreConfig {
|
||||
pub fn tls_config(&self) -> Option<TlsOption> {
|
||||
if self.backend_tls_mode != TlsMode::Disable {
|
||||
Some(TlsOption {
|
||||
mode: self.backend_tls_mode.clone(),
|
||||
cert_path: self.backend_tls_cert_path.clone(),
|
||||
key_path: self.backend_tls_key_path.clone(),
|
||||
ca_cert_path: self.backend_tls_ca_cert_path.clone(),
|
||||
watch: self.backend_tls_watch,
|
||||
})
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
/// Builds a [`KvBackendRef`] from the store configuration.
|
||||
pub async fn build(&self) -> Result<KvBackendRef, BoxedError> {
|
||||
let max_txn_ops = self.max_txn_ops;
|
||||
@@ -90,19 +104,14 @@ impl StoreConfig {
|
||||
if store_addrs.is_empty() {
|
||||
EmptyStoreAddrsSnafu.fail().map_err(BoxedError::new)
|
||||
} else {
|
||||
common_telemetry::info!(
|
||||
"Building kvbackend with store addrs: {:?}, backend: {:?}",
|
||||
store_addrs,
|
||||
self.backend
|
||||
);
|
||||
let kvbackend = match self.backend {
|
||||
BackendImpl::EtcdStore => {
|
||||
let tls_config = if self.backend_tls_mode != TlsMode::Disable {
|
||||
Some(TlsOption {
|
||||
mode: self.backend_tls_mode.clone(),
|
||||
cert_path: self.backend_tls_cert_path.clone(),
|
||||
key_path: self.backend_tls_key_path.clone(),
|
||||
ca_cert_path: self.backend_tls_ca_cert_path.clone(),
|
||||
watch: self.backend_tls_watch,
|
||||
})
|
||||
} else {
|
||||
None
|
||||
};
|
||||
let tls_config = self.tls_config();
|
||||
let etcd_client = create_etcd_client_with_tls(store_addrs, tls_config.as_ref())
|
||||
.await
|
||||
.map_err(BoxedError::new)?;
|
||||
@@ -111,9 +120,14 @@ impl StoreConfig {
|
||||
#[cfg(feature = "pg_kvbackend")]
|
||||
BackendImpl::PostgresStore => {
|
||||
let table_name = &self.meta_table_name;
|
||||
let pool = meta_srv::bootstrap::create_postgres_pool(store_addrs, None)
|
||||
.await
|
||||
.map_err(BoxedError::new)?;
|
||||
let tls_config = self.tls_config();
|
||||
let pool = meta_srv::utils::postgres::create_postgres_pool(
|
||||
store_addrs,
|
||||
None,
|
||||
tls_config,
|
||||
)
|
||||
.await
|
||||
.map_err(BoxedError::new)?;
|
||||
let schema_name = self.meta_schema_name.as_deref();
|
||||
Ok(common_meta::kv_backend::rds::PgStore::with_pg_pool(
|
||||
pool,
|
||||
@@ -127,9 +141,11 @@ impl StoreConfig {
|
||||
#[cfg(feature = "mysql_kvbackend")]
|
||||
BackendImpl::MysqlStore => {
|
||||
let table_name = &self.meta_table_name;
|
||||
let pool = meta_srv::bootstrap::create_mysql_pool(store_addrs)
|
||||
.await
|
||||
.map_err(BoxedError::new)?;
|
||||
let tls_config = self.tls_config();
|
||||
let pool =
|
||||
meta_srv::utils::mysql::create_mysql_pool(store_addrs, tls_config.as_ref())
|
||||
.await
|
||||
.map_err(BoxedError::new)?;
|
||||
Ok(common_meta::kv_backend::rds::MySqlStore::with_mysql_pool(
|
||||
pool,
|
||||
table_name,
|
||||
@@ -138,9 +154,20 @@ impl StoreConfig {
|
||||
.await
|
||||
.map_err(BoxedError::new)?)
|
||||
}
|
||||
BackendImpl::MemoryStore => UnsupportedMemoryBackendSnafu
|
||||
.fail()
|
||||
.map_err(BoxedError::new),
|
||||
#[cfg(not(test))]
|
||||
BackendImpl::MemoryStore => {
|
||||
use crate::error::UnsupportedMemoryBackendSnafu;
|
||||
|
||||
UnsupportedMemoryBackendSnafu
|
||||
.fail()
|
||||
.map_err(BoxedError::new)
|
||||
}
|
||||
#[cfg(test)]
|
||||
BackendImpl::MemoryStore => {
|
||||
use common_meta::kv_backend::memory::MemoryKvBackend;
|
||||
|
||||
Ok(Arc::new(MemoryKvBackend::default()) as _)
|
||||
}
|
||||
};
|
||||
if self.store_key_prefix.is_empty() {
|
||||
kvbackend
|
||||
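With the fields now public, other tools can drive `StoreConfig` directly. A hedged sketch under assumed names; the `cli::StoreConfig` re-export path and the example etcd address are guesses, not taken from this diff:

```rust
// Illustrative only: builds a kv backend from an etcd-backed StoreConfig.
use clap::Parser;
use cli::StoreConfig;

#[tokio::main]
async fn main() {
    // Equivalent to `--store-addrs 127.0.0.1:2379 --backend etcd-store`;
    // add the `--backend-tls-*` flags to have `tls_config()` return Some(...).
    let config = StoreConfig::parse_from([
        "store",
        "--store-addrs",
        "127.0.0.1:2379",
        "--backend",
        "etcd-store",
    ]);

    let _kv_backend = config.build().await.expect("failed to build kv backend");
    println!("kv backend ready");
}
```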
@@ -18,9 +18,9 @@ mod import;
|
||||
use clap::Subcommand;
|
||||
use common_error::ext::BoxedError;
|
||||
|
||||
use crate::Tool;
|
||||
use crate::data::export::ExportCommand;
|
||||
use crate::data::import::ImportCommand;
|
||||
use crate::Tool;
|
||||
|
||||
/// Command for data operations including exporting data from and importing data into GreptimeDB.
|
||||
#[derive(Subcommand)]
|
||||
|
||||
@@ -24,18 +24,18 @@ use common_error::ext::BoxedError;
|
||||
use common_telemetry::{debug, error, info};
|
||||
use object_store::layers::LoggingLayer;
|
||||
use object_store::services::Oss;
|
||||
use object_store::{services, ObjectStore};
|
||||
use object_store::{ObjectStore, services};
|
||||
use serde_json::Value;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use tokio::sync::Semaphore;
|
||||
use tokio::time::Instant;
|
||||
|
||||
use crate::database::{parse_proxy_opts, DatabaseClient};
|
||||
use crate::database::{DatabaseClient, parse_proxy_opts};
|
||||
use crate::error::{
|
||||
EmptyResultSnafu, Error, OpenDalSnafu, OutputDirNotSetSnafu, Result, S3ConfigNotSetSnafu,
|
||||
SchemaNotFoundSnafu,
|
||||
};
|
||||
use crate::{database, Tool};
|
||||
use crate::{Tool, database};
|
||||
|
||||
type TableReference = (String, String, String);
|
||||
|
||||
|
||||
@@ -25,9 +25,9 @@ use snafu::{OptionExt, ResultExt};
|
||||
use tokio::sync::Semaphore;
|
||||
use tokio::time::Instant;
|
||||
|
||||
use crate::database::{parse_proxy_opts, DatabaseClient};
|
||||
use crate::database::{DatabaseClient, parse_proxy_opts};
|
||||
use crate::error::{Error, FileIoSnafu, Result, SchemaNotFoundSnafu};
|
||||
use crate::{database, Tool};
|
||||
use crate::{Tool, database};
|
||||
|
||||
#[derive(Debug, Default, Clone, ValueEnum)]
|
||||
enum ImportTarget {
|
||||
|
||||
@@ -14,15 +14,15 @@
|
||||
|
||||
use std::time::Duration;
|
||||
|
||||
use base64::engine::general_purpose;
|
||||
use base64::Engine;
|
||||
use base64::engine::general_purpose;
|
||||
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use common_error::ext::BoxedError;
|
||||
use humantime::format_duration;
|
||||
use serde_json::Value;
|
||||
use servers::http::GreptimeQueryOutput;
|
||||
use servers::http::header::constants::GREPTIME_DB_HEADER_TIMEOUT;
|
||||
use servers::http::result::greptime_result_v1::GreptimedbV1Response;
|
||||
use servers::http::GreptimeQueryOutput;
|
||||
use snafu::ResultExt;
|
||||
|
||||
use crate::error::{
|
||||
|
||||
@@ -313,6 +313,14 @@ pub enum Error {
location: Location,
source: common_meta::error::Error,
},

#[snafu(display("Failed to get current directory"))]
GetCurrentDir {
#[snafu(implicit)]
location: Location,
#[snafu(source)]
error: std::io::Error,
},
}

pub type Result<T> = std::result::Result<T, Error>;
@@ -362,7 +370,9 @@ impl ErrorExt for Error {

Error::BuildRuntime { source, .. } => source.status_code(),

Error::CacheRequired { .. } | Error::BuildCacheRegistry { .. } => StatusCode::Internal,
Error::CacheRequired { .. }
| Error::BuildCacheRegistry { .. }
| Error::GetCurrentDir { .. } => StatusCode::Internal,
Error::MetaClientInit { source, .. } => source.status_code(),
Error::TableNotFound { .. } => StatusCode::TableNotFound,
Error::SchemaNotFound { .. } => StatusCode::DatabaseNotFound,
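
The new `GetCurrentDir` variant wraps the `std::io::Error` raised when the process working directory cannot be determined, and is mapped to `StatusCode::Internal` above. A minimal sketch of how such a variant is raised through snafu's generated `GetCurrentDirSnafu` selector, mirroring its use in `src/cli/src/utils.rs` later in this diff (the helper function name here is illustrative only):

```rust
use snafu::ResultExt;

use crate::error::{GetCurrentDirSnafu, Result};

// Hypothetical helper, only to show how the variant is constructed.
fn current_dir_string() -> Result<String> {
    // `context(GetCurrentDirSnafu)` converts the io::Error into Error::GetCurrentDir.
    let dir = std::env::current_dir().context(GetCurrentDirSnafu)?;
    Ok(dir.to_string_lossy().to_string())
}
```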

@@ -14,13 +14,16 @@

#![allow(clippy::print_stdout)]
mod bench;
mod common;
mod data;
mod database;
pub mod error;
mod metadata;
pub mod utils;

use async_trait::async_trait;
use clap::Parser;
pub use common::{ObjectStoreConfig, StoreConfig};
use common_error::ext::BoxedError;
pub use database::DatabaseClient;
use error::Result;

@@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.

mod common;
mod control;
mod repair;
mod snapshot;
@@ -21,10 +20,10 @@ mod utils;
use clap::Subcommand;
use common_error::ext::BoxedError;

use crate::Tool;
use crate::metadata::control::{DelCommand, GetCommand};
use crate::metadata::repair::RepairLogicalTablesCommand;
use crate::metadata::snapshot::SnapshotCommand;
use crate::Tool;

/// Command for managing metadata operations,
/// including saving and restoring metadata snapshots,

@@ -18,9 +18,9 @@ mod table;
use clap::Subcommand;
use common_error::ext::BoxedError;

use crate::Tool;
use crate::metadata::control::del::key::DelKeyCommand;
use crate::metadata::control::del::table::DelTableCommand;
use crate::Tool;

/// The prefix of the tombstone keys.
pub(crate) const CLI_TOMBSTONE_PREFIX: &str = "__cli_tombstone/";

@@ -19,9 +19,9 @@ use common_meta::key::tombstone::TombstoneManager;
use common_meta::kv_backend::KvBackendRef;
use common_meta::rpc::store::RangeRequest;

use crate::metadata::common::StoreConfig;
use crate::metadata::control::del::CLI_TOMBSTONE_PREFIX;
use crate::Tool;
use crate::common::StoreConfig;
use crate::metadata::control::del::CLI_TOMBSTONE_PREFIX;

/// Delete key-value pairs logically from the metadata store.
#[derive(Debug, Default, Parser)]
@@ -102,8 +102,8 @@ mod tests {
use common_meta::kv_backend::{KvBackend, KvBackendRef};
use common_meta::rpc::store::RangeRequest;

use crate::metadata::control::del::key::KeyDeleter;
use crate::metadata::control::del::CLI_TOMBSTONE_PREFIX;
use crate::metadata::control::del::key::KeyDeleter;
use crate::metadata::control::test_utils::put_key;

#[tokio::test]

@@ -18,16 +18,16 @@ use client::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_catalog::format_full_table_name;
use common_error::ext::BoxedError;
use common_meta::ddl::utils::get_region_wal_options;
use common_meta::key::table_name::TableNameManager;
use common_meta::key::TableMetadataManager;
use common_meta::key::table_name::TableNameManager;
use common_meta::kv_backend::KvBackendRef;
use store_api::storage::TableId;

use crate::Tool;
use crate::common::StoreConfig;
use crate::error::{InvalidArgumentsSnafu, TableNotFoundSnafu};
use crate::metadata::common::StoreConfig;
use crate::metadata::control::del::CLI_TOMBSTONE_PREFIX;
use crate::metadata::control::utils::get_table_id_by_name;
use crate::Tool;

/// Delete table metadata logically from the metadata store.
#[derive(Debug, Default, Parser)]
@@ -48,6 +48,7 @@ pub struct DelTableCommand {
#[clap(long, default_value = DEFAULT_CATALOG_NAME)]
catalog_name: String,

/// The store config.
#[clap(flatten)]
store: StoreConfig,
}
@@ -183,15 +184,15 @@ mod tests {

use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
use common_meta::key::table_route::TableRouteValue;
use common_meta::key::TableMetadataManager;
use common_meta::key::table_route::TableRouteValue;
use common_meta::kv_backend::chroot::ChrootKvBackend;
use common_meta::kv_backend::memory::MemoryKvBackend;
use common_meta::kv_backend::{KvBackend, KvBackendRef};
use common_meta::rpc::store::RangeRequest;

use crate::metadata::control::del::table::TableMetadataDeleter;
use crate::metadata::control::del::CLI_TOMBSTONE_PREFIX;
use crate::metadata::control::del::table::TableMetadataDeleter;
use crate::metadata::control::test_utils::prepare_physical_table_metadata;

#[tokio::test]

@@ -19,18 +19,18 @@ use clap::{Parser, Subcommand};
use client::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_catalog::format_full_table_name;
use common_error::ext::BoxedError;
use common_meta::key::TableMetadataManager;
use common_meta::key::table_info::TableInfoKey;
use common_meta::key::table_route::TableRouteKey;
use common_meta::key::TableMetadataManager;
use common_meta::kv_backend::KvBackendRef;
use common_meta::range_stream::{PaginationStream, DEFAULT_PAGE_SIZE};
use common_meta::range_stream::{DEFAULT_PAGE_SIZE, PaginationStream};
use common_meta::rpc::store::RangeRequest;
use futures::TryStreamExt;

use crate::error::InvalidArgumentsSnafu;
use crate::metadata::common::StoreConfig;
use crate::metadata::control::utils::{decode_key_value, get_table_id_by_name, json_fromatter};
use crate::Tool;
use crate::common::StoreConfig;
use crate::error::InvalidArgumentsSnafu;
use crate::metadata::control::utils::{decode_key_value, get_table_id_by_name, json_fromatter};

/// Getting metadata from metadata store.
#[derive(Subcommand)]

@@ -31,18 +31,18 @@ use common_meta::key::TableMetadataManager;
use common_meta::kv_backend::KvBackendRef;
use common_meta::node_manager::NodeManagerRef;
use common_meta::peer::Peer;
use common_meta::rpc::router::{find_leaders, RegionRoute};
use common_meta::rpc::router::{RegionRoute, find_leaders};
use common_telemetry::{error, info, warn};
use futures::TryStreamExt;
use snafu::{ensure, ResultExt};
use snafu::{ResultExt, ensure};
use store_api::storage::TableId;

use crate::Tool;
use crate::common::StoreConfig;
use crate::error::{
InvalidArgumentsSnafu, Result, SendRequestToDatanodeSnafu, TableMetadataSnafu, UnexpectedSnafu,
};
use crate::metadata::common::StoreConfig;
use crate::metadata::utils::{FullTableMetadata, IteratorInput, TableMetadataIterator};
use crate::Tool;

/// Repair metadata of logical tables.
#[derive(Debug, Default, Parser)]
@@ -301,7 +301,10 @@ impl RepairTool {
warn!(
"Sending alter table requests to datanodes for table: {} failed for the datanodes: {:?}",
full_table_name,
failed_peers.iter().map(|(peer, _)| peer.id).collect::<Vec<_>>()
failed_peers
.iter()
.map(|(peer, _)| peer.id)
.collect::<Vec<_>>()
);

let create_table_expr =
@@ -320,8 +323,7 @@ impl RepairTool {
}
info!(
"Region not found for table: {}, datanode: {}, trying to create the logical table on that datanode",
full_table_name,
peer.id
full_table_name, peer.id
);

// If the alter table request fails for any datanode, we attempt to create the table on that datanode

@@ -13,11 +13,11 @@
// limitations under the License.

use client::api::v1::alter_table_expr::Kind;
use client::api::v1::region::{region_request, AlterRequests, RegionRequest, RegionRequestHeader};
use client::api::v1::region::{AlterRequests, RegionRequest, RegionRequestHeader, region_request};
use client::api::v1::{AddColumn, AddColumns, AlterTableExpr};
use common_meta::ddl::alter_logical_tables::make_alter_region_request;
use common_meta::peer::Peer;
use common_meta::rpc::router::{find_leader_regions, RegionRoute};
use common_meta::rpc::router::{RegionRoute, find_leader_regions};
use operator::expr_helper::column_schemas_to_defs;
use snafu::ResultExt;
use store_api::storage::{RegionId, TableId};

@@ -14,12 +14,12 @@

use std::collections::HashMap;

use client::api::v1::region::{region_request, CreateRequests, RegionRequest, RegionRequestHeader};
use client::api::v1::CreateTableExpr;
use client::api::v1::region::{CreateRequests, RegionRequest, RegionRequestHeader, region_request};
use common_meta::ddl::create_logical_tables::create_region_request_builder;
use common_meta::ddl::utils::region_storage_path;
use common_meta::peer::Peer;
use common_meta::rpc::router::{find_leader_regions, RegionRoute};
use common_meta::rpc::router::{RegionRoute, find_leader_regions};
use operator::expr_helper::column_schemas_to_defs;
use snafu::ResultExt;
use store_api::storage::{RegionId, TableId};

@@ -12,20 +12,15 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::path::Path;

use async_trait::async_trait;
use clap::{Parser, Subcommand};
use common_base::secrets::{ExposeSecret, SecretString};
use common_error::ext::BoxedError;
use common_meta::snapshot::MetadataSnapshotManager;
use object_store::services::{Fs, S3};
use object_store::ObjectStore;
use snafu::{OptionExt, ResultExt};
use object_store::{ObjectStore, Scheme};

use crate::error::{InvalidFilePathSnafu, OpenDalSnafu, S3ConfigNotSetSnafu};
use crate::metadata::common::StoreConfig;
use crate::Tool;
use crate::common::{ObjectStoreConfig, StoreConfig, new_fs_object_store};
use crate::utils::resolve_relative_path_with_current_dir;

/// Subcommand for metadata snapshot operations, including saving snapshots, restoring from snapshots, and viewing snapshot information.
#[derive(Subcommand)]
@@ -41,68 +36,9 @@ pub enum SnapshotCommand {
impl SnapshotCommand {
pub async fn build(&self) -> Result<Box<dyn Tool>, BoxedError> {
match self {
SnapshotCommand::Save(cmd) => cmd.build().await,
SnapshotCommand::Restore(cmd) => cmd.build().await,
SnapshotCommand::Info(cmd) => cmd.build().await,
}
}
}

// TODO(qtang): Abstract a generic s3 config for export import meta snapshot restore
#[derive(Debug, Default, Parser)]
struct S3Config {
/// whether to use s3 as the output directory. default is false.
#[clap(long, default_value = "false")]
s3: bool,
/// The s3 bucket name.
#[clap(long)]
s3_bucket: Option<String>,
/// The s3 region.
#[clap(long)]
s3_region: Option<String>,
/// The s3 access key.
#[clap(long)]
s3_access_key: Option<SecretString>,
/// The s3 secret key.
#[clap(long)]
s3_secret_key: Option<SecretString>,
/// The s3 endpoint. we will automatically use the default s3 decided by the region if not set.
#[clap(long)]
s3_endpoint: Option<String>,
}

impl S3Config {
pub fn build(&self, root: &str) -> Result<Option<ObjectStore>, BoxedError> {
if !self.s3 {
Ok(None)
} else {
if self.s3_region.is_none()
|| self.s3_access_key.is_none()
|| self.s3_secret_key.is_none()
|| self.s3_bucket.is_none()
{
return S3ConfigNotSetSnafu.fail().map_err(BoxedError::new);
}
// Safety, unwrap is safe because we have checked the options above.
let mut config = S3::default()
.bucket(self.s3_bucket.as_ref().unwrap())
.region(self.s3_region.as_ref().unwrap())
.access_key_id(self.s3_access_key.as_ref().unwrap().expose_secret())
.secret_access_key(self.s3_secret_key.as_ref().unwrap().expose_secret());

if !root.is_empty() && root != "." {
config = config.root(root);
}

if let Some(endpoint) = &self.s3_endpoint {
config = config.endpoint(endpoint);
}
Ok(Some(
ObjectStore::new(config)
.context(OpenDalSnafu)
.map_err(BoxedError::new)?
.finish(),
))
SnapshotCommand::Save(cmd) => Ok(Box::new(cmd.build().await?)),
SnapshotCommand::Restore(cmd) => Ok(Box::new(cmd.build().await?)),
SnapshotCommand::Info(cmd) => Ok(Box::new(cmd.build().await?)),
}
}
}
@@ -116,60 +52,47 @@ pub struct SaveCommand {
/// The store configuration.
#[clap(flatten)]
store: StoreConfig,
/// The s3 config.
/// The object store configuration.
#[clap(flatten)]
s3_config: S3Config,
/// The name of the target snapshot file. we will add the file extension automatically.
#[clap(long, default_value = "metadata_snapshot")]
file_name: String,
/// The directory to store the snapshot file.
/// if target output is s3 bucket, this is the root directory in the bucket.
/// if target output is local file, this is the local directory.
#[clap(long, default_value = "")]
output_dir: String,
}

fn create_local_file_object_store(root: &str) -> Result<ObjectStore, BoxedError> {
let root = if root.is_empty() { "." } else { root };
let object_store = ObjectStore::new(Fs::default().root(root))
.context(OpenDalSnafu)
.map_err(BoxedError::new)?
.finish();
Ok(object_store)
object_store: ObjectStoreConfig,
/// The path of the target snapshot file.
#[clap(
long,
default_value = "metadata_snapshot.metadata.fb",
alias = "file_name"
)]
file_path: String,
/// Specifies the root directory used for I/O operations.
#[clap(long, default_value = "/", alias = "output_dir")]
dir: String,
}

impl SaveCommand {
pub async fn build(&self) -> Result<Box<dyn Tool>, BoxedError> {
async fn build(&self) -> Result<MetaSnapshotTool, BoxedError> {
let kvbackend = self.store.build().await?;
let output_dir = &self.output_dir;
let object_store = self.s3_config.build(output_dir).map_err(BoxedError::new)?;
if let Some(store) = object_store {
let tool = MetaSnapshotTool {
inner: MetadataSnapshotManager::new(kvbackend, store),
target_file: self.file_name.clone(),
};
Ok(Box::new(tool))
} else {
let object_store = create_local_file_object_store(output_dir)?;
let tool = MetaSnapshotTool {
inner: MetadataSnapshotManager::new(kvbackend, object_store),
target_file: self.file_name.clone(),
};
Ok(Box::new(tool))
}
let (object_store, file_path) = build_object_store_and_resolve_file_path(
self.object_store.clone(),
&self.dir,
&self.file_path,
)?;
let tool = MetaSnapshotTool {
inner: MetadataSnapshotManager::new(kvbackend, object_store),
file_path,
};
Ok(tool)
}
}

struct MetaSnapshotTool {
inner: MetadataSnapshotManager,
target_file: String,
file_path: String,
}

#[async_trait]
impl Tool for MetaSnapshotTool {
async fn do_work(&self) -> std::result::Result<(), BoxedError> {
self.inner
.dump("", &self.target_file)
.dump(&self.file_path)
.await
.map_err(BoxedError::new)?;
Ok(())
@@ -186,54 +109,52 @@ pub struct RestoreCommand {
/// The store configuration.
#[clap(flatten)]
store: StoreConfig,
/// The s3 config.
/// The object store config.
#[clap(flatten)]
s3_config: S3Config,
/// The name of the target snapshot file.
#[clap(long, default_value = "metadata_snapshot.metadata.fb")]
file_name: String,
/// The directory to store the snapshot file.
#[clap(long, default_value = ".")]
input_dir: String,
object_store: ObjectStoreConfig,
/// The path of the target snapshot file.
#[clap(
long,
default_value = "metadata_snapshot.metadata.fb",
alias = "file_name"
)]
file_path: String,
/// Specifies the root directory used for I/O operations.
#[clap(long, default_value = "/", alias = "input_dir")]
dir: String,
#[clap(long, default_value = "false")]
force: bool,
}

impl RestoreCommand {
pub async fn build(&self) -> Result<Box<dyn Tool>, BoxedError> {
async fn build(&self) -> Result<MetaRestoreTool, BoxedError> {
let kvbackend = self.store.build().await?;
let input_dir = &self.input_dir;
let object_store = self.s3_config.build(input_dir).map_err(BoxedError::new)?;
if let Some(store) = object_store {
let tool = MetaRestoreTool::new(
MetadataSnapshotManager::new(kvbackend, store),
self.file_name.clone(),
self.force,
);
Ok(Box::new(tool))
} else {
let object_store = create_local_file_object_store(input_dir)?;
let tool = MetaRestoreTool::new(
MetadataSnapshotManager::new(kvbackend, object_store),
self.file_name.clone(),
self.force,
);
Ok(Box::new(tool))
}
let (object_store, file_path) = build_object_store_and_resolve_file_path(
self.object_store.clone(),
&self.dir,
&self.file_path,
)
.map_err(BoxedError::new)?;
let tool = MetaRestoreTool::new(
MetadataSnapshotManager::new(kvbackend, object_store),
file_path,
self.force,
);
Ok(tool)
}
}

struct MetaRestoreTool {
inner: MetadataSnapshotManager,
source_file: String,
file_path: String,
force: bool,
}

impl MetaRestoreTool {
pub fn new(inner: MetadataSnapshotManager, source_file: String, force: bool) -> Self {
pub fn new(inner: MetadataSnapshotManager, file_path: String, force: bool) -> Self {
Self {
inner,
source_file,
file_path,
force,
}
}
@@ -252,19 +173,21 @@ impl Tool for MetaRestoreTool {
"The target source is clean, we will restore the metadata snapshot."
);
self.inner
.restore(&self.source_file)
.restore(&self.file_path)
.await
.map_err(BoxedError::new)?;
Ok(())
} else if !self.force {
common_telemetry::warn!(
"The target source is not clean, if you want to restore the metadata snapshot forcefully, please use --force option."
);
"The target source is not clean, if you want to restore the metadata snapshot forcefully, please use --force option."
);
Ok(())
} else {
common_telemetry::info!("The target source is not clean, We will restore the metadata snapshot with --force.");
common_telemetry::info!(
"The target source is not clean, We will restore the metadata snapshot with --force."
);
self.inner
.restore(&self.source_file)
.restore(&self.file_path)
.await
.map_err(BoxedError::new)?;
Ok(())
@@ -278,12 +201,19 @@ impl Tool for MetaRestoreTool {
/// It prints the filtered metadata to the console.
#[derive(Debug, Default, Parser)]
pub struct InfoCommand {
/// The s3 config.
/// The object store config.
#[clap(flatten)]
s3_config: S3Config,
/// The name of the target snapshot file. we will add the file extension automatically.
#[clap(long, default_value = "metadata_snapshot")]
file_name: String,
object_store: ObjectStoreConfig,
/// The path of the target snapshot file.
#[clap(
long,
default_value = "metadata_snapshot.metadata.fb",
alias = "file_name"
)]
file_path: String,
/// Specifies the root directory used for I/O operations.
#[clap(long, default_value = "/", alias = "input_dir")]
dir: String,
/// The query string to filter the metadata.
#[clap(long, default_value = "*")]
inspect_key: String,
@@ -294,7 +224,7 @@ pub struct InfoCommand {

struct MetaInfoTool {
inner: ObjectStore,
source_file: String,
file_path: String,
inspect_key: String,
limit: Option<usize>,
}
@@ -304,7 +234,7 @@ impl Tool for MetaInfoTool {
async fn do_work(&self) -> std::result::Result<(), BoxedError> {
let result = MetadataSnapshotManager::info(
&self.inner,
&self.source_file,
&self.file_path,
&self.inspect_key,
self.limit,
)
@@ -318,45 +248,90 @@ impl Tool for MetaInfoTool {
}

impl InfoCommand {
fn decide_object_store_root_for_local_store(
file_path: &str,
) -> Result<(&str, &str), BoxedError> {
let path = Path::new(file_path);
let parent = path
.parent()
.and_then(|p| p.to_str())
.context(InvalidFilePathSnafu { msg: file_path })
.map_err(BoxedError::new)?;
let file_name = path
.file_name()
.and_then(|f| f.to_str())
.context(InvalidFilePathSnafu { msg: file_path })
.map_err(BoxedError::new)?;
let root = if parent.is_empty() { "." } else { parent };
Ok((root, file_name))
}

pub async fn build(&self) -> Result<Box<dyn Tool>, BoxedError> {
let object_store = self.s3_config.build("").map_err(BoxedError::new)?;
if let Some(store) = object_store {
let tool = MetaInfoTool {
inner: store,
source_file: self.file_name.clone(),
inspect_key: self.inspect_key.clone(),
limit: self.limit,
};
Ok(Box::new(tool))
} else {
let (root, file_name) =
Self::decide_object_store_root_for_local_store(&self.file_name)?;
let object_store = create_local_file_object_store(root)?;
let tool = MetaInfoTool {
inner: object_store,
source_file: file_name.to_string(),
inspect_key: self.inspect_key.clone(),
limit: self.limit,
};
Ok(Box::new(tool))
}
async fn build(&self) -> Result<MetaInfoTool, BoxedError> {
let (object_store, file_path) = build_object_store_and_resolve_file_path(
self.object_store.clone(),
&self.dir,
&self.file_path,
)?;
let tool = MetaInfoTool {
inner: object_store,
file_path,
inspect_key: self.inspect_key.clone(),
limit: self.limit,
};
Ok(tool)
}
}

/// Builds the object store and resolves the file path.
fn build_object_store_and_resolve_file_path(
object_store: ObjectStoreConfig,
fs_root: &str,
file_path: &str,
) -> Result<(ObjectStore, String), BoxedError> {
let object_store = object_store.build().map_err(BoxedError::new)?;
let object_store = match object_store {
Some(object_store) => object_store,
None => new_fs_object_store(fs_root)?,
};

let file_path = if matches!(object_store.info().scheme(), Scheme::Fs) {
resolve_relative_path_with_current_dir(file_path).map_err(BoxedError::new)?
} else {
file_path.to_string()
};

Ok((object_store, file_path))
}

#[cfg(test)]
mod tests {
use std::env;

use clap::Parser;

use crate::metadata::snapshot::RestoreCommand;

#[tokio::test]
async fn test_cmd_resolve_file_path() {
common_telemetry::init_default_ut_logging();
let cmd = RestoreCommand::parse_from([
"",
"--file_name",
"metadata_snapshot.metadata.fb",
"--backend",
"memory-store",
"--store-addrs",
"memory://",
]);
let tool = cmd.build().await.unwrap();
let current_dir = env::current_dir().unwrap();
let file_path = current_dir.join("metadata_snapshot.metadata.fb");
assert_eq!(tool.file_path, file_path.to_string_lossy().to_string());

let cmd = RestoreCommand::parse_from([
"",
"--file_name",
"metadata_snapshot.metadata.fb",
"--backend",
"memory-store",
"--store-addrs",
"memory://",
]);
let tool = cmd.build().await.unwrap();
assert_eq!(tool.file_path, file_path.to_string_lossy().to_string());

let cmd = RestoreCommand::parse_from([
"",
"--file_name",
"metadata_snapshot.metadata.fb",
"--backend",
"memory-store",
"--store-addrs",
"memory://",
]);
let tool = cmd.build().await.unwrap();
assert_eq!(tool.file_path, file_path.to_string_lossy().to_string());
}
}
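
The snapshot commands above now obtain their local object store from `new_fs_object_store` in `crate::common`, whose body is not included in the hunks shown. A minimal sketch, assuming it behaves like the removed `create_local_file_object_store` helper:

```rust
use common_error::ext::BoxedError;
use object_store::ObjectStore;
use object_store::services::Fs;
use snafu::ResultExt;

use crate::error::OpenDalSnafu;

/// Hypothetical stand-in for `crate::common::new_fs_object_store`; it mirrors
/// the removed helper above and may differ from the real implementation.
pub fn new_fs_object_store(root: &str) -> Result<ObjectStore, BoxedError> {
    // Default to the current directory when no root is given.
    let root = if root.is_empty() { "." } else { root };
    let object_store = ObjectStore::new(Fs::default().root(root))
        .context(OpenDalSnafu)
        .map_err(BoxedError::new)?
        .finish();
    Ok(object_store)
}
```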

@@ -17,9 +17,9 @@ use std::collections::VecDeque;
use async_stream::try_stream;
use common_catalog::consts::METRIC_ENGINE;
use common_catalog::format_full_table_name;
use common_meta::key::TableMetadataManager;
use common_meta::key::table_name::TableNameKey;
use common_meta::key::table_route::TableRouteValue;
use common_meta::key::TableMetadataManager;
use common_meta::kv_backend::KvBackendRef;
use futures::Stream;
use snafu::{OptionExt, ResultExt};

src/cli/src/utils.rs (new file, 94 lines)
@@ -0,0 +1,94 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::env;
use std::path::Path;

use snafu::ResultExt;

use crate::error::{GetCurrentDirSnafu, Result};

/// Resolves the relative path to an absolute path.
pub fn resolve_relative_path(current_dir: impl AsRef<Path>, path_str: &str) -> String {
let path = Path::new(path_str);
if path.is_relative() {
let path = current_dir.as_ref().join(path);
common_telemetry::debug!("Resolved relative path: {}", path.to_string_lossy());
path.to_string_lossy().to_string()
} else {
path_str.to_string()
}
}

/// Resolves the relative path to an absolute path.
pub fn resolve_relative_path_with_current_dir(path_str: &str) -> Result<String> {
let current_dir = env::current_dir().context(GetCurrentDirSnafu)?;
Ok(resolve_relative_path(current_dir, path_str))
}

#[cfg(test)]
mod tests {
use std::env;
use std::path::PathBuf;

use super::*;

#[test]
fn test_resolve_relative_path_absolute() {
let abs_path = if cfg!(windows) {
"C:\\foo\\bar"
} else {
"/foo/bar"
};
let current_dir = PathBuf::from("/tmp");
let result = resolve_relative_path(&current_dir, abs_path);
assert_eq!(result, abs_path);
}

#[test]
fn test_resolve_relative_path_relative() {
let current_dir = PathBuf::from("/tmp");
let rel_path = "foo/bar";
let expected = "/tmp/foo/bar";
let result = resolve_relative_path(&current_dir, rel_path);
// On Windows, the separator is '\', so normalize for comparison;
// '/' is treated as a normal character in Windows paths
if cfg!(windows) {
assert!(result.ends_with("foo/bar"));
assert!(result.contains("/tmp\\"));
} else {
assert_eq!(result, expected);
}
}

#[test]
fn test_resolve_relative_path_with_current_dir_absolute() {
let abs_path = if cfg!(windows) {
"C:\\foo\\bar"
} else {
"/foo/bar"
};
let result = resolve_relative_path_with_current_dir(abs_path).unwrap();
assert_eq!(result, abs_path);
}

#[test]
fn test_resolve_relative_path_with_current_dir_relative() {
let rel_path = "foo/bar";
let current_dir = env::current_dir().unwrap();
let expected = current_dir.join(rel_path).to_string_lossy().to_string();
let result = resolve_relative_path_with_current_dir(rel_path).unwrap();
assert_eq!(result, expected);
}
}
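
A short usage sketch of the new path helpers, assuming the two functions above are in scope; the expected values follow the Unix cases exercised by the tests:

```rust
use std::path::PathBuf;

// Illustrative only: `resolve_relative_path` and
// `resolve_relative_path_with_current_dir` come from src/cli/src/utils.rs above.
fn demo() -> crate::error::Result<()> {
    // Absolute paths are returned unchanged.
    assert_eq!(
        resolve_relative_path(PathBuf::from("/tmp"), "/foo/bar"),
        "/foo/bar"
    );
    // Relative paths are joined onto the supplied directory (Unix separators assumed).
    assert_eq!(
        resolve_relative_path(PathBuf::from("/tmp"), "foo/bar"),
        "/tmp/foo/bar"
    );
    // The `_with_current_dir` variant joins onto the process working directory
    // and surfaces a `GetCurrentDir` error if the directory cannot be read.
    let _resolved = resolve_relative_path_with_current_dir("relative/snapshot.fb")?;
    Ok(())
}
```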

@@ -14,11 +14,11 @@

use std::sync::Arc;

use api::v1::HealthCheckRequest;
use api::v1::flow::flow_client::FlowClient as PbFlowClient;
use api::v1::health_check_client::HealthCheckClient;
use api::v1::prometheus_gateway_client::PrometheusGatewayClient;
use api::v1::region::region_client::RegionClient as PbRegionClient;
use api::v1::HealthCheckRequest;
use arrow_flight::flight_service_client::FlightServiceClient;
use common_grpc::channel_manager::{ChannelConfig, ChannelManager, ClientTlsOption};
use parking_lot::RwLock;
@@ -27,7 +27,7 @@ use tonic::codec::CompressionEncoding;
use tonic::transport::Channel;

use crate::load_balance::{LoadBalance, Loadbalancer};
use crate::{error, Result};
use crate::{Result, error};

pub struct FlightClient {
addr: String,

@@ -21,9 +21,9 @@ use common_meta::node_manager::{DatanodeManager, DatanodeRef, FlownodeManager, F
use common_meta::peer::Peer;
use moka::future::{Cache, CacheBuilder};

use crate::Client;
use crate::flow::FlowRequester;
use crate::region::RegionRequester;
use crate::Client;

pub struct NodeClients {
channel_manager: ChannelManager,

@@ -27,8 +27,8 @@ use api::v1::{
};
use arrow_flight::{FlightData, Ticket};
use async_stream::stream;
use base64::prelude::BASE64_STANDARD;
use base64::Engine;
use base64::prelude::BASE64_STANDARD;
use common_catalog::build_db_string;
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_error::ext::BoxedError;
@@ -42,7 +42,7 @@ use common_telemetry::{error, warn};
use futures::future;
use futures_util::{Stream, StreamExt, TryStreamExt};
use prost::Message;
use snafu::{ensure, OptionExt, ResultExt};
use snafu::{OptionExt, ResultExt, ensure};
use tonic::metadata::{AsciiMetadataKey, AsciiMetadataValue, MetadataMap, MetadataValue};
use tonic::transport::Channel;

@@ -50,7 +50,7 @@ use crate::error::{
ConvertFlightDataSnafu, Error, FlightGetSnafu, IllegalFlightMessagesSnafu,
InvalidTonicMetadataValueSnafu,
};
use crate::{error, from_grpc_response, Client, Result};
use crate::{Client, Result, error, from_grpc_response};

type FlightDataStream = Pin<Box<dyn Stream<Item = FlightData> + Send>>;

@@ -379,11 +379,10 @@ impl Database {
tonic_code,
e
);
let error = Err(BoxedError::new(e)).with_context(|_| FlightGetSnafu {
Err(BoxedError::new(e)).with_context(|_| FlightGetSnafu {
addr: client.addr().to_string(),
tonic_code,
});
error
})
})?;

let flight_data_stream = response.into_inner();

@@ -18,9 +18,9 @@ use common_error::define_from_tonic_status;
use common_error::ext::{BoxedError, ErrorExt};
use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
use snafu::{location, Location, Snafu};
use tonic::metadata::errors::InvalidMetadataValue;
use snafu::{Location, Snafu, location};
use tonic::Code;
use tonic::metadata::errors::InvalidMetadataValue;

#[derive(Snafu)]
#[snafu(visibility(pub))]

@@ -12,14 +12,14 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use api::v1::flow::{DirtyWindowRequest, DirtyWindowRequests, FlowRequest, FlowResponse};
use api::v1::flow::{DirtyWindowRequests, FlowRequest, FlowResponse};
use api::v1::region::InsertRequests;
use common_error::ext::BoxedError;
use common_meta::node_manager::Flownode;
use snafu::ResultExt;

use crate::error::{FlowServerSnafu, Result};
use crate::Client;
use crate::error::{FlowServerSnafu, Result};

#[derive(Debug)]
pub struct FlowRequester {
@@ -47,7 +47,7 @@ impl Flownode for FlowRequester {

async fn handle_mark_window_dirty(
&self,
req: DirtyWindowRequest,
req: DirtyWindowRequests,
) -> common_meta::error::Result<FlowResponse> {
self.handle_mark_window_dirty(req)
.await
@@ -102,12 +102,10 @@ impl FlowRequester {
Ok(response)
}

async fn handle_mark_window_dirty(&self, req: DirtyWindowRequest) -> Result<FlowResponse> {
async fn handle_mark_window_dirty(&self, req: DirtyWindowRequests) -> Result<FlowResponse> {
let (addr, mut client) = self.client.raw_flow_client()?;
let response = client
.handle_mark_dirty_time_window(DirtyWindowRequests {
requests: vec![req],
})
.handle_mark_dirty_time_window(req)
.await
.or_else(|e| {
let code = e.code();
Some files were not shown because too many files have changed in this diff.