Mirror of https://github.com/GreptimeTeam/greptimedb.git (synced 2025-12-22 22:20:02 +00:00)

Compare commits: v0.17.0 ... v0.18.0-ni (58 commits)
Commit SHAs included in this range (author and date columns were not captured):

b3aabb6706, 028effe952, d86f489a74, 6c066c1a4a, 9ab87e11a4, 9fe7069146, 733a1afcd1, 5e65581f94,
e75e5baa63, c4b89df523, 6a15e62719, 2bddbe8c47, ea8125aafb, 49722951c6, c431b036ec, c797d87210,
e0ce0a6446, 2f6da3b718, 83290be3ba, 21ee981b49, 44c2aa4c23, 9c22092189, ad690e14d0, 264d05d20e,
9fe84f6fbd, ac051af201, 948a6578fa, 16febbd4c2, 47384c7701, c9377e7c5a, 658d07bfc8, 24e5c9f6da,
4158afa618, 48f5db3f5f, f7c8d86ebb, faae1db066, ba105e18b0, 8bbb396506, 8e7f2e92cc, 493a1efe65,
40a49ddc82, 2c019965be, 756856799f, dbb76483e8, a65db7121e, ee4b8708d7, 2512f9456d, deb728c38a,
9dbf6dd8d0, 7cf47ccf54, c3c79e4c79, 56db1d468a, 7fbcae1ef8, b7f507cb37, 9b8f04b065, 8fc42aeb27,
d394f38d18, 6a3791ab31
@@ -2,7 +2,7 @@
 linker = "aarch64-linux-gnu-gcc"

 [alias]
-sqlness = "run --bin sqlness-runner --"
+sqlness = "run --bin sqlness-runner --target-dir target/sqlness --"

 [unstable.git]
 shallow_index = true
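For reference, the alias above is invoked through Cargo. A minimal usage sketch, assuming only the alias shown in this hunk:

```sh
# Expands to: cargo run --bin sqlness-runner --target-dir target/sqlness -- <args>
# so sqlness build artifacts now land in target/sqlness instead of the shared target/ directory.
cargo sqlness
```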
@@ -1,3 +1,8 @@
+logging:
+  level: "info"
+  format: "json"
+  filters:
+    - log_store=debug
 meta:
   configData: |-
     [runtime]
.github/scripts/deploy-greptimedb.sh (vendored, 40 lines changed)
@@ -3,12 +3,14 @@
 set -e
 set -o pipefail

-KUBERNETES_VERSION="${KUBERNETES_VERSION:-v1.24.0}"
+KUBERNETES_VERSION="${KUBERNETES_VERSION:-v1.32.0}"
 ENABLE_STANDALONE_MODE="${ENABLE_STANDALONE_MODE:-true}"
 DEFAULT_INSTALL_NAMESPACE=${DEFAULT_INSTALL_NAMESPACE:-default}
 GREPTIMEDB_IMAGE_TAG=${GREPTIMEDB_IMAGE_TAG:-latest}
-ETCD_CHART="oci://registry-1.docker.io/bitnamicharts/etcd"
 GREPTIME_CHART="https://greptimeteam.github.io/helm-charts/"
+ETCD_CHART="oci://registry-1.docker.io/bitnamicharts/etcd"
+ETCD_CHART_VERSION="${ETCD_CHART_VERSION:-12.0.8}"
+ETCD_IMAGE_TAG="${ETCD_IMAGE_TAG:-3.6.1-debian-12-r3}"

 # Create a cluster with 1 control-plane node and 5 workers.
 function create_kind_cluster() {
@@ -35,10 +37,16 @@ function add_greptime_chart() {
 function deploy_etcd_cluster() {
   local namespace="$1"

-  helm install etcd "$ETCD_CHART" \
+  helm upgrade --install etcd "$ETCD_CHART" \
+    --version "$ETCD_CHART_VERSION" \
     --create-namespace \
     --set replicaCount=3 \
     --set auth.rbac.create=false \
     --set auth.rbac.token.enabled=false \
+    --set global.security.allowInsecureImages=true \
+    --set image.registry=docker.io \
+    --set image.repository=greptime/etcd \
+    --set image.tag="$ETCD_IMAGE_TAG" \
     -n "$namespace"

   # Wait for etcd cluster to be ready.
@@ -48,7 +56,8 @@ function deploy_etcd_cluster() {
 # Deploy greptimedb-operator.
 function deploy_greptimedb_operator() {
   # Use the latest chart and image.
-  helm install greptimedb-operator greptime/greptimedb-operator \
+  helm upgrade --install greptimedb-operator greptime/greptimedb-operator \
+    --create-namespace \
     --set image.tag=latest \
     -n "$DEFAULT_INSTALL_NAMESPACE"

@@ -66,9 +75,11 @@ function deploy_greptimedb_cluster() {

   deploy_etcd_cluster "$install_namespace"

-  helm install "$cluster_name" greptime/greptimedb-cluster \
+  helm upgrade --install "$cluster_name" greptime/greptimedb-cluster \
+    --create-namespace \
     --set image.tag="$GREPTIMEDB_IMAGE_TAG" \
     --set meta.backendStorage.etcd.endpoints="etcd.$install_namespace:2379" \
+    --set meta.backendStorage.etcd.storeKeyPrefix="$cluster_name" \
     -n "$install_namespace"

   # Wait for greptimedb cluster to be ready.
@@ -101,15 +112,17 @@ function deploy_greptimedb_cluster_with_s3_storage() {

   deploy_etcd_cluster "$install_namespace"

-  helm install "$cluster_name" greptime/greptimedb-cluster -n "$install_namespace" \
+  helm upgrade --install "$cluster_name" greptime/greptimedb-cluster -n "$install_namespace" \
+    --create-namespace \
     --set image.tag="$GREPTIMEDB_IMAGE_TAG" \
     --set meta.backendStorage.etcd.endpoints="etcd.$install_namespace:2379" \
-    --set storage.s3.bucket="$AWS_CI_TEST_BUCKET" \
-    --set storage.s3.region="$AWS_REGION" \
-    --set storage.s3.root="$DATA_ROOT" \
-    --set storage.credentials.secretName=s3-credentials \
-    --set storage.credentials.accessKeyId="$AWS_ACCESS_KEY_ID" \
-    --set storage.credentials.secretAccessKey="$AWS_SECRET_ACCESS_KEY"
+    --set meta.backendStorage.etcd.storeKeyPrefix="$cluster_name" \
+    --set objectStorage.s3.bucket="$AWS_CI_TEST_BUCKET" \
+    --set objectStorage.s3.region="$AWS_REGION" \
+    --set objectStorage.s3.root="$DATA_ROOT" \
+    --set objectStorage.credentials.secretName=s3-credentials \
+    --set objectStorage.credentials.accessKeyId="$AWS_ACCESS_KEY_ID" \
+    --set objectStorage.credentials.secretAccessKey="$AWS_SECRET_ACCESS_KEY"

   # Wait for greptimedb cluster to be ready.
   while true; do
@@ -134,7 +147,8 @@ function deploy_greptimedb_cluster_with_s3_storage() {
 # Deploy standalone greptimedb.
 # It will expose cluster service ports as '34000', '34001', '34002', '34003' to local access.
 function deploy_standalone_greptimedb() {
-  helm install greptimedb-standalone greptime/greptimedb-standalone \
+  helm upgrade --install greptimedb-standalone greptime/greptimedb-standalone \
+    --create-namespace \
     --set image.tag="$GREPTIMEDB_IMAGE_TAG" \
     -n "$DEFAULT_INSTALL_NAMESPACE"
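The script is driven entirely by the environment variables defaulted at its top. A hedged invocation sketch; the overridden values below are illustrative, not taken from this diff:

```sh
# Run the CI deploy script locally against a kind cluster, overriding a few defaults.
KUBERNETES_VERSION=v1.32.0 \
GREPTIMEDB_IMAGE_TAG=latest \
ENABLE_STANDALONE_MODE=false \
bash .github/scripts/deploy-greptimedb.sh
```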
.github/workflows/semantic-pull-request.yml (vendored, 6 lines changed)
@@ -1,7 +1,7 @@
 name: "Semantic Pull Request"

 on:
-  pull_request:
+  pull_request_target:
     types:
       - opened
       - reopened
@@ -12,9 +12,9 @@ concurrency:
   cancel-in-progress: true

 permissions:
-  issues: write
-  contents: write
+  contents: read
   pull-requests: write
+  issues: write

 jobs:
   check:
Cargo.lock (generated, 166 lines changed)
Summary of the generated lock file changes:

All in-repo workspace crates are bumped from version "0.17.0" to "0.18.0": api, auth, cache,
catalog, cli, client, cmd, common-base, common-catalog, common-config, common-datasource,
common-decimal, common-error, common-event-recorder, common-frontend, common-function,
common-greptimedb-telemetry, common-grpc, common-grpc-expr, common-macro, common-mem-prof,
common-meta, common-options, common-plugins, common-pprof, common-procedure,
common-procedure-test, common-query, common-recordbatch, common-runtime, common-session,
common-sql, common-telemetry, common-test-util, common-time, common-version, common-wal,
common-workload, datanode, datatypes, file-engine, flow, frontend, index, log-query, log-store,
meta-client, meta-srv, metric-engine, mito-codec, mito2, object-store, operator, partition,
pipeline, plugins, promql, puffin, query, servers, session, sql, sqlness-runner, stat, store-api,
substrait, table, tests-fuzz, tests-integration. Crates that depend on the in-repo substrait
package now reference "substrait 0.18.0" instead of "substrait 0.17.0" (cli, client, cmd,
datanode, flow, frontend, operator, query, tests-integration).

Other changes visible in the lock file:

- greptime-proto: source updated from
  git+https://github.com/GreptimeTeam/greptime-proto.git?rev=66eb089afa6baaa3ddfafabd0a4abbe317d012c3
  to
  git+https://github.com/GreptimeTeam/greptime-proto.git?rev=f9836cf8aab30e672f640c6ef4c1cfd2cf9fbc36
- catalog: adds the dashmap and serde_json dependencies
- common-function: drops one dependency (the change falls in the num/num-traits/once_cell/paste
  region of its dependency list)
- common-recordbatch: gains two dependencies (the change falls in the
  common-telemetry/common-time/datafusion/datafusion-common region of its dependency list)
- mito2: adds the dashmap and partition dependencies
- servers: adds the operator dependency
Cargo.toml (workspace manifest):

@@ -73,8 +73,8 @@ members = [
 resolver = "2"

 [workspace.package]
-version = "0.17.0"
-edition = "2021"
+version = "0.18.0"
+edition = "2024"
 license = "Apache-2.0"

 [workspace.lints]
@@ -145,7 +145,7 @@ etcd-client = { git = "https://github.com/GreptimeTeam/etcd-client", rev = "f62d
 fst = "0.4.7"
 futures = "0.3"
 futures-util = "0.3"
-greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "66eb089afa6baaa3ddfafabd0a4abbe317d012c3" }
+greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "f9836cf8aab30e672f640c6ef4c1cfd2cf9fbc36" }
 hex = "0.4"
 http = "1"
 humantime = "2.1"
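Member crates pick up the workspace-level version and edition through Cargo's workspace inheritance; a minimal sketch of a hypothetical member manifest (not part of this diff):

```toml
[package]
name = "example-member"    # hypothetical crate name, for illustration only
version.workspace = true   # resolves to 0.18.0 after this change
edition.workspace = true   # resolves to 2024 after this change
license.workspace = true
```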
@@ -402,8 +402,8 @@
 | `event_recorder` | -- | -- | Configuration options for the event recorder. |
 | `event_recorder.ttl` | String | `90d` | TTL for the events table that will be used to store the events. Default is `90d`. |
 | `stats_persistence` | -- | -- | Configuration options for the stats persistence. |
-| `stats_persistence.ttl` | String | `30d` | TTL for the stats table that will be used to store the stats. Default is `30d`.<br/>Set to `0s` to disable stats persistence. |
-| `stats_persistence.interval` | String | `60s` | The interval to persist the stats. Default is `60s`.<br/>The minimum value is `60s`, if the value is less than `60s`, it will be overridden to `60s`. |
+| `stats_persistence.ttl` | String | `0s` | TTL for the stats table that will be used to store the stats.<br/>Set to `0s` to disable stats persistence.<br/>Default is `0s`.<br/>If you want to enable stats persistence, set the TTL to a value greater than 0.<br/>It is recommended to set a small value, e.g., `3h`. |
+| `stats_persistence.interval` | String | `10m` | The interval to persist the stats. Default is `10m`.<br/>The minimum value is `10m`, if the value is less than `10m`, it will be overridden to `10m`. |
 | `logging` | -- | -- | The logging options. |
 | `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
 | `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
@@ -274,12 +274,15 @@ ttl = "90d"

 ## Configuration options for the stats persistence.
 [stats_persistence]
-## TTL for the stats table that will be used to store the stats. Default is `30d`.
+## TTL for the stats table that will be used to store the stats.
 ## Set to `0s` to disable stats persistence.
-ttl = "30d"
-## The interval to persist the stats. Default is `60s`.
-## The minimum value is `60s`, if the value is less than `60s`, it will be overridden to `60s`.
-interval = "60s"
+## Default is `0s`.
+## If you want to enable stats persistence, set the TTL to a value greater than 0.
+## It is recommended to set a small value, e.g., `3h`.
+ttl = "0s"
+## The interval to persist the stats. Default is `10m`.
+## The minimum value is `10m`, if the value is less than `10m`, it will be overridden to `10m`.
+interval = "10m"

 ## The logging options.
 [logging]
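With the new defaults, stats persistence is disabled out of the box. A minimal sketch of a config that re-enables it along the lines the comments above recommend (values illustrative):

```toml
[stats_persistence]
## Any TTL greater than 0 enables persistence; a small value such as 3h is the suggested choice.
ttl = "3h"
## Values below 10m are overridden to 10m.
interval = "10m"
```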
@@ -30,22 +30,7 @@ curl https://raw.githubusercontent.com/brendangregg/FlameGraph/master/flamegraph

 ## Profiling

-### Configuration
-
-You can control heap profiling activation through configuration. Add the following to your configuration file:
-
-```toml
-[memory]
-# Whether to enable heap profiling activation during startup.
-# When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable
-# is set to "prof:true,prof_active:false". The official image adds this env variable.
-# Default is true.
-enable_heap_profiling = true
-```
-
-By default, if you set `MALLOC_CONF=prof:true,prof_active:false`, the database will enable profiling during startup. You can disable this behavior by setting `enable_heap_profiling = false` in the configuration.
-
-### Starting with environment variables
+### Enable memory profiling for greptimedb binary

 Start GreptimeDB instance with environment variables:

@@ -57,6 +42,22 @@ MALLOC_CONF=prof:true ./target/debug/greptime standalone start
 _RJEM_MALLOC_CONF=prof:true ./target/debug/greptime standalone start
 ```

+### Memory profiling for greptimedb docker image
+
+We have memory profiling enabled and activated by default in our official docker
+image.
+
+This behavior is controlled by configuration `enable_heap_profiling`:
+
+```toml
+[memory]
+# Whether to enable heap profiling activation during startup.
+# Default is true.
+enable_heap_profiling = true
+```
+
+To disable memory profiling, set `enable_heap_profiling` to `false`.
+
 ### Memory profiling control

 You can control heap profiling activation using the new HTTP APIs:
(File diff suppressed because it is too large.)
@@ -87,6 +87,13 @@
 | Other Request P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme, operation) (rate(opendal_operation_duration_seconds_bucket{instance=~"$datanode", operation!~"read\|write\|list\|Writer::write\|Writer::close\|Reader::read"}[$__rate_interval])))` | `timeseries` | Other Request P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
 | Opendal traffic | `sum by(instance, pod, scheme, operation) (rate(opendal_operation_bytes_sum{instance=~"$datanode"}[$__rate_interval]))` | `timeseries` | Total traffic as in bytes by instance and operation | `prometheus` | `decbytes` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
 | OpenDAL errors per Instance | `sum by(instance, pod, scheme, operation, error) (rate(opendal_operation_errors_total{instance=~"$datanode", error!="NotFound"}[$__rate_interval]))` | `timeseries` | OpenDAL error counts per Instance. | `prometheus` | -- | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]-[{{error}}]` |
+# Remote WAL
+| Title | Query | Type | Description | Datasource | Unit | Legend Format |
+| --- | --- | --- | --- | --- | --- | --- |
+| Triggered region flush total | `meta_triggered_region_flush_total` | `timeseries` | Triggered region flush total | `prometheus` | `none` | `{{pod}}-{{topic_name}}` |
+| Triggered region checkpoint total | `meta_triggered_region_checkpoint_total` | `timeseries` | Triggered region checkpoint total | `prometheus` | `none` | `{{pod}}-{{topic_name}}` |
+| Topic estimated replay size | `meta_topic_estimated_replay_size` | `timeseries` | Topic estimated max replay size | `prometheus` | `bytes` | `{{pod}}-{{topic_name}}` |
+| Kafka logstore's bytes traffic | `rate(greptime_logstore_kafka_client_bytes_total[$__rate_interval])` | `timeseries` | Kafka logstore's bytes traffic | `prometheus` | `bytes` | `{{pod}}-{{logstore}}` |
 # Metasrv
 | Title | Query | Type | Description | Datasource | Unit | Legend Format |
 | --- | --- | --- | --- | --- | --- | --- |
@@ -103,6 +110,8 @@
 | Meta KV Ops Latency | `histogram_quantile(0.99, sum by(pod, le, op, target) (greptime_meta_kv_request_elapsed_bucket))` | `timeseries` | Gauge of load information of each datanode, collected via heartbeat between datanode and metasrv. This information is for metasrv to schedule workloads. | `prometheus` | `s` | `{{pod}}-{{op}} p99` |
 | Rate of meta KV Ops | `rate(greptime_meta_kv_request_elapsed_count[$__rate_interval])` | `timeseries` | Gauge of load information of each datanode, collected via heartbeat between datanode and metasrv. This information is for metasrv to schedule workloads. | `prometheus` | `none` | `{{pod}}-{{op}} p99` |
 | DDL Latency | `histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_create_tables_bucket))`<br/>`histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_create_table))`<br/>`histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_create_view))`<br/>`histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_create_flow))`<br/>`histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_drop_table))`<br/>`histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_alter_table))` | `timeseries` | Gauge of load information of each datanode, collected via heartbeat between datanode and metasrv. This information is for metasrv to schedule workloads. | `prometheus` | `s` | `CreateLogicalTables-{{step}} p90` |
+| Reconciliation stats | `greptime_meta_reconciliation_stats` | `timeseries` | Reconciliation stats | `prometheus` | `s` | `{{pod}}-{{table_type}}-{{type}}` |
+| Reconciliation steps | `histogram_quantile(0.9, greptime_meta_reconciliation_procedure_bucket)` | `timeseries` | Elapsed of Reconciliation steps | `prometheus` | `s` | `{{procedure_name}}-{{step}}-P90` |
 # Flownode
 | Title | Query | Type | Description | Datasource | Unit | Legend Format |
 | --- | --- | --- | --- | --- | --- | --- |
@@ -802,6 +802,48 @@ groups:
           type: prometheus
           uid: ${metrics}
         legendFormat: '[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]-[{{error}}]'
+  - title: Remote WAL
+    panels:
+      - title: Triggered region flush total
+        type: timeseries
+        description: Triggered region flush total
+        unit: none
+        queries:
+          - expr: meta_triggered_region_flush_total
+            datasource:
+              type: prometheus
+              uid: ${metrics}
+            legendFormat: '{{pod}}-{{topic_name}}'
+      - title: Triggered region checkpoint total
+        type: timeseries
+        description: Triggered region checkpoint total
+        unit: none
+        queries:
+          - expr: meta_triggered_region_checkpoint_total
+            datasource:
+              type: prometheus
+              uid: ${metrics}
+            legendFormat: '{{pod}}-{{topic_name}}'
+      - title: Topic estimated replay size
+        type: timeseries
+        description: Topic estimated max replay size
+        unit: bytes
+        queries:
+          - expr: meta_topic_estimated_replay_size
+            datasource:
+              type: prometheus
+              uid: ${metrics}
+            legendFormat: '{{pod}}-{{topic_name}}'
+      - title: Kafka logstore's bytes traffic
+        type: timeseries
+        description: Kafka logstore's bytes traffic
+        unit: bytes
+        queries:
+          - expr: rate(greptime_logstore_kafka_client_bytes_total[$__rate_interval])
+            datasource:
+              type: prometheus
+              uid: ${metrics}
+            legendFormat: '{{pod}}-{{logstore}}'
   - title: Metasrv
     panels:
       - title: Region migration datanode
@@ -948,6 +990,26 @@ groups:
           type: prometheus
           uid: ${metrics}
         legendFormat: AlterTable-{{step}} p90
+      - title: Reconciliation stats
+        type: timeseries
+        description: Reconciliation stats
+        unit: s
+        queries:
+          - expr: greptime_meta_reconciliation_stats
+            datasource:
+              type: prometheus
+              uid: ${metrics}
+            legendFormat: '{{pod}}-{{table_type}}-{{type}}'
+      - title: Reconciliation steps
+        type: timeseries
+        description: 'Elapsed of Reconciliation steps '
+        unit: s
+        queries:
+          - expr: histogram_quantile(0.9, greptime_meta_reconciliation_procedure_bucket)
+            datasource:
+              type: prometheus
+              uid: ${metrics}
+            legendFormat: '{{procedure_name}}-{{step}}-P90'
   - title: Flownode
     panels:
       - title: Flow Ingest / Output Rate
(File diff suppressed because it is too large.)
@@ -87,6 +87,13 @@
 | Other Request P99 per Instance | `histogram_quantile(0.99, sum by(instance, pod, le, scheme, operation) (rate(opendal_operation_duration_seconds_bucket{ operation!~"read\|write\|list\|Writer::write\|Writer::close\|Reader::read"}[$__rate_interval])))` | `timeseries` | Other Request P99 per Instance. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
 | Opendal traffic | `sum by(instance, pod, scheme, operation) (rate(opendal_operation_bytes_sum{}[$__rate_interval]))` | `timeseries` | Total traffic as in bytes by instance and operation | `prometheus` | `decbytes` | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]` |
 | OpenDAL errors per Instance | `sum by(instance, pod, scheme, operation, error) (rate(opendal_operation_errors_total{ error!="NotFound"}[$__rate_interval]))` | `timeseries` | OpenDAL error counts per Instance. | `prometheus` | -- | `[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]-[{{error}}]` |
+# Remote WAL
+| Title | Query | Type | Description | Datasource | Unit | Legend Format |
+| --- | --- | --- | --- | --- | --- | --- |
+| Triggered region flush total | `meta_triggered_region_flush_total` | `timeseries` | Triggered region flush total | `prometheus` | `none` | `{{pod}}-{{topic_name}}` |
+| Triggered region checkpoint total | `meta_triggered_region_checkpoint_total` | `timeseries` | Triggered region checkpoint total | `prometheus` | `none` | `{{pod}}-{{topic_name}}` |
+| Topic estimated replay size | `meta_topic_estimated_replay_size` | `timeseries` | Topic estimated max replay size | `prometheus` | `bytes` | `{{pod}}-{{topic_name}}` |
+| Kafka logstore's bytes traffic | `rate(greptime_logstore_kafka_client_bytes_total[$__rate_interval])` | `timeseries` | Kafka logstore's bytes traffic | `prometheus` | `bytes` | `{{pod}}-{{logstore}}` |
 # Metasrv
 | Title | Query | Type | Description | Datasource | Unit | Legend Format |
 | --- | --- | --- | --- | --- | --- | --- |
@@ -103,6 +110,8 @@
 | Meta KV Ops Latency | `histogram_quantile(0.99, sum by(pod, le, op, target) (greptime_meta_kv_request_elapsed_bucket))` | `timeseries` | Gauge of load information of each datanode, collected via heartbeat between datanode and metasrv. This information is for metasrv to schedule workloads. | `prometheus` | `s` | `{{pod}}-{{op}} p99` |
 | Rate of meta KV Ops | `rate(greptime_meta_kv_request_elapsed_count[$__rate_interval])` | `timeseries` | Gauge of load information of each datanode, collected via heartbeat between datanode and metasrv. This information is for metasrv to schedule workloads. | `prometheus` | `none` | `{{pod}}-{{op}} p99` |
 | DDL Latency | `histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_create_tables_bucket))`<br/>`histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_create_table))`<br/>`histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_create_view))`<br/>`histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_create_flow))`<br/>`histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_drop_table))`<br/>`histogram_quantile(0.9, sum by(le, pod, step) (greptime_meta_procedure_alter_table))` | `timeseries` | Gauge of load information of each datanode, collected via heartbeat between datanode and metasrv. This information is for metasrv to schedule workloads. | `prometheus` | `s` | `CreateLogicalTables-{{step}} p90` |
+| Reconciliation stats | `greptime_meta_reconciliation_stats` | `timeseries` | Reconciliation stats | `prometheus` | `s` | `{{pod}}-{{table_type}}-{{type}}` |
+| Reconciliation steps | `histogram_quantile(0.9, greptime_meta_reconciliation_procedure_bucket)` | `timeseries` | Elapsed of Reconciliation steps | `prometheus` | `s` | `{{procedure_name}}-{{step}}-P90` |
 # Flownode
 | Title | Query | Type | Description | Datasource | Unit | Legend Format |
 | --- | --- | --- | --- | --- | --- | --- |
@@ -802,6 +802,48 @@ groups:
           type: prometheus
           uid: ${metrics}
         legendFormat: '[{{instance}}]-[{{pod}}]-[{{scheme}}]-[{{operation}}]-[{{error}}]'
+  - title: Remote WAL
+    panels:
+      - title: Triggered region flush total
+        type: timeseries
+        description: Triggered region flush total
+        unit: none
+        queries:
+          - expr: meta_triggered_region_flush_total
+            datasource:
+              type: prometheus
+              uid: ${metrics}
+            legendFormat: '{{pod}}-{{topic_name}}'
+      - title: Triggered region checkpoint total
+        type: timeseries
+        description: Triggered region checkpoint total
+        unit: none
+        queries:
+          - expr: meta_triggered_region_checkpoint_total
+            datasource:
+              type: prometheus
+              uid: ${metrics}
+            legendFormat: '{{pod}}-{{topic_name}}'
+      - title: Topic estimated replay size
+        type: timeseries
+        description: Topic estimated max replay size
+        unit: bytes
+        queries:
+          - expr: meta_topic_estimated_replay_size
+            datasource:
+              type: prometheus
+              uid: ${metrics}
+            legendFormat: '{{pod}}-{{topic_name}}'
+      - title: Kafka logstore's bytes traffic
+        type: timeseries
+        description: Kafka logstore's bytes traffic
+        unit: bytes
+        queries:
+          - expr: rate(greptime_logstore_kafka_client_bytes_total[$__rate_interval])
+            datasource:
+              type: prometheus
+              uid: ${metrics}
+            legendFormat: '{{pod}}-{{logstore}}'
   - title: Metasrv
     panels:
       - title: Region migration datanode
@@ -948,6 +990,26 @@ groups:
           type: prometheus
           uid: ${metrics}
         legendFormat: AlterTable-{{step}} p90
+      - title: Reconciliation stats
+        type: timeseries
+        description: Reconciliation stats
+        unit: s
+        queries:
+          - expr: greptime_meta_reconciliation_stats
+            datasource:
+              type: prometheus
+              uid: ${metrics}
+            legendFormat: '{{pod}}-{{table_type}}-{{type}}'
+      - title: Reconciliation steps
+        type: timeseries
+        description: 'Elapsed of Reconciliation steps '
+        unit: s
+        queries:
+          - expr: histogram_quantile(0.9, greptime_meta_reconciliation_procedure_bucket)
+            datasource:
+              type: prometheus
+              uid: ${metrics}
+            legendFormat: '{{procedure_name}}-{{step}}-P90'
   - title: Flownode
     panels:
       - title: Flow Ingest / Output Rate
@@ -19,8 +19,8 @@ use common_error::status_code::StatusCode;
 use common_macro::stack_trace_debug;
 use common_time::timestamp::TimeUnit;
 use datatypes::prelude::ConcreteDataType;
-use snafu::prelude::*;
 use snafu::Location;
+use snafu::prelude::*;

 pub type Result<T> = std::result::Result<T, Error>;
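The import reshuffling in this and the following Rust hunks lines up with the workspace edition bump: the 2024 formatting style sorts imports ASCII-betically, so uppercase items such as `OptionExt` or `COMMENT_KEY` now sort ahead of lowercase ones such as `ensure`. A hedged sketch of the rustfmt setting that selects this behaviour (the repository's actual rustfmt.toml is not part of this diff):

```toml
# rustfmt.toml -- hypothetical; shown only to illustrate the formatting change behind these hunks.
style_edition = "2024"
```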
@@ -16,15 +16,15 @@ use std::collections::HashSet;
 use std::sync::Arc;

 use common_base::BitVec;
-use common_decimal::decimal128::{DECIMAL128_DEFAULT_SCALE, DECIMAL128_MAX_PRECISION};
 use common_decimal::Decimal128;
+use common_decimal::decimal128::{DECIMAL128_DEFAULT_SCALE, DECIMAL128_MAX_PRECISION};
 use common_time::time::Time;
 use common_time::timestamp::TimeUnit;
 use common_time::{Date, IntervalDayTime, IntervalMonthDayNano, IntervalYearMonth, Timestamp};
 use datatypes::prelude::{ConcreteDataType, ValueRef};
 use datatypes::scalars::ScalarVector;
 use datatypes::types::{
-    Int16Type, Int8Type, IntervalType, TimeType, TimestampType, UInt16Type, UInt8Type,
+    Int8Type, Int16Type, IntervalType, TimeType, TimestampType, UInt8Type, UInt16Type,
 };
 use datatypes::value::{OrderedF32, OrderedF64, Value};
 use datatypes::vectors::{
@@ -295,7 +295,7 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
             | ConcreteDataType::Struct(_)
             | ConcreteDataType::Dictionary(_)
             | ConcreteDataType::Duration(_) => {
-                return error::IntoColumnDataTypeSnafu { from: datatype }.fail()
+                return error::IntoColumnDataTypeSnafu { from: datatype }.fail();
             }
         };
         let datatype_extension = match column_datatype {
@@ -15,9 +15,9 @@
 use std::collections::HashMap;

 use datatypes::schema::{
-    ColumnDefaultConstraint, ColumnSchema, FulltextAnalyzer, FulltextBackend, FulltextOptions,
-    SkippingIndexOptions, SkippingIndexType, COMMENT_KEY, FULLTEXT_KEY, INVERTED_INDEX_KEY,
-    SKIPPING_INDEX_KEY,
+    COMMENT_KEY, ColumnDefaultConstraint, ColumnSchema, FULLTEXT_KEY, FulltextAnalyzer,
+    FulltextBackend, FulltextOptions, INVERTED_INDEX_KEY, SKIPPING_INDEX_KEY, SkippingIndexOptions,
+    SkippingIndexType,
 };
 use greptime_proto::v1::{
     Analyzer, FulltextBackend as PbFulltextBackend, SkippingIndexType as PbSkippingIndexType,
@@ -17,13 +17,13 @@ use std::sync::Arc;
 use common_base::secrets::SecretString;
 use digest::Digest;
 use sha1::Sha1;
-use snafu::{ensure, OptionExt};
+use snafu::{OptionExt, ensure};

 use crate::error::{IllegalParamSnafu, InvalidConfigSnafu, Result, UserPasswordMismatchSnafu};
 use crate::user_info::DefaultUserInfo;
-use crate::user_provider::static_user_provider::{StaticUserProvider, STATIC_USER_PROVIDER};
+use crate::user_provider::static_user_provider::{STATIC_USER_PROVIDER, StaticUserProvider};
 use crate::user_provider::watch_file_user_provider::{
-    WatchFileUserProvider, WATCH_FILE_USER_PROVIDER,
+    WATCH_FILE_USER_PROVIDER, WatchFileUserProvider,
 };
 use crate::{UserInfoRef, UserProviderRef};
@@ -22,13 +22,13 @@ mod user_provider;
 pub mod tests;

 pub use common::{
-    auth_mysql, static_user_provider_from_option, user_provider_from_option, userinfo_by_name,
-    HashedPassword, Identity, Password,
+    HashedPassword, Identity, Password, auth_mysql, static_user_provider_from_option,
+    user_provider_from_option, userinfo_by_name,
 };
 pub use permission::{PermissionChecker, PermissionReq, PermissionResp};
 pub use user_info::UserInfo;
-pub use user_provider::static_user_provider::StaticUserProvider;
 pub use user_provider::UserProvider;
+pub use user_provider::static_user_provider::StaticUserProvider;

 /// pub type alias
 pub type UserInfoRef = std::sync::Arc<dyn UserInfo>;
@@ -21,7 +21,7 @@ use crate::error::{
     UserPasswordMismatchSnafu,
 };
 use crate::user_info::DefaultUserInfo;
-use crate::{auth_mysql, Identity, Password, UserInfoRef, UserProvider};
+use crate::{Identity, Password, UserInfoRef, UserProvider, auth_mysql};

 pub struct DatabaseAuthInfo<'a> {
     pub catalog: &'a str,
@@ -22,7 +22,7 @@ use std::io::BufRead;
 use std::path::Path;

 use common_base::secrets::ExposeSecret;
-use snafu::{ensure, OptionExt, ResultExt};
+use snafu::{OptionExt, ResultExt, ensure};

 use crate::common::{Identity, Password};
 use crate::error::{
@@ -30,7 +30,7 @@ use crate::error::{
     UserNotFoundSnafu, UserPasswordMismatchSnafu,
 };
 use crate::user_info::DefaultUserInfo;
-use crate::{auth_mysql, UserInfoRef};
+use crate::{UserInfoRef, auth_mysql};

 #[async_trait::async_trait]
 pub trait UserProvider: Send + Sync {
@@ -102,10 +102,10 @@ pub mod test {

     use common_test_util::temp_dir::create_temp_dir;

+    use crate::UserProvider;
     use crate::user_info::DefaultUserInfo;
     use crate::user_provider::static_user_provider::StaticUserProvider;
     use crate::user_provider::{Identity, Password};
-    use crate::UserProvider;

     async fn test_authenticate(provider: &dyn UserProvider, username: &str, password: &str) {
         let re = provider
@@ -143,12 +143,13 @@
         let file = File::create(&file_path);
         let file = file.unwrap();
         let mut lw = LineWriter::new(file);
-        assert!(lw
-            .write_all(
+        assert!(
+            lw.write_all(
                 b"root=123456
 admin=654321",
             )
-            .is_ok());
+            .is_ok()
+        );
         lw.flush().unwrap();
     }
@@ -20,7 +20,7 @@ use std::sync::{Arc, Mutex};
 use async_trait::async_trait;
 use common_telemetry::{info, warn};
 use notify::{EventKind, RecursiveMode, Watcher};
-use snafu::{ensure, ResultExt};
+use snafu::{ResultExt, ensure};

 use crate::error::{FileWatchSnafu, InvalidConfigSnafu, Result};
 use crate::user_info::DefaultUserInfo;
@@ -133,9 +133,9 @@ pub mod test {
     use common_test_util::temp_dir::create_temp_dir;
     use tokio::time::sleep;

+    use crate::UserProvider;
     use crate::user_provider::watch_file_user_provider::WatchFileUserProvider;
     use crate::user_provider::{Identity, Password};
-    use crate::UserProvider;

     async fn test_authenticate(
         provider: &dyn UserProvider,
src/cache/src/lib.rs (vendored, 6 lines changed)
@@ -19,9 +19,9 @@ use std::time::Duration;

 use catalog::kvbackend::new_table_cache;
 use common_meta::cache::{
-    new_schema_cache, new_table_flownode_set_cache, new_table_info_cache, new_table_name_cache,
-    new_table_route_cache, new_table_schema_cache, new_view_info_cache, CacheRegistry,
-    CacheRegistryBuilder, LayeredCacheRegistryBuilder,
+    CacheRegistry, CacheRegistryBuilder, LayeredCacheRegistryBuilder, new_schema_cache,
+    new_table_flownode_set_cache, new_table_info_cache, new_table_name_cache,
+    new_table_route_cache, new_table_schema_cache, new_view_info_cache,
 };
 use common_meta::kv_backend::KvBackendRef;
 use moka::future::CacheBuilder;
@@ -32,6 +32,7 @@ common-runtime.workspace = true
 common-telemetry.workspace = true
 common-time.workspace = true
 common-version.workspace = true
 common-workload.workspace = true
+dashmap.workspace = true
 datafusion.workspace = true
 datatypes.workspace = true
@@ -48,6 +49,7 @@ prometheus.workspace = true
 promql-parser.workspace = true
 rand.workspace = true
 rustc-hash.workspace = true
 serde.workspace = true
+serde_json.workspace = true
 session.workspace = true
 snafu.workspace = true
@@ -297,6 +297,20 @@
         #[snafu(implicit)]
         location: Location,
     },
+
+    #[snafu(display("Failed to handle query"))]
+    HandleQuery {
+        source: common_meta::error::Error,
+        #[snafu(implicit)]
+        location: Location,
+    },
+
+    #[snafu(display("Failed to project schema"))]
+    ProjectSchema {
+        source: datatypes::error::Error,
+        #[snafu(implicit)]
+        location: Location,
+    },
 }

 impl Error {
@@ -369,6 +383,8 @@ impl ErrorExt for Error {
             Error::FrontendNotFound { .. } | Error::MetaClientMissing { .. } => {
                 StatusCode::Unexpected
             }
+            Error::HandleQuery { source, .. } => source.status_code(),
+            Error::ProjectSchema { source, .. } => source.status_code(),
         }
     }
@@ -14,25 +14,34 @@

 use api::v1::meta::ProcedureStatus;
 use common_error::ext::BoxedError;
-use common_meta::cluster::{ClusterInfo, NodeInfo};
+use common_meta::cluster::{ClusterInfo, NodeInfo, Role};
 use common_meta::datanode::RegionStat;
 use common_meta::key::flow::flow_state::FlowStat;
+use common_meta::node_manager::DatanodeManagerRef;
 use common_meta::procedure_executor::{ExecutorContext, ProcedureExecutor};
 use common_meta::rpc::procedure;
 use common_procedure::{ProcedureInfo, ProcedureState};
+use common_query::request::QueryRequest;
 use common_recordbatch::SendableRecordBatchStream;
+use common_recordbatch::util::ChainedRecordBatchStream;
 use meta_client::MetaClientRef;
 use snafu::ResultExt;
+use store_api::storage::RegionId;

 use crate::error;
-use crate::information_schema::InformationExtension;
+use crate::information_schema::{DatanodeInspectRequest, InformationExtension};

 pub struct DistributedInformationExtension {
     meta_client: MetaClientRef,
+    datanode_manager: DatanodeManagerRef,
 }

 impl DistributedInformationExtension {
-    pub fn new(meta_client: MetaClientRef) -> Self {
-        Self { meta_client }
+    pub fn new(meta_client: MetaClientRef, datanode_manager: DatanodeManagerRef) -> Self {
+        Self {
+            meta_client,
+            datanode_manager,
+        }
     }
 }

@@ -98,4 +107,39 @@ impl InformationExtension for DistributedInformationExtension {
             .map_err(BoxedError::new)
             .context(crate::error::ListFlowStatsSnafu)
     }
+
+    async fn inspect_datanode(
+        &self,
+        request: DatanodeInspectRequest,
+    ) -> std::result::Result<SendableRecordBatchStream, Self::Error> {
+        // Aggregate results from all datanodes
+        let nodes = self
+            .meta_client
+            .list_nodes(Some(Role::Datanode))
+            .await
+            .map_err(BoxedError::new)
+            .context(crate::error::ListNodesSnafu)?;
+
+        let plan = request
+            .build_plan()
+            .context(crate::error::DatafusionSnafu)?;
+
+        let mut streams = Vec::with_capacity(nodes.len());
+        for node in nodes {
+            let client = self.datanode_manager.datanode(&node.peer).await;
+            let stream = client
+                .handle_query(QueryRequest {
+                    plan: plan.clone(),
+                    region_id: RegionId::default(),
+                    header: None,
+                })
+                .await
+                .context(crate::error::HandleQuerySnafu)?;
+            streams.push(stream);
+        }
+
+        let chained =
+            ChainedRecordBatchStream::new(streams).context(crate::error::CreateRecordBatchSnafu)?;
+        Ok(Box::pin(chained))
+    }
 }
@@ -21,4 +21,4 @@ mod table_cache;

 pub use builder::KvBackendCatalogManagerBuilder;
 pub use manager::KvBackendCatalogManager;
-pub use table_cache::{new_table_cache, TableCache, TableCacheRef};
+pub use table_cache::{TableCache, TableCacheRef, new_table_cache};
@@ -16,8 +16,8 @@ use std::sync::Arc;

 use common_catalog::consts::DEFAULT_CATALOG_NAME;
 use common_meta::cache::LayeredCacheRegistryRef;
-use common_meta::key::flow::FlowMetadataManager;
 use common_meta::key::TableMetadataManager;
+use common_meta::key::flow::FlowMetadataManager;
 use common_meta::kv_backend::KvBackendRef;
 use common_procedure::ProcedureManagerRef;
 use moka::sync::Cache;
@@ -26,8 +26,8 @@ use partition::manager::PartitionRuleManager;
 #[cfg(feature = "enterprise")]
 use crate::information_schema::InformationSchemaTableFactoryRef;
 use crate::information_schema::{InformationExtensionRef, InformationSchemaProvider};
-use crate::kvbackend::manager::{SystemCatalog, CATALOG_CACHE_MAX_CAPACITY};
 use crate::kvbackend::KvBackendCatalogManager;
+use crate::kvbackend::manager::{CATALOG_CACHE_MAX_CAPACITY, SystemCatalog};
 use crate::process_manager::ProcessManagerRef;
 use crate::system_schema::pg_catalog::PGCatalogProvider;
@@ -24,12 +24,12 @@ use common_meta::error::Error::CacheNotGet;
use common_meta::error::{CacheNotGetSnafu, Error, ExternalSnafu, GetKvCacheSnafu, Result};
use common_meta::kv_backend::txn::{Txn, TxnResponse};
use common_meta::kv_backend::{KvBackend, KvBackendRef, TxnService};
use common_meta::rpc::KeyValue;
use common_meta::rpc::store::{
BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse, BatchPutRequest,
BatchPutResponse, CompareAndPutRequest, CompareAndPutResponse, DeleteRangeRequest,
DeleteRangeResponse, PutRequest, PutResponse, RangeRequest, RangeResponse,
};
use common_meta::rpc::KeyValue;
use common_telemetry::debug;
use meta_client::client::MetaClient;
use moka::future::{Cache, CacheBuilder};
@@ -461,17 +461,17 @@ impl KvBackend for MetaKvBackend {
#[cfg(test)]
mod tests {
use std::any::Any;
use std::sync::atomic::{AtomicU32, Ordering};
use std::sync::Arc;
use std::sync::atomic::{AtomicU32, Ordering};

use async_trait::async_trait;
use common_meta::kv_backend::{KvBackend, TxnService};
use common_meta::rpc::KeyValue;
use common_meta::rpc::store::{
BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse,
BatchPutRequest, BatchPutResponse, DeleteRangeRequest, DeleteRangeResponse, PutRequest,
PutResponse, RangeRequest, RangeResponse,
};
use common_meta::rpc::KeyValue;
use dashmap::DashMap;

use super::CachedKvBackend;

@@ -26,12 +26,12 @@ use common_meta::cache::{
LayeredCacheRegistryRef, TableInfoCacheRef, TableNameCacheRef, TableRoute, TableRouteCacheRef,
ViewInfoCacheRef,
};
use common_meta::key::TableMetadataManagerRef;
use common_meta::key::catalog_name::CatalogNameKey;
use common_meta::key::flow::FlowMetadataManager;
use common_meta::key::schema_name::SchemaNameKey;
use common_meta::key::table_info::{TableInfoManager, TableInfoValue};
use common_meta::key::table_name::TableNameKey;
use common_meta::key::TableMetadataManagerRef;
use common_meta::kv_backend::KvBackendRef;
use common_procedure::ProcedureManagerRef;
use futures_util::stream::BoxStream;
@@ -41,15 +41,16 @@ use partition::manager::PartitionRuleManagerRef;
use session::context::{Channel, QueryContext};
use snafu::prelude::*;
use store_api::metric_engine_consts::METRIC_ENGINE_NAME;
use table::TableRef;
use table::dist_table::DistTable;
use table::metadata::{TableId, TableInfoRef};
use table::table::numbers::{NumbersTable, NUMBERS_TABLE_NAME};
use table::table::PartitionRules;
use table::table::numbers::{NUMBERS_TABLE_NAME, NumbersTable};
use table::table_name::TableName;
use table::TableRef;
use tokio::sync::Semaphore;
use tokio_stream::wrappers::ReceiverStream;

use crate::CatalogManager;
use crate::error::{
CacheNotFoundSnafu, GetTableCacheSnafu, InvalidTableInfoInCatalogSnafu, ListCatalogsSnafu,
ListSchemasSnafu, ListTablesSnafu, Result, TableMetadataManagerSnafu,
@@ -59,9 +60,8 @@ use crate::information_schema::InformationSchemaTableFactoryRef;
use crate::information_schema::{InformationExtensionRef, InformationSchemaProvider};
use crate::kvbackend::TableCacheRef;
use crate::process_manager::ProcessManagerRef;
use crate::system_schema::pg_catalog::PGCatalogProvider;
use crate::system_schema::SystemSchemaProvider;
use crate::CatalogManager;
use crate::system_schema::pg_catalog::PGCatalogProvider;

/// Access all existing catalog, schema and tables.
///

@@ -20,9 +20,9 @@ use common_meta::instruction::CacheIdent;
use futures::future::BoxFuture;
use moka::future::Cache;
use snafu::OptionExt;
use table::TableRef;
use table::dist_table::DistTable;
use table::table_name::TableName;
use table::TableRef;

pub type TableCacheRef = Arc<TableCache>;

@@ -25,8 +25,8 @@ use common_catalog::consts::{INFORMATION_SCHEMA_NAME, PG_CATALOG_NAME};
use futures::future::BoxFuture;
use futures_util::stream::BoxStream;
use session::context::QueryContext;
use table::metadata::{TableId, TableInfoRef};
use table::TableRef;
use table::metadata::{TableId, TableInfoRef};

use crate::error::Result;

@@ -14,4 +14,4 @@

pub mod manager;

pub use manager::{new_memory_catalog_manager, MemoryCatalogManager};
pub use manager::{MemoryCatalogManager, new_memory_catalog_manager};

@@ -28,8 +28,8 @@ use common_meta::kv_backend::memory::MemoryKvBackend;
use futures_util::stream::BoxStream;
use session::context::QueryContext;
use snafu::OptionExt;
use table::metadata::{TableId, TableInfoRef};
use table::TableRef;
use table::metadata::{TableId, TableInfoRef};

use crate::error::{CatalogNotFoundSnafu, Result, SchemaNotFoundSnafu, TableExistsSnafu};
use crate::information_schema::InformationSchemaProvider;
@@ -419,7 +419,7 @@ pub fn new_memory_catalog_manager() -> Result<Arc<MemoryCatalogManager>> {
mod tests {
use common_catalog::consts::*;
use futures_util::TryStreamExt;
use table::table::numbers::{NumbersTable, NUMBERS_TABLE_NAME};
use table::table::numbers::{NUMBERS_TABLE_NAME, NumbersTable};

use super::*;

@@ -454,16 +454,18 @@ mod tests {
tables[0].table_info().table_id()
);

assert!(catalog_list
.table(
DEFAULT_CATALOG_NAME,
DEFAULT_SCHEMA_NAME,
"not_exists",
None
)
.await
.unwrap()
.is_none());
assert!(
catalog_list
.table(
DEFAULT_CATALOG_NAME,
DEFAULT_SCHEMA_NAME,
"not_exists",
None
)
.await
.unwrap()
.is_none()
);
}

#[test]
@@ -486,11 +488,13 @@ mod tests {
table: NumbersTable::table(2333),
};
catalog.register_table_sync(register_table_req).unwrap();
assert!(catalog
.table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, table_name, None)
.await
.unwrap()
.is_some());
assert!(
catalog
.table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, table_name, None)
.await
.unwrap()
.is_some()
);

let deregister_table_req = DeregisterTableRequest {
catalog: DEFAULT_CATALOG_NAME.to_string(),
@@ -498,10 +502,12 @@ mod tests {
table_name: table_name.to_string(),
};
catalog.deregister_table_sync(deregister_table_req).unwrap();
assert!(catalog
.table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, table_name, None)
.await
.unwrap()
.is_none());
assert!(
catalog
.table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, table_name, None)
.await
.unwrap()
.is_none()
);
}
}

@@ -12,8 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::collections::hash_map::Entry;
use std::collections::HashMap;
use std::collections::hash_map::Entry;
use std::fmt::{Debug, Display, Formatter};
use std::sync::atomic::{AtomicU32, Ordering};
use std::sync::{Arc, RwLock};
@@ -30,7 +30,7 @@ use common_time::util::current_time_millis;
use meta_client::MetaClientRef;
use promql_parser::parser::EvalStmt;
use rand::random;
use snafu::{ensure, OptionExt, ResultExt};
use snafu::{OptionExt, ResultExt, ensure};
use sql::statements::statement::Statement;

use crate::error;

@@ -36,22 +36,26 @@ use common_catalog::consts::{self, DEFAULT_CATALOG_NAME, INFORMATION_SCHEMA_NAME
use common_error::ext::ErrorExt;
use common_meta::cluster::NodeInfo;
use common_meta::datanode::RegionStat;
use common_meta::key::flow::flow_state::FlowStat;
use common_meta::key::flow::FlowMetadataManager;
use common_meta::key::flow::flow_state::FlowStat;
use common_meta::kv_backend::KvBackendRef;
use common_procedure::ProcedureInfo;
use common_recordbatch::SendableRecordBatchStream;
use datafusion::error::DataFusionError;
use datafusion::logical_expr::LogicalPlan;
use datatypes::schema::SchemaRef;
use lazy_static::lazy_static;
use paste::paste;
use process_list::InformationSchemaProcessList;
use store_api::sst_entry::{ManifestSstEntry, StorageSstEntry};
use store_api::storage::{ScanRequest, TableId};
use table::metadata::TableType;
use table::TableRef;
use table::metadata::TableType;
pub use table_names::*;
use views::InformationSchemaViews;

use self::columns::InformationSchemaColumns;
use crate::CatalogManager;
use crate::error::{Error, Result};
use crate::process_manager::ProcessManagerRef;
use crate::system_schema::information_schema::cluster_info::InformationSchemaClusterInfo;
@@ -69,7 +73,6 @@ pub(crate) use crate::system_schema::predicate::Predicates;
use crate::system_schema::{
SystemSchemaProvider, SystemSchemaProviderInner, SystemTable, SystemTableRef,
};
use crate::CatalogManager;

lazy_static! {
// Memory tables in `information_schema`.
@@ -409,8 +412,43 @@ pub trait InformationExtension {

/// Get the flow statistics. If no flownode is available, return `None`.
async fn flow_stats(&self) -> std::result::Result<Option<FlowStat>, Self::Error>;

/// Inspects the datanode.
async fn inspect_datanode(
&self,
request: DatanodeInspectRequest,
) -> std::result::Result<SendableRecordBatchStream, Self::Error>;
}

/// The request to inspect the datanode.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct DatanodeInspectRequest {
/// Kind to fetch from datanode.
pub kind: DatanodeInspectKind,

/// Pushdown scan configuration (projection/predicate/limit) for the returned stream.
/// This allows server-side filtering to reduce I/O and network costs.
pub scan: ScanRequest,
}

/// The kind of the datanode inspect request.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum DatanodeInspectKind {
/// List SST entries recorded in manifest
SstManifest,
/// List SST entries discovered in storage layer
SstStorage,
}

impl DatanodeInspectRequest {
/// Builds a logical plan for the datanode inspect request.
pub fn build_plan(self) -> std::result::Result<LogicalPlan, DataFusionError> {
match self.kind {
DatanodeInspectKind::SstManifest => ManifestSstEntry::build_plan(self.scan),
DatanodeInspectKind::SstStorage => StorageSstEntry::build_plan(self.scan),
}
}
}
pub struct NoopInformationExtension;

#[async_trait::async_trait]
@@ -432,4 +470,11 @@ impl InformationExtension for NoopInformationExtension {
async fn flow_stats(&self) -> std::result::Result<Option<FlowStat>, Self::Error> {
Ok(None)
}

async fn inspect_datanode(
&self,
_request: DatanodeInspectRequest,
) -> std::result::Result<SendableRecordBatchStream, Self::Error> {
Ok(common_recordbatch::RecordBatches::empty().as_stream())
}
}

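Note (added for illustration, not part of the diff): the new `DatanodeInspectRequest` pairs an inspect kind with a pushdown `ScanRequest`, and `build_plan` turns it into a DataFusion logical plan that the frontend then ships to every datanode via `handle_query`, chaining the returned streams. A minimal sketch, assuming `ScanRequest` implements `Default`:

// Hypothetical usage sketch: request the manifest-recorded SST entries and
// build the logical plan that would be sent to each datanode.
use store_api::storage::ScanRequest;

fn sst_manifest_plan() -> std::result::Result<LogicalPlan, DataFusionError> {
    let request = DatanodeInspectRequest {
        kind: DatanodeInspectKind::SstManifest,
        // An empty scan means no projection, no predicate, and no limit.
        scan: ScanRequest::default(),
    };
    request.build_plan()
}
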
@@ -18,37 +18,46 @@ use std::time::Duration;
use arrow_schema::SchemaRef as ArrowSchemaRef;
use common_catalog::consts::INFORMATION_SCHEMA_CLUSTER_INFO_TABLE_ID;
use common_error::ext::BoxedError;
use common_meta::cluster::NodeInfo;
use common_meta::cluster::{DatanodeStatus, NodeInfo, NodeStatus};
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
use common_time::timestamp::Timestamp;
use common_workload::DatanodeWorkloadType;
use datafusion::execution::TaskContext;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
use datatypes::prelude::{ConcreteDataType, ScalarVectorBuilder, VectorRef};
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use datatypes::timestamp::TimestampMillisecond;
use datatypes::value::Value;
use datatypes::vectors::{
Int64VectorBuilder, StringVectorBuilder, TimestampMillisecondVectorBuilder,
UInt32VectorBuilder, UInt64VectorBuilder,
};
use serde::Serialize;
use snafu::ResultExt;
use store_api::storage::{ScanRequest, TableId};

use crate::error::{CreateRecordBatchSnafu, InternalSnafu, Result};
use crate::system_schema::information_schema::{InformationTable, Predicates, CLUSTER_INFO};
use crate::system_schema::utils;
use crate::CatalogManager;
use crate::error::{CreateRecordBatchSnafu, InternalSnafu, Result};
use crate::system_schema::information_schema::{CLUSTER_INFO, InformationTable, Predicates};
use crate::system_schema::utils;

const PEER_TYPE_FRONTEND: &str = "FRONTEND";
const PEER_TYPE_METASRV: &str = "METASRV";

const PEER_ID: &str = "peer_id";
const PEER_TYPE: &str = "peer_type";
const PEER_ADDR: &str = "peer_addr";
const CPUS: &str = "cpus";
const MEMORY_BYTES: &str = "memory_bytes";
const VERSION: &str = "version";
const GIT_COMMIT: &str = "git_commit";
const START_TIME: &str = "start_time";
const UPTIME: &str = "uptime";
const ACTIVE_TIME: &str = "active_time";
const NODE_STATUS: &str = "node_status";

const INIT_CAPACITY: usize = 42;

@@ -57,11 +66,14 @@ const INIT_CAPACITY: usize = 42;
/// - `peer_id`: the peer server id.
/// - `peer_type`: the peer type, such as `datanode`, `frontend`, `metasrv` etc.
/// - `peer_addr`: the peer gRPC address.
/// - `cpus`: the number of CPUs of the peer.
/// - `memory_bytes`: the memory bytes of the peer.
/// - `version`: the build package version of the peer.
/// - `git_commit`: the build git commit hash of the peer.
/// - `start_time`: the starting time of the peer.
/// - `uptime`: the uptime of the peer.
/// - `active_time`: the time since the last activity of the peer.
/// - `node_status`: the status info of the peer.
///
#[derive(Debug)]
pub(super) struct InformationSchemaClusterInfo {
@@ -82,6 +94,8 @@ impl InformationSchemaClusterInfo {
ColumnSchema::new(PEER_ID, ConcreteDataType::int64_datatype(), false),
ColumnSchema::new(PEER_TYPE, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(PEER_ADDR, ConcreteDataType::string_datatype(), true),
ColumnSchema::new(CPUS, ConcreteDataType::uint32_datatype(), false),
ColumnSchema::new(MEMORY_BYTES, ConcreteDataType::uint64_datatype(), false),
ColumnSchema::new(VERSION, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(GIT_COMMIT, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(
@@ -91,6 +105,7 @@ impl InformationSchemaClusterInfo {
),
ColumnSchema::new(UPTIME, ConcreteDataType::string_datatype(), true),
ColumnSchema::new(ACTIVE_TIME, ConcreteDataType::string_datatype(), true),
ColumnSchema::new(NODE_STATUS, ConcreteDataType::string_datatype(), true),
]))
}

@@ -140,11 +155,14 @@ struct InformationSchemaClusterInfoBuilder {
peer_ids: Int64VectorBuilder,
peer_types: StringVectorBuilder,
peer_addrs: StringVectorBuilder,
cpus: UInt32VectorBuilder,
memory_bytes: UInt64VectorBuilder,
versions: StringVectorBuilder,
git_commits: StringVectorBuilder,
start_times: TimestampMillisecondVectorBuilder,
uptimes: StringVectorBuilder,
active_times: StringVectorBuilder,
node_status: StringVectorBuilder,
}

impl InformationSchemaClusterInfoBuilder {
@@ -155,11 +173,14 @@ impl InformationSchemaClusterInfoBuilder {
peer_ids: Int64VectorBuilder::with_capacity(INIT_CAPACITY),
peer_types: StringVectorBuilder::with_capacity(INIT_CAPACITY),
peer_addrs: StringVectorBuilder::with_capacity(INIT_CAPACITY),
cpus: UInt32VectorBuilder::with_capacity(INIT_CAPACITY),
memory_bytes: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
versions: StringVectorBuilder::with_capacity(INIT_CAPACITY),
git_commits: StringVectorBuilder::with_capacity(INIT_CAPACITY),
start_times: TimestampMillisecondVectorBuilder::with_capacity(INIT_CAPACITY),
uptimes: StringVectorBuilder::with_capacity(INIT_CAPACITY),
active_times: StringVectorBuilder::with_capacity(INIT_CAPACITY),
node_status: StringVectorBuilder::with_capacity(INIT_CAPACITY),
}
}

@@ -176,9 +197,10 @@ impl InformationSchemaClusterInfoBuilder {

fn add_node_info(&mut self, predicates: &Predicates, node_info: NodeInfo) {
let peer_type = node_info.status.role_name();
let peer_id = peer_id(peer_type, node_info.peer.id);

let row = [
(PEER_ID, &Value::from(node_info.peer.id)),
(PEER_ID, &Value::from(peer_id)),
(PEER_TYPE, &Value::from(peer_type)),
(PEER_ADDR, &Value::from(node_info.peer.addr.as_str())),
(VERSION, &Value::from(node_info.version.as_str())),
@@ -189,13 +211,7 @@ impl InformationSchemaClusterInfoBuilder {
return;
}

if peer_type == "FRONTEND" || peer_type == "METASRV" {
// Always set peer_id to be -1 for frontends and metasrvs
self.peer_ids.push(Some(-1));
} else {
self.peer_ids.push(Some(node_info.peer.id as i64));
}

self.peer_ids.push(Some(peer_id));
self.peer_types.push(Some(peer_type));
self.peer_addrs.push(Some(&node_info.peer.addr));
self.versions.push(Some(&node_info.version));
@@ -212,6 +228,8 @@ impl InformationSchemaClusterInfoBuilder {
self.start_times.push(None);
self.uptimes.push(None);
}
self.cpus.push(Some(node_info.cpus));
self.memory_bytes.push(Some(node_info.memory_bytes));

if node_info.last_activity_ts > 0 {
self.active_times.push(Some(
@@ -220,6 +238,8 @@ impl InformationSchemaClusterInfoBuilder {
} else {
self.active_times.push(None);
}
self.node_status
.push(format_node_status(&node_info).as_deref());
}

fn format_duration_since(ts: u64) -> String {
@@ -233,11 +253,14 @@ impl InformationSchemaClusterInfoBuilder {
Arc::new(self.peer_ids.finish()),
Arc::new(self.peer_types.finish()),
Arc::new(self.peer_addrs.finish()),
Arc::new(self.cpus.finish()),
Arc::new(self.memory_bytes.finish()),
Arc::new(self.versions.finish()),
Arc::new(self.git_commits.finish()),
Arc::new(self.start_times.finish()),
Arc::new(self.uptimes.finish()),
Arc::new(self.active_times.finish()),
Arc::new(self.node_status.finish()),
];
RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
}
@@ -263,3 +286,56 @@ impl DfPartitionStream for InformationSchemaClusterInfo {
))
}
}

fn peer_id(peer_type: &str, peer_id: u64) -> i64 {
if peer_type == PEER_TYPE_FRONTEND || peer_type == PEER_TYPE_METASRV {
-1
} else {
peer_id as i64
}
}

#[derive(Serialize)]
struct DisplayMetasrvStatus {
is_leader: bool,
}

#[derive(Serialize)]
struct DisplayDatanodeStatus {
workloads: Vec<DatanodeWorkloadType>,
leader_regions: usize,
follower_regions: usize,
}

impl From<&DatanodeStatus> for DisplayDatanodeStatus {
fn from(status: &DatanodeStatus) -> Self {
Self {
workloads: status
.workloads
.types
.iter()
.flat_map(|w| DatanodeWorkloadType::from_i32(*w))
.collect(),
leader_regions: status.leader_regions,
follower_regions: status.follower_regions,
}
}
}

fn format_node_status(node_info: &NodeInfo) -> Option<String> {
match &node_info.status {
NodeStatus::Datanode(datanode_status) => {
serde_json::to_string(&DisplayDatanodeStatus::from(datanode_status)).ok()
}
NodeStatus::Frontend(_) => None,
NodeStatus::Flownode(_) => None,
NodeStatus::Metasrv(metasrv_status) => {
if metasrv_status.is_leader {
serde_json::to_string(&DisplayMetasrvStatus { is_leader: true }).ok()
} else {
None
}
}
NodeStatus::Standalone => None,
}
}

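Note (illustrative, not part of the diff): the new `peer_id` helper centralizes the convention that frontends and metasrvs report a synthetic id of -1 in `cluster_info`, while other roles keep their real peer id. A small sketch of the expected mapping; the datanode role string used below is an assumption:

assert_eq!(peer_id(PEER_TYPE_FRONTEND, 42), -1);
assert_eq!(peer_id(PEER_TYPE_METASRV, 42), -1);
assert_eq!(peer_id("DATANODE", 42), 42);
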
@@ -23,9 +23,9 @@ use common_error::ext::BoxedError;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
use datafusion::execution::TaskContext;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
use datatypes::prelude::{ConcreteDataType, DataType, MutableVector};
use datatypes::scalars::ScalarVectorBuilder;
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
@@ -38,12 +38,12 @@ use snafu::{OptionExt, ResultExt};
use sql::statements;
use store_api::storage::{ScanRequest, TableId};

use crate::CatalogManager;
use crate::error::{
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
};
use crate::information_schema::Predicates;
use crate::system_schema::information_schema::{InformationTable, COLUMNS};
use crate::CatalogManager;
use crate::system_schema::information_schema::{COLUMNS, InformationTable};

#[derive(Debug)]
pub(super) struct InformationSchemaColumns {

@@ -16,10 +16,10 @@ use std::sync::{Arc, Weak};

use common_catalog::consts::INFORMATION_SCHEMA_FLOW_TABLE_ID;
use common_error::ext::BoxedError;
use common_meta::key::FlowId;
use common_meta::key::flow::FlowMetadataManager;
use common_meta::key::flow::flow_info::FlowInfoValue;
use common_meta::key::flow::flow_state::FlowStat;
use common_meta::key::flow::FlowMetadataManager;
use common_meta::key::FlowId;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{DfSendableRecordBatchStream, RecordBatch, SendableRecordBatchStream};
use datafusion::execution::TaskContext;
@@ -38,14 +38,14 @@ use futures::TryStreamExt;
use snafu::{OptionExt, ResultExt};
use store_api::storage::{ScanRequest, TableId};

use crate::CatalogManager;
use crate::error::{
CreateRecordBatchSnafu, FlowInfoNotFoundSnafu, InternalSnafu, JsonSnafu, ListFlowsSnafu,
Result, UpgradeWeakCatalogManagerRefSnafu,
};
use crate::information_schema::{Predicates, FLOWS};
use crate::information_schema::{FLOWS, Predicates};
use crate::system_schema::information_schema::InformationTable;
use crate::system_schema::utils;
use crate::CatalogManager;

const INIT_CAPACITY: usize = 42;

@@ -89,9 +89,9 @@ pub(super) fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>
vec![
Arc::new(StringVector::from(vec![build_info.branch.to_string()])),
Arc::new(StringVector::from(vec![build_info.commit.to_string()])),
Arc::new(StringVector::from(vec![build_info
.commit_short
.to_string()])),
Arc::new(StringVector::from(vec![
build_info.commit_short.to_string(),
])),
Arc::new(StringVector::from(vec![build_info.clean.to_string()])),
Arc::new(StringVector::from(vec![build_info.version.to_string()])),
],
@@ -369,17 +369,9 @@ pub(super) fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>
TRIGGERS => (
vec![
string_column("TRIGGER_NAME"),
ColumnSchema::new(
"trigger_id",
ConcreteDataType::uint64_datatype(),
false,
),
ColumnSchema::new("trigger_id", ConcreteDataType::uint64_datatype(), false),
string_column("TRIGGER_DEFINITION"),
ColumnSchema::new(
"flownode_id",
ConcreteDataType::uint64_datatype(),
true,
),
ColumnSchema::new("flownode_id", ConcreteDataType::uint64_datatype(), true),
],
vec![],
),

@@ -20,9 +20,9 @@ use common_error::ext::BoxedError;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
use datafusion::execution::TaskContext;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
use datatypes::prelude::{ConcreteDataType, MutableVector, ScalarVectorBuilder, VectorRef};
use datatypes::schema::{ColumnSchema, FulltextBackend, Schema, SchemaRef};
use datatypes::value::Value;
@@ -31,11 +31,11 @@ use futures_util::TryStreamExt;
use snafu::{OptionExt, ResultExt};
use store_api::storage::{ScanRequest, TableId};

use crate::CatalogManager;
use crate::error::{
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
};
use crate::system_schema::information_schema::{InformationTable, Predicates, KEY_COLUMN_USAGE};
use crate::CatalogManager;
use crate::system_schema::information_schema::{InformationTable, KEY_COLUMN_USAGE, Predicates};

pub const CONSTRAINT_SCHEMA: &str = "constraint_schema";
pub const CONSTRAINT_NAME: &str = "constraint_name";
@@ -277,15 +277,15 @@ impl InformationSchemaKeyColumnUsageBuilder {
constraints.push(CONSTRAINT_NAME_INVERTED_INDEX);
greptime_index_type.push(INDEX_TYPE_INVERTED_INDEX);
}
if let Ok(Some(options)) = column.fulltext_options() {
if options.enable {
constraints.push(CONSTRAINT_NAME_FULLTEXT_INDEX);
let index_type = match options.backend {
FulltextBackend::Bloom => INDEX_TYPE_FULLTEXT_BLOOM,
FulltextBackend::Tantivy => INDEX_TYPE_FULLTEXT_TANTIVY,
};
greptime_index_type.push(index_type);
}
if let Ok(Some(options)) = column.fulltext_options()
&& options.enable
{
constraints.push(CONSTRAINT_NAME_FULLTEXT_INDEX);
let index_type = match options.backend {
FulltextBackend::Bloom => INDEX_TYPE_FULLTEXT_BLOOM,
FulltextBackend::Tantivy => INDEX_TYPE_FULLTEXT_TANTIVY,
};
greptime_index_type.push(index_type);
}
if column.is_skipping_indexed() {
constraints.push(CONSTRAINT_NAME_SKIPPING_INDEX);

@@ -21,9 +21,9 @@ use common_error::ext::BoxedError;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
use datafusion::execution::TaskContext;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
use datatypes::prelude::{ConcreteDataType, ScalarVectorBuilder, VectorRef};
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use datatypes::timestamp::TimestampMicrosecond;
@@ -39,13 +39,13 @@ use snafu::{OptionExt, ResultExt};
use store_api::storage::{ScanRequest, TableId};
use table::metadata::{TableInfo, TableType};

use crate::CatalogManager;
use crate::error::{
CreateRecordBatchSnafu, FindPartitionsSnafu, InternalSnafu, PartitionManagerNotFoundSnafu,
Result, UpgradeWeakCatalogManagerRefSnafu,
};
use crate::kvbackend::KvBackendCatalogManager;
use crate::system_schema::information_schema::{InformationTable, Predicates, PARTITIONS};
use crate::CatalogManager;
use crate::system_schema::information_schema::{InformationTable, PARTITIONS, Predicates};

const TABLE_CATALOG: &str = "table_catalog";
const TABLE_SCHEMA: &str = "table_schema";

@@ -22,9 +22,9 @@ use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
use common_time::timestamp::Timestamp;
use datafusion::execution::TaskContext;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
use datatypes::prelude::{ConcreteDataType, ScalarVectorBuilder, VectorRef};
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use datatypes::timestamp::TimestampMillisecond;
@@ -33,10 +33,10 @@ use datatypes::vectors::{StringVectorBuilder, TimestampMillisecondVectorBuilder}
use snafu::ResultExt;
use store_api::storage::{ScanRequest, TableId};

use crate::error::{CreateRecordBatchSnafu, InternalSnafu, Result};
use crate::system_schema::information_schema::{InformationTable, Predicates, PROCEDURE_INFO};
use crate::system_schema::utils;
use crate::CatalogManager;
use crate::error::{CreateRecordBatchSnafu, InternalSnafu, Result};
use crate::system_schema::information_schema::{InformationTable, PROCEDURE_INFO, Predicates};
use crate::system_schema::utils;

const PROCEDURE_ID: &str = "procedure_id";
const PROCEDURE_TYPE: &str = "procedure_type";

@@ -23,9 +23,9 @@ use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
use datafusion::common::HashMap;
use datafusion::execution::TaskContext;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
use datatypes::prelude::{ConcreteDataType, ScalarVectorBuilder, VectorRef};
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use datatypes::value::Value;
@@ -35,13 +35,13 @@ use snafu::{OptionExt, ResultExt};
use store_api::storage::{RegionId, ScanRequest, TableId};
use table::metadata::TableType;

use crate::CatalogManager;
use crate::error::{
CreateRecordBatchSnafu, FindRegionRoutesSnafu, InternalSnafu, Result,
UpgradeWeakCatalogManagerRefSnafu,
};
use crate::kvbackend::KvBackendCatalogManager;
use crate::system_schema::information_schema::{InformationTable, Predicates, REGION_PEERS};
use crate::CatalogManager;

pub const TABLE_CATALOG: &str = "table_catalog";
pub const TABLE_SCHEMA: &str = "table_schema";

@@ -30,16 +30,17 @@ use datatypes::vectors::{StringVectorBuilder, UInt32VectorBuilder, UInt64VectorB
use snafu::ResultExt;
use store_api::storage::{ScanRequest, TableId};

use crate::CatalogManager;
use crate::error::{CreateRecordBatchSnafu, InternalSnafu, Result};
use crate::information_schema::Predicates;
use crate::system_schema::information_schema::{InformationTable, REGION_STATISTICS};
use crate::system_schema::utils;
use crate::CatalogManager;

const REGION_ID: &str = "region_id";
const TABLE_ID: &str = "table_id";
const REGION_NUMBER: &str = "region_number";
const REGION_ROWS: &str = "region_rows";
const WRITTEN_BYTES: &str = "written_bytes_since_open";
const DISK_SIZE: &str = "disk_size";
const MEMTABLE_SIZE: &str = "memtable_size";
const MANIFEST_SIZE: &str = "manifest_size";
@@ -57,6 +58,7 @@ const INIT_CAPACITY: usize = 42;
/// - `table_id`: The table id.
/// - `region_number`: The region number.
/// - `region_rows`: The number of rows in region.
/// - `written_bytes_since_open`: The total bytes written of the region since region opened.
/// - `memtable_size`: The memtable size in bytes.
/// - `disk_size`: The approximate disk size in bytes.
/// - `manifest_size`: The manifest size in bytes.
@@ -84,6 +86,7 @@ impl InformationSchemaRegionStatistics {
ColumnSchema::new(TABLE_ID, ConcreteDataType::uint32_datatype(), false),
ColumnSchema::new(REGION_NUMBER, ConcreteDataType::uint32_datatype(), false),
ColumnSchema::new(REGION_ROWS, ConcreteDataType::uint64_datatype(), true),
ColumnSchema::new(WRITTEN_BYTES, ConcreteDataType::uint64_datatype(), true),
ColumnSchema::new(DISK_SIZE, ConcreteDataType::uint64_datatype(), true),
ColumnSchema::new(MEMTABLE_SIZE, ConcreteDataType::uint64_datatype(), true),
ColumnSchema::new(MANIFEST_SIZE, ConcreteDataType::uint64_datatype(), true),
@@ -147,6 +150,7 @@ struct InformationSchemaRegionStatisticsBuilder {
table_ids: UInt32VectorBuilder,
region_numbers: UInt32VectorBuilder,
region_rows: UInt64VectorBuilder,
written_bytes: UInt64VectorBuilder,
disk_sizes: UInt64VectorBuilder,
memtable_sizes: UInt64VectorBuilder,
manifest_sizes: UInt64VectorBuilder,
@@ -166,6 +170,7 @@ impl InformationSchemaRegionStatisticsBuilder {
table_ids: UInt32VectorBuilder::with_capacity(INIT_CAPACITY),
region_numbers: UInt32VectorBuilder::with_capacity(INIT_CAPACITY),
region_rows: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
written_bytes: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
disk_sizes: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
memtable_sizes: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
manifest_sizes: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
@@ -197,6 +202,7 @@ impl InformationSchemaRegionStatisticsBuilder {
(TABLE_ID, &Value::from(region_stat.id.table_id())),
(REGION_NUMBER, &Value::from(region_stat.id.region_number())),
(REGION_ROWS, &Value::from(region_stat.num_rows)),
(WRITTEN_BYTES, &Value::from(region_stat.written_bytes)),
(DISK_SIZE, &Value::from(region_stat.approximate_bytes)),
(MEMTABLE_SIZE, &Value::from(region_stat.memtable_size)),
(MANIFEST_SIZE, &Value::from(region_stat.manifest_size)),
@@ -216,6 +222,7 @@ impl InformationSchemaRegionStatisticsBuilder {
self.region_numbers
.push(Some(region_stat.id.region_number()));
self.region_rows.push(Some(region_stat.num_rows));
self.written_bytes.push(Some(region_stat.written_bytes));
self.disk_sizes.push(Some(region_stat.approximate_bytes));
self.memtable_sizes.push(Some(region_stat.memtable_size));
self.manifest_sizes.push(Some(region_stat.manifest_size));
@@ -232,6 +239,7 @@ impl InformationSchemaRegionStatisticsBuilder {
Arc::new(self.table_ids.finish()),
Arc::new(self.region_numbers.finish()),
Arc::new(self.region_rows.finish()),
Arc::new(self.written_bytes.finish()),
Arc::new(self.disk_sizes.finish()),
Arc::new(self.memtable_sizes.finish()),
Arc::new(self.manifest_sizes.finish()),

@@ -21,9 +21,9 @@ use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
use common_time::util::current_time_millis;
use datafusion::execution::TaskContext;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
use datatypes::prelude::{ConcreteDataType, MutableVector};
use datatypes::scalars::ScalarVectorBuilder;
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};

@@ -21,9 +21,9 @@ use common_meta::key::schema_name::SchemaNameKey;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
use datafusion::execution::TaskContext;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
use datatypes::prelude::{ConcreteDataType, ScalarVectorBuilder, VectorRef};
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use datatypes::value::Value;
@@ -31,13 +31,13 @@ use datatypes::vectors::StringVectorBuilder;
use snafu::{OptionExt, ResultExt};
use store_api::storage::{ScanRequest, TableId};

use crate::CatalogManager;
use crate::error::{
CreateRecordBatchSnafu, InternalSnafu, Result, TableMetadataManagerSnafu,
UpgradeWeakCatalogManagerRefSnafu,
};
use crate::system_schema::information_schema::{InformationTable, Predicates, SCHEMATA};
use crate::system_schema::utils;
use crate::CatalogManager;

pub const CATALOG_NAME: &str = "catalog_name";
pub const SCHEMA_NAME: &str = "schema_name";

@@ -20,9 +20,9 @@ use common_error::ext::BoxedError;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
use datafusion::execution::TaskContext;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
use datatypes::prelude::{ConcreteDataType, MutableVector};
use datatypes::scalars::ScalarVectorBuilder;
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
@@ -32,15 +32,15 @@ use futures::TryStreamExt;
use snafu::{OptionExt, ResultExt};
use store_api::storage::{ScanRequest, TableId};

use crate::CatalogManager;
use crate::error::{
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
};
use crate::information_schema::Predicates;
use crate::information_schema::key_column_usage::{
CONSTRAINT_NAME_PRI, CONSTRAINT_NAME_TIME_INDEX,
};
use crate::information_schema::Predicates;
use crate::system_schema::information_schema::{InformationTable, TABLE_CONSTRAINTS};
use crate::CatalogManager;

/// The `TABLE_CONSTRAINTS` table describes which tables have constraints.
#[derive(Debug)]

@@ -23,9 +23,9 @@ use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
use common_telemetry::error;
use datafusion::execution::TaskContext;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
use datatypes::prelude::{ConcreteDataType, ScalarVectorBuilder, VectorRef};
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use datatypes::value::Value;
@@ -37,12 +37,12 @@ use snafu::{OptionExt, ResultExt};
use store_api::storage::{RegionId, ScanRequest, TableId};
use table::metadata::{TableInfo, TableType};

use crate::CatalogManager;
use crate::error::{
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
};
use crate::system_schema::information_schema::{InformationTable, Predicates, TABLES};
use crate::system_schema::utils;
use crate::CatalogManager;

pub const TABLE_CATALOG: &str = "table_catalog";
pub const TABLE_SCHEMA: &str = "table_schema";

@@ -20,9 +20,9 @@ use common_error::ext::BoxedError;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
use datafusion::execution::TaskContext;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
use datatypes::prelude::{ConcreteDataType, ScalarVectorBuilder, VectorRef};
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use datatypes::value::Value;
@@ -32,13 +32,13 @@ use snafu::{OptionExt, ResultExt};
use store_api::storage::{ScanRequest, TableId};
use table::metadata::TableType;

use crate::CatalogManager;
use crate::error::{
CastManagerSnafu, CreateRecordBatchSnafu, GetViewCacheSnafu, InternalSnafu, Result,
UpgradeWeakCatalogManagerRefSnafu, ViewInfoNotFoundSnafu,
};
use crate::kvbackend::KvBackendCatalogManager;
use crate::system_schema::information_schema::{InformationTable, Predicates, VIEWS};
use crate::CatalogManager;
const INIT_CAPACITY: usize = 42;

pub const TABLE_CATALOG: &str = "table_catalog";

@@ -21,9 +21,9 @@ use common_error::ext::BoxedError;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
use datafusion::execution::TaskContext;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
use datatypes::schema::SchemaRef;
use datatypes::vectors::VectorRef;
use snafu::ResultExt;

@@ -34,10 +34,10 @@ use table::TableRef;
pub use table_names::*;

use self::pg_namespace::oid_map::{PGNamespaceOidMap, PGNamespaceOidMapRef};
use crate::CatalogManager;
use crate::system_schema::memory_table::MemoryTable;
use crate::system_schema::utils::tables::u32_column;
use crate::system_schema::{SystemSchemaProvider, SystemSchemaProviderInner, SystemTableRef};
use crate::CatalogManager;

lazy_static! {
static ref MEMORY_TABLES: &'static [&'static str] = &[table_names::PG_TYPE];

@@ -32,15 +32,15 @@ use snafu::{OptionExt, ResultExt};
use store_api::storage::ScanRequest;
use table::metadata::TableType;

use crate::CatalogManager;
use crate::error::{
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
};
use crate::information_schema::Predicates;
use crate::system_schema::pg_catalog::pg_namespace::oid_map::PGNamespaceOidMapRef;
use crate::system_schema::pg_catalog::{query_ctx, OID_COLUMN_NAME, PG_CLASS};
use crate::system_schema::utils::tables::{string_column, u32_column};
use crate::system_schema::SystemTable;
use crate::CatalogManager;
use crate::system_schema::pg_catalog::pg_namespace::oid_map::PGNamespaceOidMapRef;
use crate::system_schema::pg_catalog::{OID_COLUMN_NAME, PG_CLASS, query_ctx};
use crate::system_schema::utils::tables::{string_column, u32_column};

// === column name ===
pub const RELNAME: &str = "relname";

@@ -29,15 +29,15 @@ use datatypes::vectors::{StringVectorBuilder, UInt32VectorBuilder, VectorRef};
use snafu::{OptionExt, ResultExt};
use store_api::storage::ScanRequest;

use crate::CatalogManager;
use crate::error::{
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
};
use crate::information_schema::Predicates;
use crate::system_schema::pg_catalog::pg_namespace::oid_map::PGNamespaceOidMapRef;
use crate::system_schema::pg_catalog::{query_ctx, OID_COLUMN_NAME, PG_DATABASE};
use crate::system_schema::utils::tables::{string_column, u32_column};
use crate::system_schema::SystemTable;
use crate::CatalogManager;
use crate::system_schema::pg_catalog::pg_namespace::oid_map::PGNamespaceOidMapRef;
use crate::system_schema::pg_catalog::{OID_COLUMN_NAME, PG_DATABASE, query_ctx};
use crate::system_schema::utils::tables::{string_column, u32_column};

// === column name ===
pub const DATNAME: &str = "datname";

@@ -35,16 +35,16 @@ use datatypes::vectors::{StringVectorBuilder, UInt32VectorBuilder, VectorRef};
use snafu::{OptionExt, ResultExt};
use store_api::storage::ScanRequest;

use crate::CatalogManager;
use crate::error::{
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
};
use crate::information_schema::Predicates;
use crate::system_schema::SystemTable;
use crate::system_schema::pg_catalog::{
query_ctx, PGNamespaceOidMapRef, OID_COLUMN_NAME, PG_NAMESPACE,
OID_COLUMN_NAME, PG_NAMESPACE, PGNamespaceOidMapRef, query_ctx,
};
use crate::system_schema::utils::tables::{string_column, u32_column};
use crate::system_schema::SystemTable;
use crate::CatalogManager;

const NSPNAME: &str = "nspname";
const INIT_CAPACITY: usize = 42;

@@ -339,18 +339,22 @@ mod tests {
assert!(!p.eval(&wrong_row).unwrap());
assert!(p.eval(&[]).is_none());
assert!(p.eval(&[("c", &a_value)]).is_none());
assert!(!p
.eval(&[(a_col.as_str(), &b_value), (b_col.as_str(), &a_value)])
.unwrap());
assert!(!p
.eval(&[(a_col.as_str(), &b_value), (b_col.as_str(), &b_value)])
.unwrap());
assert!(p
.eval(&[(a_col.as_ref(), &a_value), ("c", &a_value)])
.is_none());
assert!(!p
.eval(&[(a_col.as_ref(), &b_value), ("c", &a_value)])
.unwrap());
assert!(
!p.eval(&[(a_col.as_str(), &b_value), (b_col.as_str(), &a_value)])
.unwrap()
);
assert!(
!p.eval(&[(a_col.as_str(), &b_value), (b_col.as_str(), &b_value)])
.unwrap()
);
assert!(
p.eval(&[(a_col.as_ref(), &a_value), ("c", &a_value)])
.is_none()
);
assert!(
!p.eval(&[(a_col.as_ref(), &b_value), ("c", &a_value)])
.unwrap()
);

//Predicate::Or
let p = Predicate::Or(Box::new(p1), Box::new(p2));
@@ -358,18 +362,22 @@ mod tests {
assert!(p.eval(&wrong_row).unwrap());
assert!(p.eval(&[]).is_none());
assert!(p.eval(&[("c", &a_value)]).is_none());
assert!(!p
.eval(&[(a_col.as_str(), &b_value), (b_col.as_str(), &a_value)])
.unwrap());
assert!(p
.eval(&[(a_col.as_str(), &b_value), (b_col.as_str(), &b_value)])
.unwrap());
assert!(p
.eval(&[(a_col.as_ref(), &a_value), ("c", &a_value)])
.unwrap());
assert!(p
.eval(&[(a_col.as_ref(), &b_value), ("c", &a_value)])
.is_none());
assert!(
!p.eval(&[(a_col.as_str(), &b_value), (b_col.as_str(), &a_value)])
.unwrap()
);
assert!(
p.eval(&[(a_col.as_str(), &b_value), (b_col.as_str(), &b_value)])
.unwrap()
);
assert!(
p.eval(&[(a_col.as_ref(), &a_value), ("c", &a_value)])
.unwrap()
);
assert!(
p.eval(&[(a_col.as_ref(), &b_value), ("c", &a_value)])
.is_none()
);
}

#[test]

@@ -17,10 +17,10 @@ use std::sync::Weak;
use common_meta::key::TableMetadataManagerRef;
use snafu::OptionExt;

use crate::CatalogManager;
use crate::error::{GetInformationExtensionSnafu, Result, UpgradeWeakCatalogManagerRefSnafu};
use crate::information_schema::InformationExtensionRef;
use crate::kvbackend::KvBackendCatalogManager;
use crate::CatalogManager;

pub mod tables;

@@ -17,27 +17,27 @@ use std::sync::Arc;

use bytes::Bytes;
use common_catalog::format_full_table_name;
use common_query::logical_plan::{rename_logical_plan_columns, SubstraitPlanDecoderRef};
use common_query::logical_plan::{SubstraitPlanDecoderRef, rename_logical_plan_columns};
use datafusion::common::{ResolvedTableReference, TableReference};
use datafusion::datasource::view::ViewTable;
use datafusion::datasource::{provider_as_source, TableProvider};
use datafusion::datasource::{TableProvider, provider_as_source};
use datafusion::logical_expr::TableSource;
use itertools::Itertools;
use session::context::QueryContextRef;
use snafu::{ensure, OptionExt, ResultExt};
use snafu::{OptionExt, ResultExt, ensure};
use table::metadata::TableType;
use table::table::adapter::DfTableProviderAdapter;
pub mod dummy_catalog;
use dummy_catalog::DummyCatalogList;
use table::TableRef;

use crate::CatalogManagerRef;
use crate::error::{
CastManagerSnafu, DecodePlanSnafu, GetViewCacheSnafu, ProjectViewColumnsSnafu,
QueryAccessDeniedSnafu, Result, TableNotExistSnafu, ViewInfoNotFoundSnafu,
ViewPlanColumnsChangedSnafu,
};
use crate::kvbackend::KvBackendCatalogManager;
use crate::CatalogManagerRef;

pub struct DfTableSourceProvider {
catalog_manager: CatalogManagerRef,
@@ -272,7 +272,7 @@ mod tests {
use common_query::logical_plan::SubstraitPlanDecoder;
use datafusion::catalog::CatalogProviderList;
use datafusion::logical_expr::builder::LogicalTableSource;
use datafusion::logical_expr::{col, lit, LogicalPlan, LogicalPlanBuilder};
use datafusion::logical_expr::{LogicalPlan, LogicalPlanBuilder, col, lit};

use crate::information_schema::NoopInformationExtension;

@@ -25,8 +25,8 @@ use datafusion::datasource::TableProvider;
use snafu::OptionExt;
use table::table::adapter::DfTableProviderAdapter;

use crate::error::TableNotExistSnafu;
use crate::CatalogManagerRef;
use crate::error::TableNotExistSnafu;

/// Delegate the resolving requests to the `[CatalogManager]` unconditionally.
#[derive(Clone)]

@@ -14,8 +14,8 @@

use std::time::Instant;

use common_meta::key::table_route::TableRouteValue;
use common_meta::key::TableMetadataManagerRef;
use common_meta::key::table_route::TableRouteValue;
use table::table_name::TableName;

use crate::bench::{

@@ -18,9 +18,9 @@ mod import;
use clap::Subcommand;
use common_error::ext::BoxedError;

use crate::Tool;
use crate::data::export::ExportCommand;
use crate::data::import::ImportCommand;
use crate::Tool;

/// Command for data operations including exporting data from and importing data into GreptimeDB.
#[derive(Subcommand)]

@@ -24,18 +24,18 @@ use common_error::ext::BoxedError;
use common_telemetry::{debug, error, info};
use object_store::layers::LoggingLayer;
use object_store::services::Oss;
use object_store::{services, ObjectStore};
use object_store::{ObjectStore, services};
use serde_json::Value;
use snafu::{OptionExt, ResultExt};
use tokio::sync::Semaphore;
use tokio::time::Instant;

use crate::database::{parse_proxy_opts, DatabaseClient};
use crate::database::{DatabaseClient, parse_proxy_opts};
use crate::error::{
EmptyResultSnafu, Error, OpenDalSnafu, OutputDirNotSetSnafu, Result, S3ConfigNotSetSnafu,
SchemaNotFoundSnafu,
};
use crate::{database, Tool};
use crate::{Tool, database};

type TableReference = (String, String, String);

@@ -25,9 +25,9 @@ use snafu::{OptionExt, ResultExt};
use tokio::sync::Semaphore;
use tokio::time::Instant;

use crate::database::{parse_proxy_opts, DatabaseClient};
use crate::database::{DatabaseClient, parse_proxy_opts};
use crate::error::{Error, FileIoSnafu, Result, SchemaNotFoundSnafu};
use crate::{database, Tool};
use crate::{Tool, database};

#[derive(Debug, Default, Clone, ValueEnum)]
enum ImportTarget {

@@ -14,15 +14,15 @@
|
||||
|
||||
use std::time::Duration;
|
||||
|
||||
use base64::engine::general_purpose;
|
||||
use base64::Engine;
|
||||
use base64::engine::general_purpose;
|
||||
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use common_error::ext::BoxedError;
|
||||
use humantime::format_duration;
|
||||
use serde_json::Value;
|
||||
use servers::http::GreptimeQueryOutput;
|
||||
use servers::http::header::constants::GREPTIME_DB_HEADER_TIMEOUT;
|
||||
use servers::http::result::greptime_result_v1::GreptimedbV1Response;
|
||||
use servers::http::GreptimeQueryOutput;
|
||||
use snafu::ResultExt;
|
||||
|
||||
use crate::error::{
|
||||
|
||||
@@ -21,10 +21,10 @@ mod utils;
|
||||
use clap::Subcommand;
|
||||
use common_error::ext::BoxedError;
|
||||
|
||||
use crate::Tool;
|
||||
use crate::metadata::control::{DelCommand, GetCommand};
|
||||
use crate::metadata::repair::RepairLogicalTablesCommand;
|
||||
use crate::metadata::snapshot::SnapshotCommand;
|
||||
use crate::Tool;
|
||||
|
||||
/// Command for managing metadata operations,
|
||||
/// including saving and restoring metadata snapshots,
|
||||
|
||||
@@ -16,9 +16,9 @@ use std::sync::Arc;
|
||||
|
||||
use clap::Parser;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_meta::kv_backend::KvBackendRef;
|
||||
use common_meta::kv_backend::chroot::ChrootKvBackend;
|
||||
use common_meta::kv_backend::etcd::EtcdStore;
|
||||
use common_meta::kv_backend::KvBackendRef;
|
||||
use meta_srv::bootstrap::create_etcd_client_with_tls;
|
||||
use meta_srv::metasrv::BackendImpl;
|
||||
use servers::tls::{TlsMode, TlsOption};
|
||||
@@ -83,6 +83,20 @@ pub(crate) struct StoreConfig {
}

impl StoreConfig {
pub fn tls_config(&self) -> Option<TlsOption> {
if self.backend_tls_mode != TlsMode::Disable {
Some(TlsOption {
mode: self.backend_tls_mode.clone(),
cert_path: self.backend_tls_cert_path.clone(),
key_path: self.backend_tls_key_path.clone(),
ca_cert_path: self.backend_tls_ca_cert_path.clone(),
watch: self.backend_tls_watch,
})
} else {
None
}
}

/// Builds a [`KvBackendRef`] from the store configuration.
pub async fn build(&self) -> Result<KvBackendRef, BoxedError> {
let max_txn_ops = self.max_txn_ops;
@@ -92,17 +106,7 @@ impl StoreConfig {
} else {
let kvbackend = match self.backend {
BackendImpl::EtcdStore => {
let tls_config = if self.backend_tls_mode != TlsMode::Disable {
Some(TlsOption {
mode: self.backend_tls_mode.clone(),
cert_path: self.backend_tls_cert_path.clone(),
key_path: self.backend_tls_key_path.clone(),
ca_cert_path: self.backend_tls_ca_cert_path.clone(),
watch: self.backend_tls_watch,
})
} else {
None
};
let tls_config = self.tls_config();
let etcd_client = create_etcd_client_with_tls(store_addrs, tls_config.as_ref())
.await
.map_err(BoxedError::new)?;
@@ -111,7 +115,8 @@ impl StoreConfig {
#[cfg(feature = "pg_kvbackend")]
BackendImpl::PostgresStore => {
let table_name = &self.meta_table_name;
let pool = meta_srv::bootstrap::create_postgres_pool(store_addrs, None)
let tls_config = self.tls_config();
let pool = meta_srv::bootstrap::create_postgres_pool(store_addrs, tls_config)
.await
.map_err(BoxedError::new)?;
let schema_name = self.meta_schema_name.as_deref();
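Aside (editor's illustration, not part of the diff): the hunks above fold the duplicated TLS branch into a single `StoreConfig::tls_config()` helper that both the etcd and Postgres backends reuse, and the Postgres pool now receives the TLS option instead of a hard-coded `None`. A minimal sketch of that extract-a-helper pattern, using simplified hypothetical types rather than the real `TlsOption`/`TlsMode`:

// Illustrative only: derive an optional TLS setting once and reuse it for
// every backend client instead of repeating the branch per backend.
#[derive(Clone)]
struct BackendConfig {
    tls_enabled: bool,
    ca_cert_path: String,
}

impl BackendConfig {
    // Mirrors the `backend_tls_mode != TlsMode::Disable` check in the hunk above.
    fn tls_config(&self) -> Option<String> {
        self.tls_enabled.then(|| self.ca_cert_path.clone())
    }

    fn build(&self) {
        let tls = self.tls_config();
        // Both "backends" consume the same helper's result.
        println!("etcd client tls: {:?}", tls);
        println!("postgres pool tls: {:?}", tls.as_deref());
    }
}

fn main() {
    BackendConfig { tls_enabled: true, ca_cert_path: "/etc/certs/ca.pem".into() }.build();
}

In the real change the returned option is threaded into `create_etcd_client_with_tls` and `create_postgres_pool`, so enabling backend TLS in one place now affects both stores.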
@@ -18,9 +18,9 @@ mod table;
|
||||
use clap::Subcommand;
|
||||
use common_error::ext::BoxedError;
|
||||
|
||||
use crate::Tool;
|
||||
use crate::metadata::control::del::key::DelKeyCommand;
|
||||
use crate::metadata::control::del::table::DelTableCommand;
|
||||
use crate::Tool;
|
||||
|
||||
/// The prefix of the tombstone keys.
|
||||
pub(crate) const CLI_TOMBSTONE_PREFIX: &str = "__cli_tombstone/";
|
||||
|
||||
@@ -19,9 +19,9 @@ use common_meta::key::tombstone::TombstoneManager;
|
||||
use common_meta::kv_backend::KvBackendRef;
|
||||
use common_meta::rpc::store::RangeRequest;
|
||||
|
||||
use crate::Tool;
|
||||
use crate::metadata::common::StoreConfig;
|
||||
use crate::metadata::control::del::CLI_TOMBSTONE_PREFIX;
|
||||
use crate::Tool;
|
||||
|
||||
/// Delete key-value pairs logically from the metadata store.
|
||||
#[derive(Debug, Default, Parser)]
|
||||
@@ -102,8 +102,8 @@ mod tests {
|
||||
use common_meta::kv_backend::{KvBackend, KvBackendRef};
|
||||
use common_meta::rpc::store::RangeRequest;
|
||||
|
||||
use crate::metadata::control::del::key::KeyDeleter;
|
||||
use crate::metadata::control::del::CLI_TOMBSTONE_PREFIX;
|
||||
use crate::metadata::control::del::key::KeyDeleter;
|
||||
use crate::metadata::control::test_utils::put_key;
|
||||
|
||||
#[tokio::test]
|
||||
|
||||
@@ -18,16 +18,16 @@ use client::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use common_catalog::format_full_table_name;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_meta::ddl::utils::get_region_wal_options;
|
||||
use common_meta::key::table_name::TableNameManager;
|
||||
use common_meta::key::TableMetadataManager;
|
||||
use common_meta::key::table_name::TableNameManager;
|
||||
use common_meta::kv_backend::KvBackendRef;
|
||||
use store_api::storage::TableId;
|
||||
|
||||
use crate::Tool;
|
||||
use crate::error::{InvalidArgumentsSnafu, TableNotFoundSnafu};
|
||||
use crate::metadata::common::StoreConfig;
|
||||
use crate::metadata::control::del::CLI_TOMBSTONE_PREFIX;
|
||||
use crate::metadata::control::utils::get_table_id_by_name;
|
||||
use crate::Tool;
|
||||
|
||||
/// Delete table metadata logically from the metadata store.
|
||||
#[derive(Debug, Default, Parser)]
|
||||
@@ -183,15 +183,15 @@ mod tests {
|
||||
|
||||
use common_error::ext::ErrorExt;
|
||||
use common_error::status_code::StatusCode;
|
||||
use common_meta::key::table_route::TableRouteValue;
|
||||
use common_meta::key::TableMetadataManager;
|
||||
use common_meta::key::table_route::TableRouteValue;
|
||||
use common_meta::kv_backend::chroot::ChrootKvBackend;
|
||||
use common_meta::kv_backend::memory::MemoryKvBackend;
|
||||
use common_meta::kv_backend::{KvBackend, KvBackendRef};
|
||||
use common_meta::rpc::store::RangeRequest;
|
||||
|
||||
use crate::metadata::control::del::table::TableMetadataDeleter;
|
||||
use crate::metadata::control::del::CLI_TOMBSTONE_PREFIX;
|
||||
use crate::metadata::control::del::table::TableMetadataDeleter;
|
||||
use crate::metadata::control::test_utils::prepare_physical_table_metadata;
|
||||
|
||||
#[tokio::test]
|
||||
|
||||
@@ -19,18 +19,18 @@ use clap::{Parser, Subcommand};
|
||||
use client::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use common_catalog::format_full_table_name;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_meta::key::TableMetadataManager;
|
||||
use common_meta::key::table_info::TableInfoKey;
|
||||
use common_meta::key::table_route::TableRouteKey;
|
||||
use common_meta::key::TableMetadataManager;
|
||||
use common_meta::kv_backend::KvBackendRef;
|
||||
use common_meta::range_stream::{PaginationStream, DEFAULT_PAGE_SIZE};
|
||||
use common_meta::range_stream::{DEFAULT_PAGE_SIZE, PaginationStream};
|
||||
use common_meta::rpc::store::RangeRequest;
|
||||
use futures::TryStreamExt;
|
||||
|
||||
use crate::Tool;
|
||||
use crate::error::InvalidArgumentsSnafu;
|
||||
use crate::metadata::common::StoreConfig;
|
||||
use crate::metadata::control::utils::{decode_key_value, get_table_id_by_name, json_fromatter};
|
||||
use crate::Tool;
|
||||
|
||||
/// Getting metadata from metadata store.
|
||||
#[derive(Subcommand)]
|
||||
|
||||
@@ -31,18 +31,18 @@ use common_meta::key::TableMetadataManager;
|
||||
use common_meta::kv_backend::KvBackendRef;
|
||||
use common_meta::node_manager::NodeManagerRef;
|
||||
use common_meta::peer::Peer;
|
||||
use common_meta::rpc::router::{find_leaders, RegionRoute};
|
||||
use common_meta::rpc::router::{RegionRoute, find_leaders};
|
||||
use common_telemetry::{error, info, warn};
|
||||
use futures::TryStreamExt;
|
||||
use snafu::{ensure, ResultExt};
|
||||
use snafu::{ResultExt, ensure};
|
||||
use store_api::storage::TableId;
|
||||
|
||||
use crate::Tool;
|
||||
use crate::error::{
|
||||
InvalidArgumentsSnafu, Result, SendRequestToDatanodeSnafu, TableMetadataSnafu, UnexpectedSnafu,
|
||||
};
|
||||
use crate::metadata::common::StoreConfig;
|
||||
use crate::metadata::utils::{FullTableMetadata, IteratorInput, TableMetadataIterator};
|
||||
use crate::Tool;
|
||||
|
||||
/// Repair metadata of logical tables.
|
||||
#[derive(Debug, Default, Parser)]
|
||||
@@ -301,7 +301,10 @@ impl RepairTool {
|
||||
warn!(
|
||||
"Sending alter table requests to datanodes for table: {} failed for the datanodes: {:?}",
|
||||
full_table_name,
|
||||
failed_peers.iter().map(|(peer, _)| peer.id).collect::<Vec<_>>()
|
||||
failed_peers
|
||||
.iter()
|
||||
.map(|(peer, _)| peer.id)
|
||||
.collect::<Vec<_>>()
|
||||
);
|
||||
|
||||
let create_table_expr =
|
||||
@@ -320,8 +323,7 @@ impl RepairTool {
|
||||
}
|
||||
info!(
|
||||
"Region not found for table: {}, datanode: {}, trying to create the logical table on that datanode",
|
||||
full_table_name,
|
||||
peer.id
|
||||
full_table_name, peer.id
|
||||
);
|
||||
|
||||
// If the alter table request fails for any datanode, we attempt to create the table on that datanode
|
||||
|
||||
@@ -13,11 +13,11 @@
|
||||
// limitations under the License.
|
||||
|
||||
use client::api::v1::alter_table_expr::Kind;
|
||||
use client::api::v1::region::{region_request, AlterRequests, RegionRequest, RegionRequestHeader};
|
||||
use client::api::v1::region::{AlterRequests, RegionRequest, RegionRequestHeader, region_request};
|
||||
use client::api::v1::{AddColumn, AddColumns, AlterTableExpr};
|
||||
use common_meta::ddl::alter_logical_tables::make_alter_region_request;
|
||||
use common_meta::peer::Peer;
|
||||
use common_meta::rpc::router::{find_leader_regions, RegionRoute};
|
||||
use common_meta::rpc::router::{RegionRoute, find_leader_regions};
|
||||
use operator::expr_helper::column_schemas_to_defs;
|
||||
use snafu::ResultExt;
|
||||
use store_api::storage::{RegionId, TableId};
|
||||
|
||||
@@ -14,12 +14,12 @@
|
||||
|
||||
use std::collections::HashMap;
|
||||
|
||||
use client::api::v1::region::{region_request, CreateRequests, RegionRequest, RegionRequestHeader};
|
||||
use client::api::v1::CreateTableExpr;
|
||||
use client::api::v1::region::{CreateRequests, RegionRequest, RegionRequestHeader, region_request};
|
||||
use common_meta::ddl::create_logical_tables::create_region_request_builder;
|
||||
use common_meta::ddl::utils::region_storage_path;
|
||||
use common_meta::peer::Peer;
|
||||
use common_meta::rpc::router::{find_leader_regions, RegionRoute};
|
||||
use common_meta::rpc::router::{RegionRoute, find_leader_regions};
|
||||
use operator::expr_helper::column_schemas_to_defs;
|
||||
use snafu::ResultExt;
|
||||
use store_api::storage::{RegionId, TableId};
|
||||
|
||||
@@ -19,13 +19,13 @@ use clap::{Parser, Subcommand};
|
||||
use common_base::secrets::{ExposeSecret, SecretString};
|
||||
use common_error::ext::BoxedError;
|
||||
use common_meta::snapshot::MetadataSnapshotManager;
|
||||
use object_store::services::{Fs, S3};
|
||||
use object_store::ObjectStore;
|
||||
use object_store::services::{Fs, S3};
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
|
||||
use crate::Tool;
|
||||
use crate::error::{InvalidFilePathSnafu, OpenDalSnafu, S3ConfigNotSetSnafu};
|
||||
use crate::metadata::common::StoreConfig;
|
||||
use crate::Tool;
|
||||
|
||||
/// Subcommand for metadata snapshot operations, including saving snapshots, restoring from snapshots, and viewing snapshot information.
|
||||
#[derive(Subcommand)]
|
||||
@@ -258,11 +258,13 @@ impl Tool for MetaRestoreTool {
|
||||
Ok(())
|
||||
} else if !self.force {
|
||||
common_telemetry::warn!(
|
||||
"The target source is not clean, if you want to restore the metadata snapshot forcefully, please use --force option."
|
||||
);
|
||||
"The target source is not clean, if you want to restore the metadata snapshot forcefully, please use --force option."
|
||||
);
|
||||
Ok(())
|
||||
} else {
|
||||
common_telemetry::info!("The target source is not clean, We will restore the metadata snapshot with --force.");
|
||||
common_telemetry::info!(
|
||||
"The target source is not clean, We will restore the metadata snapshot with --force."
|
||||
);
|
||||
self.inner
|
||||
.restore(&self.source_file)
|
||||
.await
|
||||
|
||||
@@ -17,9 +17,9 @@ use std::collections::VecDeque;
|
||||
use async_stream::try_stream;
|
||||
use common_catalog::consts::METRIC_ENGINE;
|
||||
use common_catalog::format_full_table_name;
|
||||
use common_meta::key::TableMetadataManager;
|
||||
use common_meta::key::table_name::TableNameKey;
|
||||
use common_meta::key::table_route::TableRouteValue;
|
||||
use common_meta::key::TableMetadataManager;
|
||||
use common_meta::kv_backend::KvBackendRef;
|
||||
use futures::Stream;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
|
||||
@@ -14,11 +14,11 @@
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use api::v1::HealthCheckRequest;
|
||||
use api::v1::flow::flow_client::FlowClient as PbFlowClient;
|
||||
use api::v1::health_check_client::HealthCheckClient;
|
||||
use api::v1::prometheus_gateway_client::PrometheusGatewayClient;
|
||||
use api::v1::region::region_client::RegionClient as PbRegionClient;
|
||||
use api::v1::HealthCheckRequest;
|
||||
use arrow_flight::flight_service_client::FlightServiceClient;
|
||||
use common_grpc::channel_manager::{ChannelConfig, ChannelManager, ClientTlsOption};
|
||||
use parking_lot::RwLock;
|
||||
@@ -27,7 +27,7 @@ use tonic::codec::CompressionEncoding;
|
||||
use tonic::transport::Channel;
|
||||
|
||||
use crate::load_balance::{LoadBalance, Loadbalancer};
|
||||
use crate::{error, Result};
|
||||
use crate::{Result, error};
|
||||
|
||||
pub struct FlightClient {
|
||||
addr: String,
|
||||
|
||||
@@ -21,9 +21,9 @@ use common_meta::node_manager::{DatanodeManager, DatanodeRef, FlownodeManager, F
|
||||
use common_meta::peer::Peer;
|
||||
use moka::future::{Cache, CacheBuilder};
|
||||
|
||||
use crate::Client;
|
||||
use crate::flow::FlowRequester;
|
||||
use crate::region::RegionRequester;
|
||||
use crate::Client;
|
||||
|
||||
pub struct NodeClients {
|
||||
channel_manager: ChannelManager,
|
||||
|
||||
@@ -27,8 +27,8 @@ use api::v1::{
|
||||
};
|
||||
use arrow_flight::{FlightData, Ticket};
|
||||
use async_stream::stream;
|
||||
use base64::prelude::BASE64_STANDARD;
|
||||
use base64::Engine;
|
||||
use base64::prelude::BASE64_STANDARD;
|
||||
use common_catalog::build_db_string;
|
||||
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use common_error::ext::BoxedError;
|
||||
@@ -42,7 +42,7 @@ use common_telemetry::{error, warn};
|
||||
use futures::future;
|
||||
use futures_util::{Stream, StreamExt, TryStreamExt};
|
||||
use prost::Message;
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
use snafu::{OptionExt, ResultExt, ensure};
|
||||
use tonic::metadata::{AsciiMetadataKey, AsciiMetadataValue, MetadataMap, MetadataValue};
|
||||
use tonic::transport::Channel;
|
||||
|
||||
@@ -50,7 +50,7 @@ use crate::error::{
|
||||
ConvertFlightDataSnafu, Error, FlightGetSnafu, IllegalFlightMessagesSnafu,
|
||||
InvalidTonicMetadataValueSnafu,
|
||||
};
|
||||
use crate::{error, from_grpc_response, Client, Result};
|
||||
use crate::{Client, Result, error, from_grpc_response};
|
||||
|
||||
type FlightDataStream = Pin<Box<dyn Stream<Item = FlightData> + Send>>;
|
||||
|
||||
@@ -379,11 +379,10 @@ impl Database {
|
||||
tonic_code,
|
||||
e
|
||||
);
|
||||
let error = Err(BoxedError::new(e)).with_context(|_| FlightGetSnafu {
|
||||
Err(BoxedError::new(e)).with_context(|_| FlightGetSnafu {
|
||||
addr: client.addr().to_string(),
|
||||
tonic_code,
|
||||
});
|
||||
error
|
||||
})
|
||||
})?;
|
||||
|
||||
let flight_data_stream = response.into_inner();
|
||||
|
||||
@@ -18,9 +18,9 @@ use common_error::define_from_tonic_status;
|
||||
use common_error::ext::{BoxedError, ErrorExt};
|
||||
use common_error::status_code::StatusCode;
|
||||
use common_macro::stack_trace_debug;
|
||||
use snafu::{location, Location, Snafu};
|
||||
use tonic::metadata::errors::InvalidMetadataValue;
|
||||
use snafu::{Location, Snafu, location};
|
||||
use tonic::Code;
|
||||
use tonic::metadata::errors::InvalidMetadataValue;
|
||||
|
||||
#[derive(Snafu)]
|
||||
#[snafu(visibility(pub))]
|
||||
|
||||
@@ -12,14 +12,14 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use api::v1::flow::{DirtyWindowRequest, DirtyWindowRequests, FlowRequest, FlowResponse};
use api::v1::flow::{DirtyWindowRequests, FlowRequest, FlowResponse};
use api::v1::region::InsertRequests;
use common_error::ext::BoxedError;
use common_meta::node_manager::Flownode;
use snafu::ResultExt;

use crate::error::{FlowServerSnafu, Result};
use crate::Client;
use crate::error::{FlowServerSnafu, Result};

#[derive(Debug)]
pub struct FlowRequester {
@@ -47,7 +47,7 @@ impl Flownode for FlowRequester {

async fn handle_mark_window_dirty(
&self,
req: DirtyWindowRequest,
req: DirtyWindowRequests,
) -> common_meta::error::Result<FlowResponse> {
self.handle_mark_window_dirty(req)
.await
@@ -102,12 +102,10 @@ impl FlowRequester {
Ok(response)
}

async fn handle_mark_window_dirty(&self, req: DirtyWindowRequest) -> Result<FlowResponse> {
async fn handle_mark_window_dirty(&self, req: DirtyWindowRequests) -> Result<FlowResponse> {
let (addr, mut client) = self.client.raw_flow_client()?;
let response = client
.handle_mark_dirty_time_window(DirtyWindowRequests {
requests: vec![req],
})
.handle_mark_dirty_time_window(req)
.await
.or_else(|e| {
let code = e.code();
@@ -15,8 +15,8 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use api::region::RegionResponse;
|
||||
use api::v1::region::RegionRequest;
|
||||
use api::v1::ResponseHeader;
|
||||
use api::v1::region::RegionRequest;
|
||||
use arc_swap::ArcSwapOption;
|
||||
use arrow_flight::Ticket;
|
||||
use async_stream::stream;
|
||||
@@ -33,7 +33,7 @@ use common_telemetry::error;
|
||||
use common_telemetry::tracing_context::TracingContext;
|
||||
use prost::Message;
|
||||
use query::query_engine::DefaultSerializer;
|
||||
use snafu::{location, OptionExt, ResultExt};
|
||||
use snafu::{OptionExt, ResultExt, location};
|
||||
use substrait::{DFLogicalSubstraitConvertor, SubstraitPlan};
|
||||
use tokio_stream::StreamExt;
|
||||
|
||||
@@ -41,7 +41,7 @@ use crate::error::{
|
||||
self, ConvertFlightDataSnafu, FlightGetSnafu, IllegalDatabaseResponseSnafu,
|
||||
IllegalFlightMessagesSnafu, MissingFieldSnafu, Result, ServerSnafu,
|
||||
};
|
||||
use crate::{metrics, Client, Error};
|
||||
use crate::{Client, Error, metrics};
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct RegionRequester {
|
||||
@@ -115,11 +115,10 @@ impl RegionRequester {
|
||||
flight_client.addr(),
|
||||
tonic_code
|
||||
);
|
||||
let error = Err(BoxedError::new(e)).with_context(|_| FlightGetSnafu {
|
||||
Err(BoxedError::new(e)).with_context(|_| FlightGetSnafu {
|
||||
addr: flight_client.addr().to_string(),
|
||||
tonic_code,
|
||||
});
|
||||
error
|
||||
})
|
||||
})?;
|
||||
|
||||
let flight_data_stream = response.into_inner();
|
||||
|
||||
@@ -18,7 +18,7 @@ use clap::{Parser, Subcommand};
|
||||
use cmd::datanode::builder::InstanceBuilder;
|
||||
use cmd::error::{InitTlsProviderSnafu, Result};
|
||||
use cmd::options::GlobalOptions;
|
||||
use cmd::{cli, datanode, flownode, frontend, metasrv, standalone, App};
|
||||
use cmd::{App, cli, datanode, flownode, frontend, metasrv, standalone};
|
||||
use common_base::Plugins;
|
||||
use common_version::{verbose_version, version};
|
||||
use servers::install_ring_crypto_provider;
|
||||
@@ -143,8 +143,10 @@ async fn start(cli: Command) -> Result<()> {
|
||||
}
|
||||
|
||||
fn setup_human_panic() {
|
||||
human_panic::setup_panic!(human_panic::Metadata::new("GreptimeDB", version())
|
||||
.homepage("https://github.com/GreptimeTeam/greptimedb/discussions"));
|
||||
human_panic::setup_panic!(
|
||||
human_panic::Metadata::new("GreptimeDB", version())
|
||||
.homepage("https://github.com/GreptimeTeam/greptimedb/discussions")
|
||||
);
|
||||
|
||||
common_telemetry::set_panic_hook();
|
||||
}
|
||||
|
||||
@@ -20,7 +20,7 @@ use snafu::ResultExt;
|
||||
use tracing_appender::non_blocking::WorkerGuard;
|
||||
|
||||
use crate::options::GlobalOptions;
|
||||
use crate::{error, App, Result};
|
||||
use crate::{App, Result, error};
|
||||
pub const APP_NAME: &str = "greptime-cli";
|
||||
use async_trait::async_trait;
|
||||
|
||||
@@ -109,7 +109,7 @@ mod tests {
|
||||
|
||||
use crate::error::Result as CmdResult;
|
||||
use crate::options::GlobalOptions;
|
||||
use crate::{cli, standalone, App};
|
||||
use crate::{App, cli, standalone};
|
||||
|
||||
#[tokio::test(flavor = "multi_thread")]
|
||||
async fn test_export_create_table_with_quoted_names() -> CmdResult<()> {
|
||||
|
||||
@@ -20,20 +20,20 @@ use std::time::Duration;
|
||||
use async_trait::async_trait;
|
||||
use clap::Parser;
|
||||
use common_config::Configurable;
|
||||
use common_telemetry::logging::{TracingOptions, DEFAULT_LOGGING_DIR};
|
||||
use common_telemetry::logging::{DEFAULT_LOGGING_DIR, TracingOptions};
|
||||
use common_telemetry::{info, warn};
|
||||
use common_wal::config::DatanodeWalConfig;
|
||||
use datanode::datanode::Datanode;
|
||||
use meta_client::MetaClientOptions;
|
||||
use snafu::{ensure, ResultExt};
|
||||
use snafu::{ResultExt, ensure};
|
||||
use tracing_appender::non_blocking::WorkerGuard;
|
||||
|
||||
use crate::App;
|
||||
use crate::datanode::builder::InstanceBuilder;
|
||||
use crate::error::{
|
||||
LoadLayeredConfigSnafu, MissingConfigSnafu, Result, ShutdownDatanodeSnafu, StartDatanodeSnafu,
|
||||
};
|
||||
use crate::options::{GlobalOptions, GreptimeOptions};
|
||||
use crate::App;
|
||||
|
||||
pub const APP_NAME: &str = "greptime-datanode";
|
||||
|
||||
@@ -187,29 +187,39 @@ impl StartCommand {
|
||||
if let Some(addr) = &self.rpc_bind_addr {
|
||||
opts.grpc.bind_addr.clone_from(addr);
|
||||
} else if let Some(addr) = &opts.rpc_addr {
|
||||
warn!("Use the deprecated attribute `DatanodeOptions.rpc_addr`, please use `grpc.addr` instead.");
|
||||
warn!(
|
||||
"Use the deprecated attribute `DatanodeOptions.rpc_addr`, please use `grpc.addr` instead."
|
||||
);
|
||||
opts.grpc.bind_addr.clone_from(addr);
|
||||
}
|
||||
|
||||
if let Some(server_addr) = &self.rpc_server_addr {
|
||||
opts.grpc.server_addr.clone_from(server_addr);
|
||||
} else if let Some(server_addr) = &opts.rpc_hostname {
|
||||
warn!("Use the deprecated attribute `DatanodeOptions.rpc_hostname`, please use `grpc.hostname` instead.");
|
||||
warn!(
|
||||
"Use the deprecated attribute `DatanodeOptions.rpc_hostname`, please use `grpc.hostname` instead."
|
||||
);
|
||||
opts.grpc.server_addr.clone_from(server_addr);
|
||||
}
|
||||
|
||||
if let Some(runtime_size) = opts.rpc_runtime_size {
|
||||
warn!("Use the deprecated attribute `DatanodeOptions.rpc_runtime_size`, please use `grpc.runtime_size` instead.");
|
||||
warn!(
|
||||
"Use the deprecated attribute `DatanodeOptions.rpc_runtime_size`, please use `grpc.runtime_size` instead."
|
||||
);
|
||||
opts.grpc.runtime_size = runtime_size;
|
||||
}
|
||||
|
||||
if let Some(max_recv_message_size) = opts.rpc_max_recv_message_size {
|
||||
warn!("Use the deprecated attribute `DatanodeOptions.rpc_max_recv_message_size`, please use `grpc.max_recv_message_size` instead.");
|
||||
warn!(
|
||||
"Use the deprecated attribute `DatanodeOptions.rpc_max_recv_message_size`, please use `grpc.max_recv_message_size` instead."
|
||||
);
|
||||
opts.grpc.max_recv_message_size = max_recv_message_size;
|
||||
}
|
||||
|
||||
if let Some(max_send_message_size) = opts.rpc_max_send_message_size {
|
||||
warn!("Use the deprecated attribute `DatanodeOptions.rpc_max_send_message_size`, please use `grpc.max_send_message_size` instead.");
|
||||
warn!(
|
||||
"Use the deprecated attribute `DatanodeOptions.rpc_max_send_message_size`, please use `grpc.max_send_message_size` instead."
|
||||
);
|
||||
opts.grpc.max_send_message_size = max_send_message_size;
|
||||
}
|
||||
|
||||
@@ -430,20 +440,24 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_try_from_cmd() {
|
||||
assert!((StartCommand {
|
||||
metasrv_addrs: Some(vec!["127.0.0.1:3002".to_string()]),
|
||||
..Default::default()
|
||||
})
|
||||
.load_options(&GlobalOptions::default())
|
||||
.is_err());
|
||||
assert!(
|
||||
(StartCommand {
|
||||
metasrv_addrs: Some(vec!["127.0.0.1:3002".to_string()]),
|
||||
..Default::default()
|
||||
})
|
||||
.load_options(&GlobalOptions::default())
|
||||
.is_err()
|
||||
);
|
||||
|
||||
// Providing node_id but leave metasrv_addr absent is ok since metasrv_addr has default value
|
||||
assert!((StartCommand {
|
||||
node_id: Some(42),
|
||||
..Default::default()
|
||||
})
|
||||
.load_options(&GlobalOptions::default())
|
||||
.is_ok());
|
||||
assert!(
|
||||
(StartCommand {
|
||||
node_id: Some(42),
|
||||
..Default::default()
|
||||
})
|
||||
.load_options(&GlobalOptions::default())
|
||||
.is_ok()
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
||||
@@ -26,7 +26,7 @@ use meta_client::MetaClientType;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use tracing_appender::non_blocking::WorkerGuard;
|
||||
|
||||
use crate::datanode::{DatanodeOptions, Instance, APP_NAME};
|
||||
use crate::datanode::{APP_NAME, DatanodeOptions, Instance};
|
||||
use crate::error::{MetaClientInitSnafu, MissingConfigSnafu, Result, StartDatanodeSnafu};
|
||||
use crate::{create_resource_limit_metrics, log_versions, maybe_activate_heap_profile};
|
||||
|
||||
|
||||
@@ -25,20 +25,20 @@ use common_base::Plugins;
|
||||
use common_config::{Configurable, DEFAULT_DATA_HOME};
|
||||
use common_grpc::channel_manager::ChannelConfig;
|
||||
use common_meta::cache::{CacheRegistryBuilder, LayeredCacheRegistryBuilder};
|
||||
use common_meta::heartbeat::handler::HandlerGroupExecutor;
|
||||
use common_meta::heartbeat::handler::invalidate_table_cache::InvalidateCacheHandler;
|
||||
use common_meta::heartbeat::handler::parse_mailbox_message::ParseMailboxMessageHandler;
|
||||
use common_meta::heartbeat::handler::HandlerGroupExecutor;
|
||||
use common_meta::key::flow::FlowMetadataManager;
|
||||
use common_meta::key::TableMetadataManager;
|
||||
use common_meta::key::flow::FlowMetadataManager;
|
||||
use common_telemetry::info;
|
||||
use common_telemetry::logging::{TracingOptions, DEFAULT_LOGGING_DIR};
|
||||
use common_telemetry::logging::{DEFAULT_LOGGING_DIR, TracingOptions};
|
||||
use common_version::{short_version, verbose_version};
|
||||
use flow::{
|
||||
get_flow_auth_options, FlownodeBuilder, FlownodeInstance, FlownodeServiceBuilder,
|
||||
FrontendClient, FrontendInvoker,
|
||||
FlownodeBuilder, FlownodeInstance, FlownodeServiceBuilder, FrontendClient, FrontendInvoker,
|
||||
get_flow_auth_options,
|
||||
};
|
||||
use meta_client::{MetaClientOptions, MetaClientType};
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
use snafu::{OptionExt, ResultExt, ensure};
|
||||
use tracing_appender::non_blocking::WorkerGuard;
|
||||
|
||||
use crate::error::{
|
||||
@@ -46,7 +46,7 @@ use crate::error::{
|
||||
MissingConfigSnafu, Result, ShutdownFlownodeSnafu, StartFlownodeSnafu,
|
||||
};
|
||||
use crate::options::{GlobalOptions, GreptimeOptions};
|
||||
use crate::{create_resource_limit_metrics, log_versions, maybe_activate_heap_profile, App};
|
||||
use crate::{App, create_resource_limit_metrics, log_versions, maybe_activate_heap_profile};
|
||||
|
||||
pub const APP_NAME: &str = "greptime-flownode";
|
||||
|
||||
@@ -341,8 +341,18 @@ impl StartCommand {
.build(),
);

let information_extension =
Arc::new(DistributedInformationExtension::new(meta_client.clone()));
// flownode's frontend to datanode need not timeout.
// Some queries are expected to take long time.
let channel_config = ChannelConfig {
timeout: None,
..Default::default()
};
let client = Arc::new(NodeClients::new(channel_config));

let information_extension = Arc::new(DistributedInformationExtension::new(
meta_client.clone(),
client.clone(),
));
let catalog_manager = KvBackendCatalogManagerBuilder::new(
information_extension,
cached_meta_backend.clone(),
@@ -398,14 +408,6 @@ impl StartCommand {
flownode.setup_services(services);
let flownode = flownode;

// flownode's frontend to datanode need not timeout.
// Some queries are expected to take long time.
let channel_config = ChannelConfig {
timeout: None,
..Default::default()
};
let client = Arc::new(NodeClients::new(channel_config));

let invoker = FrontendInvoker::build_from(
flownode.flow_engine().streaming_engine(),
catalog_manager.clone(),
@@ -27,11 +27,11 @@ use common_base::Plugins;
|
||||
use common_config::{Configurable, DEFAULT_DATA_HOME};
|
||||
use common_grpc::channel_manager::ChannelConfig;
|
||||
use common_meta::cache::{CacheRegistryBuilder, LayeredCacheRegistryBuilder};
|
||||
use common_meta::heartbeat::handler::HandlerGroupExecutor;
|
||||
use common_meta::heartbeat::handler::invalidate_table_cache::InvalidateCacheHandler;
|
||||
use common_meta::heartbeat::handler::parse_mailbox_message::ParseMailboxMessageHandler;
|
||||
use common_meta::heartbeat::handler::HandlerGroupExecutor;
|
||||
use common_telemetry::info;
|
||||
use common_telemetry::logging::{TracingOptions, DEFAULT_LOGGING_DIR};
|
||||
use common_telemetry::logging::{DEFAULT_LOGGING_DIR, TracingOptions};
|
||||
use common_time::timezone::set_default_timezone;
|
||||
use common_version::{short_version, verbose_version};
|
||||
use frontend::frontend::Frontend;
|
||||
@@ -48,7 +48,7 @@ use tracing_appender::non_blocking::WorkerGuard;
|
||||
|
||||
use crate::error::{self, Result};
|
||||
use crate::options::{GlobalOptions, GreptimeOptions};
|
||||
use crate::{create_resource_limit_metrics, log_versions, maybe_activate_heap_profile, App};
|
||||
use crate::{App, create_resource_limit_metrics, log_versions, maybe_activate_heap_profile};
|
||||
|
||||
type FrontendOptions = GreptimeOptions<frontend::frontend::FrontendOptions>;
|
||||
|
||||
@@ -378,8 +378,24 @@ impl StartCommand {
|
||||
.build(),
|
||||
);
|
||||
|
||||
let information_extension =
|
||||
Arc::new(DistributedInformationExtension::new(meta_client.clone()));
|
||||
// frontend to datanode need not timeout.
|
||||
// Some queries are expected to take long time.
|
||||
let mut channel_config = ChannelConfig {
|
||||
timeout: None,
|
||||
tcp_nodelay: opts.datanode.client.tcp_nodelay,
|
||||
connect_timeout: Some(opts.datanode.client.connect_timeout),
|
||||
..Default::default()
|
||||
};
|
||||
if opts.grpc.flight_compression.transport_compression() {
|
||||
channel_config.accept_compression = true;
|
||||
channel_config.send_compression = true;
|
||||
}
|
||||
let client = Arc::new(NodeClients::new(channel_config));
|
||||
|
||||
let information_extension = Arc::new(DistributedInformationExtension::new(
|
||||
meta_client.clone(),
|
||||
client.clone(),
|
||||
));
|
||||
|
||||
let process_manager = Arc::new(ProcessManager::new(
|
||||
addrs::resolve_addr(&opts.grpc.bind_addr, Some(&opts.grpc.server_addr)),
|
||||
@@ -413,26 +429,12 @@ impl StartCommand {
|
||||
);
|
||||
let heartbeat_task = Some(heartbeat_task);
|
||||
|
||||
// frontend to datanode need not timeout.
|
||||
// Some queries are expected to take long time.
|
||||
let mut channel_config = ChannelConfig {
|
||||
timeout: None,
|
||||
tcp_nodelay: opts.datanode.client.tcp_nodelay,
|
||||
connect_timeout: Some(opts.datanode.client.connect_timeout),
|
||||
..Default::default()
|
||||
};
|
||||
if opts.grpc.flight_compression.transport_compression() {
|
||||
channel_config.accept_compression = true;
|
||||
channel_config.send_compression = true;
|
||||
}
|
||||
let client = NodeClients::new(channel_config);
|
||||
|
||||
let instance = FrontendBuilder::new(
|
||||
opts.clone(),
|
||||
cached_meta_backend.clone(),
|
||||
layered_cache_registry.clone(),
|
||||
catalog_manager,
|
||||
Arc::new(client),
|
||||
client,
|
||||
meta_client,
|
||||
process_manager,
|
||||
)
|
||||
|
||||
@@ -46,7 +46,7 @@ lazy_static::lazy_static! {
|
||||
/// wait for the close signal, for unix platform it's SIGINT or SIGTERM
|
||||
#[cfg(unix)]
|
||||
async fn start_wait_for_close_signal() -> std::io::Result<()> {
|
||||
use tokio::signal::unix::{signal, SignalKind};
|
||||
use tokio::signal::unix::{SignalKind, signal};
|
||||
let mut sigint = signal(SignalKind::interrupt())?;
|
||||
let mut sigterm = signal(SignalKind::terminate())?;
|
||||
|
||||
@@ -93,13 +93,13 @@ pub trait App: Send {
|
||||
|
||||
self.start().await?;
|
||||
|
||||
if self.wait_signal() {
|
||||
if let Err(e) = start_wait_for_close_signal().await {
|
||||
error!(e; "Failed to listen for close signal");
|
||||
// It's unusual to fail to listen for close signal, maybe there's something unexpected in
|
||||
// the underlying system. So we stop the app instead of running nonetheless to let people
|
||||
// investigate the issue.
|
||||
}
|
||||
if self.wait_signal()
|
||||
&& let Err(e) = start_wait_for_close_signal().await
|
||||
{
|
||||
error!(e; "Failed to listen for close signal");
|
||||
// It's unusual to fail to listen for close signal, maybe there's something unexpected in
|
||||
// the underlying system. So we stop the app instead of running nonetheless to let people
|
||||
// investigate the issue.
|
||||
}
|
||||
|
||||
self.stop().await?;
|
||||
|
||||
@@ -21,7 +21,7 @@ use clap::Parser;
|
||||
use common_base::Plugins;
|
||||
use common_config::Configurable;
|
||||
use common_telemetry::info;
|
||||
use common_telemetry::logging::{TracingOptions, DEFAULT_LOGGING_DIR};
|
||||
use common_telemetry::logging::{DEFAULT_LOGGING_DIR, TracingOptions};
|
||||
use common_version::{short_version, verbose_version};
|
||||
use meta_srv::bootstrap::MetasrvInstance;
|
||||
use meta_srv::metasrv::BackendImpl;
|
||||
@@ -30,7 +30,7 @@ use tracing_appender::non_blocking::WorkerGuard;
|
||||
|
||||
use crate::error::{self, LoadLayeredConfigSnafu, Result, StartMetaServerSnafu};
|
||||
use crate::options::{GlobalOptions, GreptimeOptions};
|
||||
use crate::{create_resource_limit_metrics, log_versions, maybe_activate_heap_profile, App};
|
||||
use crate::{App, create_resource_limit_metrics, log_versions, maybe_activate_heap_profile};
|
||||
|
||||
type MetasrvOptions = GreptimeOptions<meta_srv::metasrv::MetasrvOptions>;
|
||||
|
||||
|
||||
@@ -19,15 +19,16 @@ use std::{fs, path};
|
||||
|
||||
use async_trait::async_trait;
|
||||
use cache::{build_fundamental_cache_registry, with_default_composite_cache_registry};
|
||||
use catalog::information_schema::InformationExtension;
|
||||
use catalog::information_schema::{DatanodeInspectRequest, InformationExtension};
|
||||
use catalog::kvbackend::KvBackendCatalogManagerBuilder;
|
||||
use catalog::process_manager::ProcessManager;
|
||||
use clap::Parser;
|
||||
use client::SendableRecordBatchStream;
|
||||
use client::api::v1::meta::RegionRole;
|
||||
use common_base::readable_size::ReadableSize;
|
||||
use common_base::Plugins;
|
||||
use common_base::readable_size::ReadableSize;
|
||||
use common_catalog::consts::{MIN_USER_FLOW_ID, MIN_USER_TABLE_ID};
|
||||
use common_config::{metadata_store_dir, Configurable, KvBackendConfig};
|
||||
use common_config::{Configurable, KvBackendConfig, metadata_store_dir};
|
||||
use common_error::ext::BoxedError;
|
||||
use common_meta::cache::LayeredCacheRegistryBuilder;
|
||||
use common_meta::cluster::{NodeInfo, NodeStatus};
|
||||
@@ -36,8 +37,8 @@ use common_meta::ddl::flow_meta::FlowMetadataAllocator;
|
||||
use common_meta::ddl::table_meta::TableMetadataAllocator;
|
||||
use common_meta::ddl::{DdlContext, NoopRegionFailureDetectorControl};
|
||||
use common_meta::ddl_manager::DdlManager;
|
||||
use common_meta::key::flow::flow_state::FlowStat;
|
||||
use common_meta::key::flow::FlowMetadataManager;
|
||||
use common_meta::key::flow::flow_state::FlowStat;
|
||||
use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
|
||||
use common_meta::kv_backend::KvBackendRef;
|
||||
use common_meta::peer::Peer;
|
||||
@@ -45,12 +46,13 @@ use common_meta::procedure_executor::LocalProcedureExecutor;
|
||||
use common_meta::region_keeper::MemoryRegionKeeper;
|
||||
use common_meta::region_registry::LeaderRegionRegistry;
|
||||
use common_meta::sequence::SequenceBuilder;
|
||||
use common_meta::wal_options_allocator::{build_wal_options_allocator, WalOptionsAllocatorRef};
|
||||
use common_meta::wal_options_allocator::{WalOptionsAllocatorRef, build_wal_options_allocator};
|
||||
use common_options::memory::MemoryOptions;
|
||||
use common_procedure::{ProcedureInfo, ProcedureManagerRef};
|
||||
use common_query::request::QueryRequest;
|
||||
use common_telemetry::info;
|
||||
use common_telemetry::logging::{
|
||||
LoggingOptions, SlowQueryOptions, TracingOptions, DEFAULT_LOGGING_DIR,
|
||||
DEFAULT_LOGGING_DIR, LoggingOptions, SlowQueryOptions, TracingOptions,
|
||||
};
|
||||
use common_time::timezone::set_default_timezone;
|
||||
use common_version::{short_version, verbose_version};
|
||||
@@ -80,12 +82,13 @@ use servers::grpc::GrpcOptions;
|
||||
use servers::http::HttpOptions;
|
||||
use servers::tls::{TlsMode, TlsOption};
|
||||
use snafu::ResultExt;
|
||||
use store_api::storage::RegionId;
|
||||
use tokio::sync::RwLock;
|
||||
use tracing_appender::non_blocking::WorkerGuard;
|
||||
|
||||
use crate::error::{Result, StartFlownodeSnafu};
|
||||
use crate::options::{GlobalOptions, GreptimeOptions};
|
||||
use crate::{create_resource_limit_metrics, error, log_versions, maybe_activate_heap_profile, App};
|
||||
use crate::{App, create_resource_limit_metrics, error, log_versions, maybe_activate_heap_profile};
|
||||
|
||||
pub const APP_NAME: &str = "greptime-standalone";
|
||||
|
||||
@@ -787,6 +790,10 @@ impl InformationExtension for StandaloneInformationExtension {
// Use `self.start_time_ms` instead.
// It's not precise but enough.
start_time_ms: self.start_time_ms,
cpus: common_config::utils::get_cpus() as u32,
memory_bytes: common_config::utils::get_sys_total_memory()
.unwrap_or_default()
.as_bytes(),
};
Ok(vec![node_info])
}
@@ -834,7 +841,7 @@ impl InformationExtension for StandaloneInformationExtension {
region_manifest: region_stat.manifest.into(),
data_topic_latest_entry_id: region_stat.data_topic_latest_entry_id,
metadata_topic_latest_entry_id: region_stat.metadata_topic_latest_entry_id,
write_bytes: 0,
written_bytes: region_stat.written_bytes,
}
})
.collect::<Vec<_>>();
@@ -852,6 +859,25 @@ impl InformationExtension for StandaloneInformationExtension {
.await,
))
}

async fn inspect_datanode(
&self,
request: DatanodeInspectRequest,
) -> std::result::Result<SendableRecordBatchStream, Self::Error> {
let req = QueryRequest {
plan: request
.build_plan()
.context(catalog::error::DatafusionSnafu)?,
region_id: RegionId::default(),
header: None,
};

self.region_server
.handle_read(req)
.await
.map_err(BoxedError::new)
.context(catalog::error::InternalSnafu)
}
}

#[cfg(test)]
@@ -18,9 +18,9 @@ use cmd::options::GreptimeOptions;
|
||||
use cmd::standalone::StandaloneOptions;
|
||||
use common_config::{Configurable, DEFAULT_DATA_HOME};
|
||||
use common_options::datanode::{ClientOptions, DatanodeClientOptions};
|
||||
use common_telemetry::logging::{LoggingOptions, DEFAULT_LOGGING_DIR, DEFAULT_OTLP_HTTP_ENDPOINT};
|
||||
use common_wal::config::raft_engine::RaftEngineConfig;
|
||||
use common_telemetry::logging::{DEFAULT_LOGGING_DIR, DEFAULT_OTLP_HTTP_ENDPOINT, LoggingOptions};
|
||||
use common_wal::config::DatanodeWalConfig;
|
||||
use common_wal::config::raft_engine::RaftEngineConfig;
|
||||
use datanode::config::{DatanodeOptions, RegionEngineConfig, StorageConfig};
|
||||
use file_engine::config::EngineConfig as FileEngineConfig;
|
||||
use flow::FlownodeOptions;
|
||||
|
||||
@@ -19,8 +19,8 @@
|
||||
use std::fmt::{Debug, Display, Formatter};
|
||||
use std::future::Future;
|
||||
use std::pin::Pin;
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::sync::Arc;
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::task::{Context, Poll};
|
||||
|
||||
use futures::task::AtomicWaker;
|
||||
|
||||
@@ -17,8 +17,8 @@ use std::io;
|
||||
use std::ops::Range;
|
||||
use std::path::Path;
|
||||
use std::pin::Pin;
|
||||
use std::sync::atomic::{AtomicU64, Ordering};
|
||||
use std::sync::Arc;
|
||||
use std::sync::atomic::{AtomicU64, Ordering};
|
||||
use std::task::{Context, Poll};
|
||||
|
||||
use async_trait::async_trait;
|
||||
|
||||
@@ -7,7 +7,7 @@ use std::ops::{Div, Mul};
|
||||
use std::str::FromStr;
|
||||
|
||||
use serde::de::{Unexpected, Visitor};
|
||||
use serde::{de, Deserialize, Deserializer, Serialize, Serializer};
|
||||
use serde::{Deserialize, Deserializer, Serialize, Serializer, de};
|
||||
|
||||
const UNIT: u64 = 1;
|
||||
|
||||
|
||||
@@ -34,7 +34,7 @@
|
||||
use std::fmt::Debug;
|
||||
use std::{any, fmt};
|
||||
|
||||
use serde::{de, ser, Deserialize, Serialize};
|
||||
use serde::{Deserialize, Serialize, de, ser};
|
||||
use zeroize::{Zeroize, ZeroizeOnDrop};
|
||||
|
||||
/// Wrapper type for strings that contains secrets. See also [SecretBox].
|
||||
|
||||
@@ -13,8 +13,8 @@
|
||||
// limitations under the License.
|
||||
|
||||
use config::{Environment, File, FileFormat};
|
||||
use serde::de::DeserializeOwned;
|
||||
use serde::Serialize;
|
||||
use serde::de::DeserializeOwned;
|
||||
use snafu::ResultExt;
|
||||
|
||||
use crate::error::{LoadLayeredConfigSnafu, Result, SerdeJsonSnafu, TomlFormatSnafu};
|
||||
|
||||
@@ -39,6 +39,24 @@ pub fn get_sys_total_memory() -> Option<ReadableSize> {
}
}

/// `ResourceSpec` holds the static resource specifications of a node,
/// such as CPU cores and memory capacity. These values are fixed
/// at startup and do not change dynamically during runtime.
#[derive(Debug, Clone, Copy)]
pub struct ResourceSpec {
pub cpus: usize,
pub memory: Option<ReadableSize>,
}

impl Default for ResourceSpec {
fn default() -> Self {
Self {
cpus: get_cpus(),
memory: get_sys_total_memory(),
}
}
}

#[cfg(test)]
mod tests {
use super::*;
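Aside (editor's illustration, not part of the diff): the `ResourceSpec` added above just snapshots the host's CPU count and total memory once at startup via its `Default` impl. A self-contained sketch of the same idea, with hypothetical names and std-only detection standing in for the real `get_cpus()`/`get_sys_total_memory()`:

// Illustrative sketch only: capture static host resources once at startup.
#[derive(Debug, Clone, Copy)]
struct ResourceSpec {
    cpus: usize,
    memory_bytes: Option<u64>,
}

impl ResourceSpec {
    fn detect() -> Self {
        Self {
            // std-based stand-in for the crate's CPU detection helper.
            cpus: std::thread::available_parallelism().map(|n| n.get()).unwrap_or(1),
            // Total-memory detection is platform specific and left out of the sketch.
            memory_bytes: None,
        }
    }
}

fn main() {
    let spec = ResourceSpec::detect();
    println!("node resources: {:?}", spec);
}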
@@ -45,11 +45,11 @@ pub trait ArrowWriterCloser {
}

impl<
T: AsyncWrite + Send + Unpin,
U: DfRecordBatchEncoder + ArrowWriterCloser,
F: Fn(String) -> Fut,
Fut: Future<Output = Result<T>>,
> LazyBufferedWriter<T, U, F>
T: AsyncWrite + Send + Unpin,
U: DfRecordBatchEncoder + ArrowWriterCloser,
F: Fn(String) -> Fut,
Fut: Future<Output = Result<T>>,
> LazyBufferedWriter<T, U, F>
{
/// Closes `LazyBufferedWriter` and optionally flushes all data to underlying storage
/// if any row's been written.
@@ -67,11 +67,11 @@ impl<
}

impl<
T: AsyncWrite + Send + Unpin,
U: DfRecordBatchEncoder,
F: Fn(String) -> Fut,
Fut: Future<Output = Result<T>>,
> LazyBufferedWriter<T, U, F>
T: AsyncWrite + Send + Unpin,
U: DfRecordBatchEncoder,
F: Fn(String) -> Fut,
Fut: Future<Output = Result<T>>,
> LazyBufferedWriter<T, U, F>
{
/// Closes the writer and flushes the buffer data.
pub async fn close_inner_writer(&mut self) -> Result<()> {
Some files were not shown because too many files have changed in this diff.