Mirror of https://github.com/GreptimeTeam/greptimedb.git (synced 2026-01-09 14:52:58 +00:00)

Compare commits: basic_with ... flow/min_o (6 commits)
| Author | SHA1 | Date |
|---|---|---|
| | c8fde8112a | |
| | 4534e4c31d | |
| | 3d8278dc4c | |
| | e4328380b2 | |
| | e962076207 | |
| | 9ef8ba6460 | |
@@ -30,7 +30,7 @@ update_helm_charts_version() {
 
   # Commit the changes.
   git add .
-  git commit -s -m "chore: Update GreptimeDB version to ${VERSION}"
+  git commit -m "chore: Update GreptimeDB version to ${VERSION}"
   git push origin $BRANCH_NAME
 
   # Create a Pull Request.

@@ -26,7 +26,7 @@ update_homebrew_greptime_version() {
 
   # Commit the changes.
   git add .
-  git commit -s -m "chore: Update GreptimeDB version to ${VERSION}"
+  git commit -m "chore: Update GreptimeDB version to ${VERSION}"
   git push origin $BRANCH_NAME
 
   # Create a Pull Request.
Cargo.lock (generated, 50 lines changed)
@@ -3252,7 +3252,7 @@ dependencies = [
 [[package]]
 name = "datafusion"
 version = "45.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=12c0381babd52c681043957e9d6ee083a03f7646#12c0381babd52c681043957e9d6ee083a03f7646"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
 dependencies = [
  "arrow 54.2.1",
  "arrow-array 54.2.1",

@@ -3303,7 +3303,7 @@ dependencies = [
 [[package]]
 name = "datafusion-catalog"
 version = "45.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=12c0381babd52c681043957e9d6ee083a03f7646#12c0381babd52c681043957e9d6ee083a03f7646"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
 dependencies = [
  "arrow 54.2.1",
  "async-trait",

@@ -3323,7 +3323,7 @@ dependencies = [
 [[package]]
 name = "datafusion-catalog-listing"
 version = "45.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=12c0381babd52c681043957e9d6ee083a03f7646#12c0381babd52c681043957e9d6ee083a03f7646"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
 dependencies = [
  "arrow 54.2.1",
  "arrow-schema 54.3.1",

@@ -3346,7 +3346,7 @@ dependencies = [
 [[package]]
 name = "datafusion-common"
 version = "45.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=12c0381babd52c681043957e9d6ee083a03f7646#12c0381babd52c681043957e9d6ee083a03f7646"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
 dependencies = [
  "ahash 0.8.11",
  "arrow 54.2.1",

@@ -3371,7 +3371,7 @@ dependencies = [
 [[package]]
 name = "datafusion-common-runtime"
 version = "45.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=12c0381babd52c681043957e9d6ee083a03f7646#12c0381babd52c681043957e9d6ee083a03f7646"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
 dependencies = [
  "log",
  "tokio",

@@ -3380,12 +3380,12 @@ dependencies = [
 [[package]]
 name = "datafusion-doc"
 version = "45.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=12c0381babd52c681043957e9d6ee083a03f7646#12c0381babd52c681043957e9d6ee083a03f7646"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
 
 [[package]]
 name = "datafusion-execution"
 version = "45.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=12c0381babd52c681043957e9d6ee083a03f7646#12c0381babd52c681043957e9d6ee083a03f7646"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
 dependencies = [
  "arrow 54.2.1",
  "dashmap",

@@ -3403,7 +3403,7 @@ dependencies = [
 [[package]]
 name = "datafusion-expr"
 version = "45.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=12c0381babd52c681043957e9d6ee083a03f7646#12c0381babd52c681043957e9d6ee083a03f7646"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
 dependencies = [
  "arrow 54.2.1",
  "chrono",

@@ -3423,7 +3423,7 @@ dependencies = [
 [[package]]
 name = "datafusion-expr-common"
 version = "45.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=12c0381babd52c681043957e9d6ee083a03f7646#12c0381babd52c681043957e9d6ee083a03f7646"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
 dependencies = [
  "arrow 54.2.1",
  "datafusion-common",

@@ -3434,7 +3434,7 @@ dependencies = [
 [[package]]
 name = "datafusion-functions"
 version = "45.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=12c0381babd52c681043957e9d6ee083a03f7646#12c0381babd52c681043957e9d6ee083a03f7646"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
 dependencies = [
  "arrow 54.2.1",
  "arrow-buffer 54.3.1",

@@ -3463,7 +3463,7 @@ dependencies = [
 [[package]]
 name = "datafusion-functions-aggregate"
 version = "45.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=12c0381babd52c681043957e9d6ee083a03f7646#12c0381babd52c681043957e9d6ee083a03f7646"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
 dependencies = [
  "ahash 0.8.11",
  "arrow 54.2.1",

@@ -3484,7 +3484,7 @@ dependencies = [
 [[package]]
 name = "datafusion-functions-aggregate-common"
 version = "45.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=12c0381babd52c681043957e9d6ee083a03f7646#12c0381babd52c681043957e9d6ee083a03f7646"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
 dependencies = [
  "ahash 0.8.11",
  "arrow 54.2.1",

@@ -3496,7 +3496,7 @@ dependencies = [
 [[package]]
 name = "datafusion-functions-nested"
 version = "45.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=12c0381babd52c681043957e9d6ee083a03f7646#12c0381babd52c681043957e9d6ee083a03f7646"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
 dependencies = [
  "arrow 54.2.1",
  "arrow-array 54.2.1",

@@ -3518,7 +3518,7 @@ dependencies = [
 [[package]]
 name = "datafusion-functions-table"
 version = "45.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=12c0381babd52c681043957e9d6ee083a03f7646#12c0381babd52c681043957e9d6ee083a03f7646"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
 dependencies = [
  "arrow 54.2.1",
  "async-trait",

@@ -3533,7 +3533,7 @@ dependencies = [
 [[package]]
 name = "datafusion-functions-window"
 version = "45.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=12c0381babd52c681043957e9d6ee083a03f7646#12c0381babd52c681043957e9d6ee083a03f7646"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
 dependencies = [
  "datafusion-common",
  "datafusion-doc",

@@ -3549,7 +3549,7 @@ dependencies = [
 [[package]]
 name = "datafusion-functions-window-common"
 version = "45.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=12c0381babd52c681043957e9d6ee083a03f7646#12c0381babd52c681043957e9d6ee083a03f7646"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
 dependencies = [
  "datafusion-common",
  "datafusion-physical-expr-common",

@@ -3558,7 +3558,7 @@ dependencies = [
 [[package]]
 name = "datafusion-macros"
 version = "45.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=12c0381babd52c681043957e9d6ee083a03f7646#12c0381babd52c681043957e9d6ee083a03f7646"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
 dependencies = [
  "datafusion-expr",
  "quote",

@@ -3568,7 +3568,7 @@ dependencies = [
 [[package]]
 name = "datafusion-optimizer"
 version = "45.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=12c0381babd52c681043957e9d6ee083a03f7646#12c0381babd52c681043957e9d6ee083a03f7646"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
 dependencies = [
  "arrow 54.2.1",
  "chrono",

@@ -3586,7 +3586,7 @@ dependencies = [
 [[package]]
 name = "datafusion-physical-expr"
 version = "45.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=12c0381babd52c681043957e9d6ee083a03f7646#12c0381babd52c681043957e9d6ee083a03f7646"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
 dependencies = [
  "ahash 0.8.11",
  "arrow 54.2.1",

@@ -3609,7 +3609,7 @@ dependencies = [
 [[package]]
 name = "datafusion-physical-expr-common"
 version = "45.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=12c0381babd52c681043957e9d6ee083a03f7646#12c0381babd52c681043957e9d6ee083a03f7646"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
 dependencies = [
  "ahash 0.8.11",
  "arrow 54.2.1",

@@ -3622,7 +3622,7 @@ dependencies = [
 [[package]]
 name = "datafusion-physical-optimizer"
 version = "45.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=12c0381babd52c681043957e9d6ee083a03f7646#12c0381babd52c681043957e9d6ee083a03f7646"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
 dependencies = [
  "arrow 54.2.1",
  "arrow-schema 54.3.1",

@@ -3643,7 +3643,7 @@ dependencies = [
 [[package]]
 name = "datafusion-physical-plan"
 version = "45.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=12c0381babd52c681043957e9d6ee083a03f7646#12c0381babd52c681043957e9d6ee083a03f7646"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
 dependencies = [
  "ahash 0.8.11",
  "arrow 54.2.1",

@@ -3673,7 +3673,7 @@ dependencies = [
 [[package]]
 name = "datafusion-sql"
 version = "45.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=12c0381babd52c681043957e9d6ee083a03f7646#12c0381babd52c681043957e9d6ee083a03f7646"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
 dependencies = [
  "arrow 54.2.1",
  "arrow-array 54.2.1",

@@ -3691,7 +3691,7 @@ dependencies = [
 [[package]]
 name = "datafusion-substrait"
 version = "45.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=12c0381babd52c681043957e9d6ee083a03f7646#12c0381babd52c681043957e9d6ee083a03f7646"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
 dependencies = [
  "async-recursion",
  "async-trait",

@@ -5133,7 +5133,7 @@ dependencies = [
 [[package]]
 name = "greptime-proto"
 version = "0.1.0"
-source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=454c52634c3bac27de10bf0d85d5533eed1cf03f#454c52634c3bac27de10bf0d85d5533eed1cf03f"
+source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=2dca1dc67862d7b410838aef81232274c019b3f6#2dca1dc67862d7b410838aef81232274c019b3f6"
 dependencies = [
  "prost 0.13.5",
  "serde",
Cargo.toml (20 lines changed)
@@ -116,15 +116,15 @@ clap = { version = "4.4", features = ["derive"] }
 config = "0.13.0"
 crossbeam-utils = "0.8"
 dashmap = "6.1"
-datafusion = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "12c0381babd52c681043957e9d6ee083a03f7646" }
+datafusion = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "e104c7cf62b11dd5fe41461b82514978234326b4" }
-datafusion-common = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "12c0381babd52c681043957e9d6ee083a03f7646" }
+datafusion-common = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "e104c7cf62b11dd5fe41461b82514978234326b4" }
-datafusion-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "12c0381babd52c681043957e9d6ee083a03f7646" }
+datafusion-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "e104c7cf62b11dd5fe41461b82514978234326b4" }
-datafusion-functions = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "12c0381babd52c681043957e9d6ee083a03f7646" }
+datafusion-functions = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "e104c7cf62b11dd5fe41461b82514978234326b4" }
-datafusion-optimizer = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "12c0381babd52c681043957e9d6ee083a03f7646" }
+datafusion-optimizer = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "e104c7cf62b11dd5fe41461b82514978234326b4" }
-datafusion-physical-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "12c0381babd52c681043957e9d6ee083a03f7646" }
+datafusion-physical-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "e104c7cf62b11dd5fe41461b82514978234326b4" }
-datafusion-physical-plan = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "12c0381babd52c681043957e9d6ee083a03f7646" }
+datafusion-physical-plan = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "e104c7cf62b11dd5fe41461b82514978234326b4" }
-datafusion-sql = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "12c0381babd52c681043957e9d6ee083a03f7646" }
+datafusion-sql = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "e104c7cf62b11dd5fe41461b82514978234326b4" }
-datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "12c0381babd52c681043957e9d6ee083a03f7646" }
+datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "e104c7cf62b11dd5fe41461b82514978234326b4" }
 deadpool = "0.12"
 deadpool-postgres = "0.14"
 derive_builder = "0.20"

@@ -133,7 +133,7 @@ etcd-client = "0.14"
 fst = "0.4.7"
 futures = "0.3"
 futures-util = "0.3"
-greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "454c52634c3bac27de10bf0d85d5533eed1cf03f" }
+greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "2dca1dc67862d7b410838aef81232274c019b3f6" }
 hex = "0.4"
 http = "1"
 humantime = "2.1"
@@ -49,7 +49,6 @@ max_send_message_size = "512MB"
 ## - `transport`: only enable gRPC transport compression (zstd)
 ## - `arrow_ipc`: only enable Arrow IPC compression (lz4)
 ## - `all`: enable all compression.
-## Default to `none`
 flight_compression = "arrow_ipc"
 
 ## gRPC server TLS options, see `mysql.tls` section.

@@ -59,7 +59,6 @@ runtime_size = 8
 ## - `transport`: only enable gRPC transport compression (zstd)
 ## - `arrow_ipc`: only enable Arrow IPC compression (lz4)
 ## - `all`: enable all compression.
-## Default to `none`
 flight_compression = "arrow_ipc"
 
 ## gRPC server TLS options, see `mysql.tls` section.
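The comment block above enumerates four `flight_compression` modes (`none`, `transport`, `arrow_ipc`, `all`). As a rough, non-authoritative sketch of how such a setting maps onto behavior, the enum below mirrors those modes and the `transport_compression()` helper that the frontend code later in this compare calls; the actual type in GreptimeDB is not shown in this diff, so the names and shape here are assumptions.

```rust
/// Minimal sketch of the documented `flight_compression` modes.
/// The real GreptimeDB type may differ; this is for illustration only.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum FlightCompression {
    /// No compression.
    None,
    /// Only gRPC transport compression (zstd).
    Transport,
    /// Only Arrow IPC compression (lz4).
    ArrowIpc,
    /// Enable both kinds of compression.
    All,
}

impl FlightCompression {
    /// Should gRPC transport-level (zstd) compression be enabled?
    pub fn transport_compression(self) -> bool {
        matches!(self, FlightCompression::Transport | FlightCompression::All)
    }

    /// Should Arrow IPC (lz4) compression be enabled?
    pub fn arrow_ipc_compression(self) -> bool {
        matches!(self, FlightCompression::ArrowIpc | FlightCompression::All)
    }
}
```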
@@ -58,7 +58,6 @@ where
     info!("{desc}, average operation cost: {cost:.2} ms");
 }
 
-/// Command to benchmark table metadata operations.
 #[derive(Debug, Default, Parser)]
 pub struct BenchTableMetadataCommand {
     #[clap(long)]

@@ -244,18 +244,6 @@ pub enum Error {
         #[snafu(implicit)]
         location: Location,
     },
-    #[snafu(display("Unsupported memory backend"))]
-    UnsupportedMemoryBackend {
-        #[snafu(implicit)]
-        location: Location,
-    },
-
-    #[snafu(display("File path invalid: {}", msg))]
-    InvalidFilePath {
-        msg: String,
-        #[snafu(implicit)]
-        location: Location,
-    },
 }
 
 pub type Result<T> = std::result::Result<T, Error>;

@@ -274,8 +262,6 @@ impl ErrorExt for Error {
             | Error::ConnectEtcd { .. }
             | Error::CreateDir { .. }
             | Error::EmptyResult { .. }
-            | Error::InvalidFilePath { .. }
-            | Error::UnsupportedMemoryBackend { .. }
             | Error::ParseProxyOpts { .. } => StatusCode::InvalidArguments,
 
             Error::StartProcedureManager { source, .. }

@@ -50,7 +50,6 @@ enum ExportTarget {
     All,
 }
 
-/// Command for exporting data from the GreptimeDB.
 #[derive(Debug, Default, Parser)]
 pub struct ExportCommand {
     /// Server address to connect

@@ -40,7 +40,6 @@ enum ImportTarget {
     All,
 }
 
-/// Command to import data from a directory into a GreptimeDB instance.
 #[derive(Debug, Default, Parser)]
 pub struct ImportCommand {
     /// Server address to connect

@@ -20,7 +20,7 @@ mod import;
 mod meta_snapshot;
 
 use async_trait::async_trait;
-use clap::{Parser, Subcommand};
+use clap::Parser;
 use common_error::ext::BoxedError;
 pub use database::DatabaseClient;
 use error::Result;

@@ -28,7 +28,7 @@ use error::Result;
 pub use crate::bench::BenchTableMetadataCommand;
 pub use crate::export::ExportCommand;
 pub use crate::import::ImportCommand;
-pub use crate::meta_snapshot::{MetaCommand, MetaInfoCommand, MetaRestoreCommand, MetaSaveCommand};
+pub use crate::meta_snapshot::{MetaRestoreCommand, MetaSnapshotCommand};
 
 #[async_trait]
 pub trait Tool: Send + Sync {

@@ -51,19 +51,3 @@ impl AttachCommand {
         unimplemented!("Wait for https://github.com/GreptimeTeam/greptimedb/issues/2373")
     }
 }
-
-/// Subcommand for data operations like export and import.
-#[derive(Subcommand)]
-pub enum DataCommand {
-    Export(ExportCommand),
-    Import(ImportCommand),
-}
-
-impl DataCommand {
-    pub async fn build(&self) -> std::result::Result<Box<dyn Tool>, BoxedError> {
-        match self {
-            DataCommand::Export(cmd) => cmd.build().await,
-            DataCommand::Import(cmd) => cmd.build().await,
-        }
-    }
-}
@@ -12,11 +12,10 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-use std::path::Path;
 use std::sync::Arc;
 
 use async_trait::async_trait;
-use clap::{Parser, Subcommand};
+use clap::Parser;
 use common_base::secrets::{ExposeSecret, SecretString};
 use common_error::ext::BoxedError;
 use common_meta::kv_backend::chroot::ChrootKvBackend;

@@ -27,50 +26,10 @@ use meta_srv::bootstrap::create_etcd_client;
 use meta_srv::metasrv::BackendImpl;
 use object_store::services::{Fs, S3};
 use object_store::ObjectStore;
-use snafu::{OptionExt, ResultExt};
+use snafu::ResultExt;
 
-use crate::error::{
-    InvalidFilePathSnafu, KvBackendNotSetSnafu, OpenDalSnafu, S3ConfigNotSetSnafu,
-    UnsupportedMemoryBackendSnafu,
-};
+use crate::error::{KvBackendNotSetSnafu, OpenDalSnafu, S3ConfigNotSetSnafu};
 use crate::Tool;
 
-/// Subcommand for metadata snapshot management.
-#[derive(Subcommand)]
-pub enum MetaCommand {
-    #[clap(subcommand)]
-    Snapshot(MetaSnapshotCommand),
-}
-
-impl MetaCommand {
-    pub async fn build(&self) -> Result<Box<dyn Tool>, BoxedError> {
-        match self {
-            MetaCommand::Snapshot(cmd) => cmd.build().await,
-        }
-    }
-}
-
-/// Subcommand for metadata snapshot operations. such as save, restore and info.
-#[derive(Subcommand)]
-pub enum MetaSnapshotCommand {
-    /// Export metadata snapshot tool.
-    Save(MetaSaveCommand),
-    /// Restore metadata snapshot tool.
-    Restore(MetaRestoreCommand),
-    /// Explore metadata from metadata snapshot.
-    Info(MetaInfoCommand),
-}
-
-impl MetaSnapshotCommand {
-    pub async fn build(&self) -> Result<Box<dyn Tool>, BoxedError> {
-        match self {
-            MetaSnapshotCommand::Save(cmd) => cmd.build().await,
-            MetaSnapshotCommand::Restore(cmd) => cmd.build().await,
-            MetaSnapshotCommand::Info(cmd) => cmd.build().await,
-        }
-    }
-}
-
 #[derive(Debug, Default, Parser)]
 struct MetaConnection {
     /// The endpoint of store. one of etcd, pg or mysql.

@@ -132,9 +91,6 @@ impl MetaConnection {
                 .await
                 .map_err(BoxedError::new)?)
             }
-            Some(BackendImpl::MemoryStore) => UnsupportedMemoryBackendSnafu
-                .fail()
-                .map_err(BoxedError::new),
             _ => KvBackendNotSetSnafu { backend: "all" }
                 .fail()
                 .map_err(BoxedError::new),

@@ -214,7 +170,7 @@ impl S3Config {
 /// It will dump the metadata snapshot to local file or s3 bucket.
 /// The snapshot file will be in binary format.
 #[derive(Debug, Default, Parser)]
-pub struct MetaSaveCommand {
+pub struct MetaSnapshotCommand {
     /// The connection to the metadata store.
     #[clap(flatten)]
     connection: MetaConnection,

@@ -240,7 +196,7 @@ fn create_local_file_object_store(root: &str) -> Result<ObjectStore, BoxedError>
     Ok(object_store)
 }
 
-impl MetaSaveCommand {
+impl MetaSnapshotCommand {
     pub async fn build(&self) -> Result<Box<dyn Tool>, BoxedError> {
         let kvbackend = self.connection.build().await?;
         let output_dir = &self.output_dir;

@@ -371,89 +327,3 @@ impl Tool for MetaRestoreTool {
         }
     }
 }
-
-/// Explore metadata from metadata snapshot.
-#[derive(Debug, Default, Parser)]
-pub struct MetaInfoCommand {
-    /// The s3 config.
-    #[clap(flatten)]
-    s3_config: S3Config,
-    /// The name of the target snapshot file. we will add the file extension automatically.
-    #[clap(long, default_value = "metadata_snapshot")]
-    file_name: String,
-    /// The query string to filter the metadata.
-    #[clap(long, default_value = "*")]
-    inspect_key: String,
-    /// The limit of the metadata to query.
-    #[clap(long)]
-    limit: Option<usize>,
-}
-
-pub struct MetaInfoTool {
-    inner: ObjectStore,
-    source_file: String,
-    inspect_key: String,
-    limit: Option<usize>,
-}
-
-#[async_trait]
-impl Tool for MetaInfoTool {
-    async fn do_work(&self) -> std::result::Result<(), BoxedError> {
-        let result = MetadataSnapshotManager::info(
-            &self.inner,
-            &self.source_file,
-            &self.inspect_key,
-            self.limit,
-        )
-        .await
-        .map_err(BoxedError::new)?;
-        for item in result {
-            println!("{}", item);
-        }
-        Ok(())
-    }
-}
-
-impl MetaInfoCommand {
-    fn decide_object_store_root_for_local_store(
-        file_path: &str,
-    ) -> Result<(&str, &str), BoxedError> {
-        let path = Path::new(file_path);
-        let parent = path
-            .parent()
-            .and_then(|p| p.to_str())
-            .context(InvalidFilePathSnafu { msg: file_path })
-            .map_err(BoxedError::new)?;
-        let file_name = path
-            .file_name()
-            .and_then(|f| f.to_str())
-            .context(InvalidFilePathSnafu { msg: file_path })
-            .map_err(BoxedError::new)?;
-        let root = if parent.is_empty() { "." } else { parent };
-        Ok((root, file_name))
-    }
-
-    pub async fn build(&self) -> Result<Box<dyn Tool>, BoxedError> {
-        let object_store = self.s3_config.build("").map_err(BoxedError::new)?;
-        if let Some(store) = object_store {
-            let tool = MetaInfoTool {
-                inner: store,
-                source_file: self.file_name.clone(),
-                inspect_key: self.inspect_key.clone(),
-                limit: self.limit,
-            };
-            Ok(Box::new(tool))
-        } else {
-            let (root, file_name) =
-                Self::decide_object_store_root_for_local_store(&self.file_name)?;
-            let object_store = create_local_file_object_store(root)?;
-            let tool = MetaInfoTool {
-                inner: object_store,
-                source_file: file_name.to_string(),
-                inspect_key: self.inspect_key.clone(),
-                limit: self.limit,
-            };
-            Ok(Box::new(tool))
-        }
-    }
-}
@@ -162,23 +162,12 @@ impl Client {
             .as_bytes() as usize
     }
 
-    pub fn make_flight_client(
-        &self,
-        send_compression: bool,
-        accept_compression: bool,
-    ) -> Result<FlightClient> {
+    pub fn make_flight_client(&self) -> Result<FlightClient> {
         let (addr, channel) = self.find_channel()?;
 
-        let mut client = FlightServiceClient::new(channel)
+        let client = FlightServiceClient::new(channel)
             .max_decoding_message_size(self.max_grpc_recv_message_size())
            .max_encoding_message_size(self.max_grpc_send_message_size());
-        // todo(hl): support compression methods.
-        if send_compression {
-            client = client.send_compressed(CompressionEncoding::Zstd);
-        }
-        if accept_compression {
-            client = client.accept_compressed(CompressionEncoding::Zstd);
-        }
 
         Ok(FlightClient { addr, client })
     }
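For reference, the branch removed above used tonic's per-call compression builders on the generated Flight client. A minimal sketch of that pattern, assuming tonic is built with its zstd feature and using arrow-flight's generated client; the helper function itself is an assumption for illustration.

```rust
use arrow_flight::flight_service_client::FlightServiceClient;
use tonic::codec::CompressionEncoding;
use tonic::transport::Channel;

/// Sketch of the compression wiring the removed branch performed.
fn compressed_flight_client(
    channel: Channel,
    send_compression: bool,
    accept_compression: bool,
) -> FlightServiceClient<Channel> {
    let mut client = FlightServiceClient::new(channel);
    if send_compression {
        // Compress request messages sent to the server with zstd.
        client = client.send_compressed(CompressionEncoding::Zstd);
    }
    if accept_compression {
        // Advertise that zstd-compressed responses are accepted.
        client = client.accept_compressed(CompressionEncoding::Zstd);
    }
    client
}
```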
@@ -49,16 +49,7 @@ impl NodeManager for NodeClients {
     async fn datanode(&self, datanode: &Peer) -> DatanodeRef {
         let client = self.get_client(datanode).await;
 
-        let ChannelConfig {
-            send_compression,
-            accept_compression,
-            ..
-        } = self.channel_manager.config();
-        Arc::new(RegionRequester::new(
-            client,
-            *send_compression,
-            *accept_compression,
-        ))
+        Arc::new(RegionRequester::new(client))
     }
 
     async fn flownode(&self, flownode: &Peer) -> FlownodeRef {

@@ -287,7 +287,7 @@ impl Database {
         let mut request = tonic::Request::new(request);
         Self::put_hints(request.metadata_mut(), hints)?;
 
-        let mut client = self.client.make_flight_client(false, false)?;
+        let mut client = self.client.make_flight_client()?;
 
         let response = client.mut_inner().do_get(request).await.or_else(|e| {
             let tonic_code = e.code();

@@ -409,7 +409,7 @@ impl Database {
             MetadataValue::from_str(db_to_put).context(InvalidTonicMetadataValueSnafu)?,
         );
 
-        let mut client = self.client.make_flight_client(false, false)?;
+        let mut client = self.client.make_flight_client()?;
         let response = client.mut_inner().do_put(request).await?;
         let response = response
             .into_inner()
@@ -46,8 +46,6 @@ use crate::{metrics, Client, Error};
 #[derive(Debug)]
 pub struct RegionRequester {
     client: Client,
-    send_compression: bool,
-    accept_compression: bool,
 }
 
 #[async_trait]

@@ -91,18 +89,12 @@ impl Datanode for RegionRequester {
 }
 
 impl RegionRequester {
-    pub fn new(client: Client, send_compression: bool, accept_compression: bool) -> Self {
-        Self {
-            client,
-            send_compression,
-            accept_compression,
-        }
+    pub fn new(client: Client) -> Self {
+        Self { client }
     }
 
     pub async fn do_get_inner(&self, ticket: Ticket) -> Result<SendableRecordBatchStream> {
-        let mut flight_client = self
-            .client
-            .make_flight_client(self.send_compression, self.accept_compression)?;
+        let mut flight_client = self.client.make_flight_client()?;
         let response = flight_client
             .mut_inner()
             .do_get(ticket)
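With the compression flags gone, constructing the requester only needs a `Client`. A hypothetical call site is sketched below; the import paths and the `client`/`ticket` values are placeholders rather than anything taken from this diff.

```rust
use arrow_flight::Ticket;
use client::region::RegionRequester;
use client::{Client, Result};
use common_recordbatch::SendableRecordBatchStream;

/// Illustrative only: open a Flight DoGet stream through the simplified API.
async fn fetch_region_stream(
    client: Client,
    ticket: Ticket,
) -> Result<SendableRecordBatchStream> {
    let requester = RegionRequester::new(client);
    requester.do_get_inner(ticket).await
}
```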
@@ -146,7 +146,6 @@ mod tests {
         let output_dir = tempfile::tempdir().unwrap();
         let cli = cli::Command::parse_from([
             "cli",
-            "data",
             "export",
             "--addr",
             "127.0.0.1:4000",

@@ -364,16 +364,12 @@ impl StartCommand {
 
         // frontend to datanode need not timeout.
        // Some queries are expected to take long time.
-        let mut channel_config = ChannelConfig {
+        let channel_config = ChannelConfig {
            timeout: None,
            tcp_nodelay: opts.datanode.client.tcp_nodelay,
            connect_timeout: Some(opts.datanode.client.connect_timeout),
            ..Default::default()
        };
-        if opts.grpc.flight_compression.transport_compression() {
-            channel_config.accept_compression = true;
-            channel_config.send_compression = true;
-        }
        let client = NodeClients::new(channel_config);
 
        let instance = FrontendBuilder::new(
src/common/function/src/adjust_flow.rs (new file, 90 lines)
@@ -0,0 +1,90 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use common_macro::admin_fn;
+use common_query::error::{
+    InvalidFuncArgsSnafu, MissingFlowServiceHandlerSnafu, Result, UnsupportedInputDataTypeSnafu,
+};
+use common_query::prelude::Signature;
+use datafusion::logical_expr::Volatility;
+use datatypes::value::{Value, ValueRef};
+use session::context::QueryContextRef;
+use snafu::ensure;
+use store_api::storage::ConcreteDataType;
+
+use crate::handlers::FlowServiceHandlerRef;
+use crate::helper::parse_catalog_flow;
+
+fn adjust_signature() -> Signature {
+    Signature::exact(
+        vec![
+            ConcreteDataType::string_datatype(), // flow name
+            ConcreteDataType::uint64_datatype(), // min_run_interval in seconds
+            ConcreteDataType::uint64_datatype(), // max filter number per query
+        ],
+        Volatility::Immutable,
+    )
+}
+
+#[admin_fn(
+    name = AdjustFlowFunction,
+    display_name = adjust_flow,
+    sig_fn = adjust_signature,
+    ret = uint64
+)]
+pub(crate) async fn adjust_flow(
+    flow_service_handler: &FlowServiceHandlerRef,
+    query_ctx: &QueryContextRef,
+    params: &[ValueRef<'_>],
+) -> Result<Value> {
+    ensure!(
+        params.len() == 3,
+        InvalidFuncArgsSnafu {
+            err_msg: format!(
+                "The length of the args is not correct, expect 3, have: {}",
+                params.len()
+            ),
+        }
+    );
+
+    let (flow_name, min_run_interval, max_filter_num) = match (params[0], params[1], params[2]) {
+        (
+            ValueRef::String(flow_name),
+            ValueRef::UInt64(min_run_interval),
+            ValueRef::UInt64(max_filter_num),
+        ) => (flow_name, min_run_interval, max_filter_num),
+        _ => {
+            return UnsupportedInputDataTypeSnafu {
+                function: "adjust_flow",
+                datatypes: params.iter().map(|v| v.data_type()).collect::<Vec<_>>(),
+            }
+            .fail();
+        }
+    };
+
+    let (catalog_name, flow_name) = parse_catalog_flow(flow_name, query_ctx)?;
+
+    let res = flow_service_handler
+        .adjust(
+            &catalog_name,
+            &flow_name,
+            min_run_interval,
+            max_filter_num as usize,
+            query_ctx.clone(),
+        )
+        .await?;
+    let affected_rows = res.affected_rows;
+
+    Ok(Value::from(affected_rows))
+}
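The new admin function validates exactly three arguments (flow name, minimum run interval in seconds, maximum filter number per query) before delegating to the flow service handler; like the existing `flush_flow`, it is presumably invoked through the admin-function machinery. A hypothetical illustration of the argument shape it pattern-matches on, with placeholder values:

```rust
use datatypes::value::ValueRef;

/// Illustrative only: the parameter slice `adjust_flow` expects.
fn example_adjust_flow_args() -> Vec<ValueRef<'static>> {
    vec![
        ValueRef::String("my_catalog.my_flow"), // flow name, optionally catalog-qualified
        ValueRef::UInt64(10),                   // min_run_interval in seconds
        ValueRef::UInt64(100),                  // max filter number per query
    ]
}
```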
@@ -26,6 +26,7 @@ use flush_compact_table::{CompactTableFunction, FlushTableFunction};
 use migrate_region::MigrateRegionFunction;
 use remove_region_follower::RemoveRegionFollowerFunction;
 
+use crate::adjust_flow::AdjustFlowFunction;
 use crate::flush_flow::FlushFlowFunction;
 use crate::function_registry::FunctionRegistry;
 

@@ -43,5 +44,6 @@ impl AdminFunction {
         registry.register_async(Arc::new(FlushTableFunction));
         registry.register_async(Arc::new(CompactTableFunction));
         registry.register_async(Arc::new(FlushFlowFunction));
+        registry.register_async(Arc::new(AdjustFlowFunction));
     }
 }
@@ -12,21 +12,19 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-use common_error::ext::BoxedError;
 use common_macro::admin_fn;
 use common_query::error::{
-    ExecuteSnafu, InvalidFuncArgsSnafu, MissingFlowServiceHandlerSnafu, Result,
-    UnsupportedInputDataTypeSnafu,
+    InvalidFuncArgsSnafu, MissingFlowServiceHandlerSnafu, Result, UnsupportedInputDataTypeSnafu,
 };
 use common_query::prelude::Signature;
 use datafusion::logical_expr::Volatility;
 use datatypes::value::{Value, ValueRef};
 use session::context::QueryContextRef;
-use snafu::{ensure, ResultExt};
+use snafu::ensure;
-use sql::parser::ParserContext;
 use store_api::storage::ConcreteDataType;
 
 use crate::handlers::FlowServiceHandlerRef;
+use crate::helper::parse_catalog_flow;
 
 fn flush_signature() -> Signature {
     Signature::uniform(

@@ -47,20 +45,6 @@ pub(crate) async fn flush_flow(
     query_ctx: &QueryContextRef,
     params: &[ValueRef<'_>],
 ) -> Result<Value> {
-    let (catalog_name, flow_name) = parse_flush_flow(params, query_ctx)?;
-
-    let res = flow_service_handler
-        .flush(&catalog_name, &flow_name, query_ctx.clone())
-        .await?;
-    let affected_rows = res.affected_rows;
-
-    Ok(Value::from(affected_rows))
-}
-
-fn parse_flush_flow(
-    params: &[ValueRef<'_>],
-    query_ctx: &QueryContextRef,
-) -> Result<(String, String)> {
     ensure!(
         params.len() == 1,
         InvalidFuncArgsSnafu {

@@ -70,7 +54,6 @@ fn parse_flush_flow(
             ),
         }
     );
-
     let ValueRef::String(flow_name) = params[0] else {
         return UnsupportedInputDataTypeSnafu {
             function: "flush_flow",

@@ -78,27 +61,14 @@
         }
         .fail();
     };
-    let obj_name = ParserContext::parse_table_name(flow_name, query_ctx.sql_dialect())
-        .map_err(BoxedError::new)
-        .context(ExecuteSnafu)?;
-
-    let (catalog_name, flow_name) = match &obj_name.0[..] {
-        [flow_name] => (
-            query_ctx.current_catalog().to_string(),
-            flow_name.value.clone(),
-        ),
-        [catalog, flow_name] => (catalog.value.clone(), flow_name.value.clone()),
-        _ => {
-            return InvalidFuncArgsSnafu {
-                err_msg: format!(
-                    "expect flow name to be <catalog>.<flow-name> or <flow-name>, actual: {}",
-                    obj_name
-                ),
-            }
-            .fail()
-        }
-    };
-    Ok((catalog_name, flow_name))
+    let (catalog_name, flow_name) = parse_catalog_flow(flow_name, query_ctx)?;
+
+    let res = flow_service_handler
+        .flush(&catalog_name, &flow_name, query_ctx.clone())
+        .await?;
+    let affected_rows = res.affected_rows;
+
+    Ok(Value::from(affected_rows))
 }
 
 #[cfg(test)]

@@ -154,10 +124,7 @@ mod test {
             ("catalog.flow_name", ("catalog", "flow_name")),
         ];
         for (input, expected) in testcases.iter() {
-            let args = vec![*input];
-            let args = args.into_iter().map(ValueRef::String).collect::<Vec<_>>();
-
-            let result = parse_flush_flow(&args, &QueryContext::arc()).unwrap();
+            let result = parse_catalog_flow(input, &QueryContext::arc()).unwrap();
             assert_eq!(*expected, (result.0.as_str(), result.1.as_str()));
         }
     }
@@ -87,6 +87,15 @@ pub trait FlowServiceHandler: Send + Sync {
         flow: &str,
         ctx: QueryContextRef,
     ) -> Result<api::v1::flow::FlowResponse>;
+
+    async fn adjust(
+        &self,
+        catalog: &str,
+        flow: &str,
+        min_run_interval_secs: u64,
+        max_filter_num_per_query: usize,
+        ctx: QueryContextRef,
+    ) -> Result<api::v1::flow::FlowResponse>;
 }
 
 pub type TableMutationHandlerRef = Arc<dyn TableMutationHandler>;
@@ -12,12 +12,15 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-use common_query::error::{InvalidInputTypeSnafu, Result};
+use common_error::ext::BoxedError;
+use common_query::error::{ExecuteSnafu, InvalidFuncArgsSnafu, InvalidInputTypeSnafu, Result};
 use common_query::prelude::{Signature, TypeSignature, Volatility};
 use datatypes::prelude::ConcreteDataType;
 use datatypes::types::cast::cast;
 use datatypes::value::ValueRef;
+use session::context::QueryContextRef;
 use snafu::ResultExt;
+use sql::parser::ParserContext;
 
 /// Create a function signature with oneof signatures of interleaving two arguments.
 pub fn one_of_sigs2(args1: Vec<ConcreteDataType>, args2: Vec<ConcreteDataType>) -> Signature {

@@ -43,3 +46,30 @@ pub fn cast_u64(value: &ValueRef) -> Result<Option<u64>> {
     })
     .map(|v| v.as_u64())
 }
+
+pub fn parse_catalog_flow(
+    flow_name: &str,
+    query_ctx: &QueryContextRef,
+) -> Result<(String, String)> {
+    let obj_name = ParserContext::parse_table_name(flow_name, query_ctx.sql_dialect())
+        .map_err(BoxedError::new)
+        .context(ExecuteSnafu)?;
+
+    let (catalog_name, flow_name) = match &obj_name.0[..] {
+        [flow_name] => (
+            query_ctx.current_catalog().to_string(),
+            flow_name.value.clone(),
+        ),
+        [catalog, flow_name] => (catalog.value.clone(), flow_name.value.clone()),
+        _ => {
+            return InvalidFuncArgsSnafu {
+                err_msg: format!(
+                    "expect flow name to be <catalog>.<flow-name> or <flow-name>, actual: {}",
+                    obj_name
+                ),
+            }
+            .fail()
+        }
+    };
+    Ok((catalog_name, flow_name))
+}
@@ -15,6 +15,7 @@
 #![feature(let_chains)]
 #![feature(try_blocks)]
 
+mod adjust_flow;
 mod admin;
 mod flush_flow;
 mod macros;
@@ -148,6 +148,17 @@ impl FunctionState {
             ) -> Result<api::v1::flow::FlowResponse> {
                 todo!()
             }
+
+            async fn adjust(
+                &self,
+                _catalog: &str,
+                _flow: &str,
+                _min_run_interval_secs: u64,
+                _max_filter_num_per_query: usize,
+                _ctx: QueryContextRef,
+            ) -> Result<api::v1::flow::FlowResponse> {
+                todo!()
+            }
         }
 
         Self {
@@ -296,8 +296,6 @@ pub struct ChannelConfig {
     pub max_recv_message_size: ReadableSize,
     // Max gRPC sending(encoding) message size
     pub max_send_message_size: ReadableSize,
-    pub send_compression: bool,
-    pub accept_compression: bool,
 }
 
 impl Default for ChannelConfig {

@@ -318,8 +316,6 @@ impl Default for ChannelConfig {
             client_tls: None,
             max_recv_message_size: DEFAULT_MAX_GRPC_RECV_MESSAGE_SIZE,
            max_send_message_size: DEFAULT_MAX_GRPC_SEND_MESSAGE_SIZE,
-            send_compression: false,
-            accept_compression: false,
         }
     }
 }
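After this change, callers that previously toggled `send_compression` and `accept_compression` rely only on the remaining fields plus `Default`. A minimal sketch of such a construction follows; the import path and the `Duration`-typed connect timeout are assumptions, mirroring the frontend-to-datanode configuration shown earlier in this compare.

```rust
use std::time::Duration;

use common_grpc::channel_manager::ChannelConfig;

/// Illustrative only: a channel config with no per-request timeout.
fn no_timeout_config() -> ChannelConfig {
    ChannelConfig {
        timeout: None,
        connect_timeout: Some(Duration::from_secs(1)),
        ..Default::default()
    }
}
```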
@@ -570,8 +566,6 @@ mod tests {
                 client_tls: None,
                 max_recv_message_size: DEFAULT_MAX_GRPC_RECV_MESSAGE_SIZE,
                 max_send_message_size: DEFAULT_MAX_GRPC_SEND_MESSAGE_SIZE,
-                send_compression: false,
-                accept_compression: false,
             },
             default_cfg
         );

@@ -616,8 +610,6 @@ mod tests {
                 }),
                 max_recv_message_size: DEFAULT_MAX_GRPC_RECV_MESSAGE_SIZE,
                 max_send_message_size: DEFAULT_MAX_GRPC_SEND_MESSAGE_SIZE,
-                send_compression: false,
-                accept_compression: false,
             },
             cfg
         );
@@ -14,7 +14,6 @@
 
 pub mod file;
 
-use std::borrow::Cow;
 use std::fmt::{Display, Formatter};
 use std::path::{Path, PathBuf};
 use std::time::Instant;

@@ -272,49 +271,6 @@ impl MetadataSnapshotManager {
 
         Ok((filename.to_string(), num_keyvalues as u64))
     }
-
-    fn format_output(key: Cow<'_, str>, value: Cow<'_, str>) -> String {
-        format!("{} => {}", key, value)
-    }
-
-    pub async fn info(
-        object_store: &ObjectStore,
-        file_path: &str,
-        query_str: &str,
-        limit: Option<usize>,
-    ) -> Result<Vec<String>> {
-        let path = Path::new(file_path);
-
-        let file_name = path
-            .file_name()
-            .and_then(|s| s.to_str())
-            .context(InvalidFilePathSnafu { file_path })?;
-
-        let filename = FileName::try_from(file_name)?;
-        let data = object_store
-            .read(file_path)
-            .await
-            .context(ReadObjectSnafu { file_path })?;
-        let document = Document::from_slice(&filename.extension.format, &data.to_bytes())?;
-        let metadata_content = document.into_metadata_content()?.values();
-        let mut results = Vec::with_capacity(limit.unwrap_or(256));
-        for kv in metadata_content {
-            let key_str = String::from_utf8_lossy(&kv.key);
-            if let Some(prefix) = query_str.strip_suffix('*') {
-                if key_str.starts_with(prefix) {
-                    let value_str = String::from_utf8_lossy(&kv.value);
-                    results.push(Self::format_output(key_str, value_str));
-                }
-            } else if key_str == query_str {
-                let value_str = String::from_utf8_lossy(&kv.value);
-                results.push(Self::format_output(key_str, value_str));
-            }
-            if results.len() == limit.unwrap_or(usize::MAX) {
-                break;
-            }
-        }
-        Ok(results)
-    }
 }
 
 #[cfg(test)]
@@ -111,11 +111,6 @@ impl MetadataContent {
|
|||||||
pub fn into_iter(self) -> impl Iterator<Item = KeyValue> {
|
pub fn into_iter(self) -> impl Iterator<Item = KeyValue> {
|
||||||
self.values.into_iter()
|
self.values.into_iter()
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Returns the key-value pairs as a vector.
|
|
||||||
pub fn values(self) -> Vec<KeyValue> {
|
|
||||||
self.values
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// The key-value pair of the backup file.
|
/// The key-value pair of the backup file.
|
||||||
|
|||||||
@@ -61,6 +61,7 @@ prost.workspace = true
|
|||||||
query.workspace = true
|
query.workspace = true
|
||||||
rand.workspace = true
|
rand.workspace = true
|
||||||
serde.workspace = true
|
serde.workspace = true
|
||||||
|
serde_json.workspace = true
|
||||||
servers.workspace = true
|
servers.workspace = true
|
||||||
session.workspace = true
|
session.workspace = true
|
||||||
smallvec.workspace = true
|
smallvec.workspace = true
|
||||||
|
|||||||
@@ -18,7 +18,7 @@ use std::sync::atomic::AtomicBool;
|
|||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
|
||||||
use api::v1::flow::{
|
use api::v1::flow::{
|
||||||
flow_request, CreateRequest, DropRequest, FlowRequest, FlowResponse, FlushFlow,
|
flow_request, AdjustFlow, CreateRequest, DropRequest, FlowRequest, FlowResponse, FlushFlow,
|
||||||
};
|
};
|
||||||
use api::v1::region::InsertRequests;
|
use api::v1::region::InsertRequests;
|
||||||
use catalog::CatalogManager;
|
use catalog::CatalogManager;
|
||||||
@@ -32,6 +32,7 @@ use common_telemetry::{error, info, trace, warn};
|
|||||||
use datatypes::value::Value;
|
use datatypes::value::Value;
|
||||||
use futures::TryStreamExt;
|
use futures::TryStreamExt;
|
||||||
use itertools::Itertools;
|
use itertools::Itertools;
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
use session::context::QueryContextBuilder;
|
use session::context::QueryContextBuilder;
|
||||||
use snafu::{ensure, IntoError, OptionExt, ResultExt};
|
use snafu::{ensure, IntoError, OptionExt, ResultExt};
|
||||||
use store_api::storage::{RegionId, TableId};
|
use store_api::storage::{RegionId, TableId};
|
||||||
@@ -822,6 +823,25 @@ impl common_meta::node_manager::Flownode for FlowDualEngine {
|
|||||||
..Default::default()
|
..Default::default()
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
Some(flow_request::Body::Adjust(AdjustFlow { flow_id, options })) => {
|
||||||
|
#[derive(Debug, Serialize, Deserialize)]
|
||||||
|
struct Options {
|
||||||
|
min_run_interval_secs: u64,
|
||||||
|
max_filter_num_per_query: usize,
|
||||||
|
}
|
||||||
|
let options: Options = serde_json::from_str(&options).with_context(|_| {
|
||||||
|
common_meta::error::DeserializeFromJsonSnafu { input: options }
|
||||||
|
})?;
|
||||||
|
self.batching_engine
|
||||||
|
.adjust_flow(
|
||||||
|
flow_id.unwrap().id as u64,
|
||||||
|
options.min_run_interval_secs,
|
||||||
|
options.max_filter_num_per_query,
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
.map_err(to_meta_err(snafu::location!()))?;
|
||||||
|
Ok(Default::default())
|
||||||
|
}
|
||||||
other => common_meta::error::InvalidFlowRequestBodySnafu { body: other }.fail(),
|
other => common_meta::error::InvalidFlowRequestBodySnafu { body: other }.fail(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
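As a side note for readers of this hunk: a minimal, self-contained sketch of the `options` payload the `Adjust` branch deserializes. The struct mirrors the `Options` declared above; the standalone harness and the concrete values are illustrative assumptions, not part of the patch.

// Sketch only: round-trip of the AdjustFlow `options` JSON (assumed values).
use serde::{Deserialize, Serialize};

#[derive(Debug, Serialize, Deserialize)]
struct Options {
    min_run_interval_secs: u64,
    max_filter_num_per_query: usize,
}

fn main() {
    // The frontend side builds this string with `json!` (see the
    // FlowServiceOperator::adjust_inner hunk later in this compare);
    // the flownode parses it back into `Options`.
    let payload = r#"{"min_run_interval_secs":30,"max_filter_num_per_query":20}"#;
    let options: Options = serde_json::from_str(payload).expect("valid options JSON");
    assert_eq!(options.min_run_interval_secs, 30);
    assert_eq!(options.max_filter_num_per_query, 20);
}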
@@ -854,93 +874,6 @@ fn to_meta_err(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[async_trait::async_trait]
|
|
||||||
impl common_meta::node_manager::Flownode for StreamingEngine {
|
|
||||||
async fn handle(&self, request: FlowRequest) -> MetaResult<FlowResponse> {
|
|
||||||
let query_ctx = request
|
|
||||||
.header
|
|
||||||
.and_then(|h| h.query_context)
|
|
||||||
.map(|ctx| ctx.into());
|
|
||||||
match request.body {
|
|
||||||
Some(flow_request::Body::Create(CreateRequest {
|
|
||||||
flow_id: Some(task_id),
|
|
||||||
source_table_ids,
|
|
||||||
sink_table_name: Some(sink_table_name),
|
|
||||||
create_if_not_exists,
|
|
||||||
expire_after,
|
|
||||||
comment,
|
|
||||||
sql,
|
|
||||||
flow_options,
|
|
||||||
or_replace,
|
|
||||||
})) => {
|
|
||||||
let source_table_ids = source_table_ids.into_iter().map(|id| id.id).collect_vec();
|
|
||||||
let sink_table_name = [
|
|
||||||
sink_table_name.catalog_name,
|
|
||||||
sink_table_name.schema_name,
|
|
||||||
sink_table_name.table_name,
|
|
||||||
];
|
|
||||||
let expire_after = expire_after.map(|e| e.value);
|
|
||||||
let args = CreateFlowArgs {
|
|
||||||
flow_id: task_id.id as u64,
|
|
||||||
sink_table_name,
|
|
||||||
source_table_ids,
|
|
||||||
create_if_not_exists,
|
|
||||||
or_replace,
|
|
||||||
expire_after,
|
|
||||||
comment: Some(comment),
|
|
||||||
sql: sql.clone(),
|
|
||||||
flow_options,
|
|
||||||
query_ctx,
|
|
||||||
};
|
|
||||||
let ret = self
|
|
||||||
.create_flow(args)
|
|
||||||
.await
|
|
||||||
.map_err(BoxedError::new)
|
|
||||||
.with_context(|_| CreateFlowSnafu { sql: sql.clone() })
|
|
||||||
.map_err(to_meta_err(snafu::location!()))?;
|
|
||||||
METRIC_FLOW_TASK_COUNT.inc();
|
|
||||||
Ok(FlowResponse {
|
|
||||||
affected_flows: ret
|
|
||||||
.map(|id| greptime_proto::v1::FlowId { id: id as u32 })
|
|
||||||
.into_iter()
|
|
||||||
.collect_vec(),
|
|
||||||
..Default::default()
|
|
||||||
})
|
|
||||||
}
|
|
||||||
Some(flow_request::Body::Drop(DropRequest {
|
|
||||||
flow_id: Some(flow_id),
|
|
||||||
})) => {
|
|
||||||
self.remove_flow(flow_id.id as u64)
|
|
||||||
.await
|
|
||||||
.map_err(to_meta_err(snafu::location!()))?;
|
|
||||||
METRIC_FLOW_TASK_COUNT.dec();
|
|
||||||
Ok(Default::default())
|
|
||||||
}
|
|
||||||
Some(flow_request::Body::Flush(FlushFlow {
|
|
||||||
flow_id: Some(flow_id),
|
|
||||||
})) => {
|
|
||||||
let row = self
|
|
||||||
.flush_flow_inner(flow_id.id as u64)
|
|
||||||
.await
|
|
||||||
.map_err(to_meta_err(snafu::location!()))?;
|
|
||||||
Ok(FlowResponse {
|
|
||||||
affected_flows: vec![flow_id],
|
|
||||||
affected_rows: row as u64,
|
|
||||||
..Default::default()
|
|
||||||
})
|
|
||||||
}
|
|
||||||
other => common_meta::error::InvalidFlowRequestBodySnafu { body: other }.fail(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn handle_inserts(&self, request: InsertRequests) -> MetaResult<FlowResponse> {
|
|
||||||
self.handle_inserts_inner(request)
|
|
||||||
.await
|
|
||||||
.map(|_| Default::default())
|
|
||||||
.map_err(to_meta_err(snafu::location!()))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl FlowEngine for StreamingEngine {
|
impl FlowEngine for StreamingEngine {
|
||||||
async fn create_flow(&self, args: CreateFlowArgs) -> Result<Option<FlowId>, Error> {
|
async fn create_flow(&self, args: CreateFlowArgs) -> Result<Option<FlowId>, Error> {
|
||||||
self.create_flow_inner(args).await
|
self.create_flow_inner(args).await
|
||||||
|
|||||||
@@ -42,7 +42,6 @@ use crate::error::{
|
|||||||
ExternalSnafu, FlowAlreadyExistSnafu, FlowNotFoundSnafu, TableNotFoundMetaSnafu,
|
ExternalSnafu, FlowAlreadyExistSnafu, FlowNotFoundSnafu, TableNotFoundMetaSnafu,
|
||||||
UnexpectedSnafu, UnsupportedSnafu,
|
UnexpectedSnafu, UnsupportedSnafu,
|
||||||
};
|
};
|
||||||
use crate::metrics::METRIC_FLOW_ROWS;
|
|
||||||
use crate::{CreateFlowArgs, Error, FlowId, TableName};
|
use crate::{CreateFlowArgs, Error, FlowId, TableName};
|
||||||
|
|
||||||
/// Batching mode Engine, responsible for driving all the batching mode tasks
|
/// Batching mode Engine, responsible for driving all the batching mode tasks
|
||||||
@@ -156,10 +155,6 @@ impl BatchingEngine {
|
|||||||
let Some(expr) = &task.config.time_window_expr else {
|
let Some(expr) = &task.config.time_window_expr else {
|
||||||
continue;
|
continue;
|
||||||
};
|
};
|
||||||
let row_cnt: usize = entry.iter().map(|rows| rows.rows.len()).sum();
|
|
||||||
METRIC_FLOW_ROWS
|
|
||||||
.with_label_values(&[&format!("{}-batching-in", task.config.flow_id)])
|
|
||||||
.inc_by(row_cnt as u64);
|
|
||||||
let involved_time_windows = expr.handle_rows(entry.clone()).await?;
|
let involved_time_windows = expr.handle_rows(entry.clone()).await?;
|
||||||
let mut state = task.state.write().unwrap();
|
let mut state = task.state.write().unwrap();
|
||||||
state
|
state
|
||||||
@@ -393,6 +388,20 @@ impl BatchingEngine {
|
|||||||
pub async fn flow_exist_inner(&self, flow_id: FlowId) -> bool {
|
pub async fn flow_exist_inner(&self, flow_id: FlowId) -> bool {
|
||||||
self.tasks.read().await.contains_key(&flow_id)
|
self.tasks.read().await.contains_key(&flow_id)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub async fn adjust_flow(
|
||||||
|
&self,
|
||||||
|
flow_id: FlowId,
|
||||||
|
min_run_interval_secs: u64,
|
||||||
|
max_filter_num_per_query: usize,
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
let task = self.tasks.read().await.get(&flow_id).cloned();
|
||||||
|
let task = task.with_context(|| FlowNotFoundSnafu { id: flow_id })?;
|
||||||
|
debug!("Adjusting flow {flow_id} with min_run_interval_secs={} and max_filter_num_per_query={}", min_run_interval_secs, max_filter_num_per_query);
|
||||||
|
task.adjust(min_run_interval_secs, max_filter_num_per_query);
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl FlowEngine for BatchingEngine {
|
impl FlowEngine for BatchingEngine {
|
||||||
|
|||||||
@@ -14,8 +14,9 @@
|
|||||||
|
|
||||||
//! Frontend client to run a flow as a batching task, i.e. a time-window-aware regular query triggered on every tick configured by the user
|
//! Frontend client to run a flow as a batching task, i.e. a time-window-aware regular query triggered on every tick configured by the user
|
||||||
|
|
||||||
use std::sync::{Arc, Weak};
|
use std::collections::HashMap;
|
||||||
use std::time::SystemTime;
|
use std::sync::{Arc, Mutex, Weak};
|
||||||
|
use std::time::{Duration, Instant, SystemTime};
|
||||||
|
|
||||||
use api::v1::greptime_request::Request;
|
use api::v1::greptime_request::Request;
|
||||||
use api::v1::CreateTableExpr;
|
use api::v1::CreateTableExpr;
|
||||||
@@ -26,20 +27,21 @@ use common_meta::cluster::{NodeInfo, NodeInfoKey, Role};
|
|||||||
use common_meta::peer::Peer;
|
use common_meta::peer::Peer;
|
||||||
use common_meta::rpc::store::RangeRequest;
|
use common_meta::rpc::store::RangeRequest;
|
||||||
use common_query::Output;
|
use common_query::Output;
|
||||||
use common_telemetry::warn;
|
use common_telemetry::{debug, warn};
|
||||||
|
use itertools::Itertools;
|
||||||
use meta_client::client::MetaClient;
|
use meta_client::client::MetaClient;
|
||||||
use rand::rng;
|
|
||||||
use rand::seq::SliceRandom;
|
|
||||||
use servers::query_handler::grpc::GrpcQueryHandler;
|
use servers::query_handler::grpc::GrpcQueryHandler;
|
||||||
use session::context::{QueryContextBuilder, QueryContextRef};
|
use session::context::{QueryContextBuilder, QueryContextRef};
|
||||||
use snafu::{OptionExt, ResultExt};
|
use snafu::{OptionExt, ResultExt};
|
||||||
|
|
||||||
|
use crate::batching_mode::task::BatchingTask;
|
||||||
use crate::batching_mode::{
|
use crate::batching_mode::{
|
||||||
DEFAULT_BATCHING_ENGINE_QUERY_TIMEOUT, FRONTEND_ACTIVITY_TIMEOUT, GRPC_CONN_TIMEOUT,
|
DEFAULT_BATCHING_ENGINE_QUERY_TIMEOUT, FRONTEND_ACTIVITY_TIMEOUT, GRPC_CONN_TIMEOUT,
|
||||||
GRPC_MAX_RETRIES,
|
GRPC_MAX_RETRIES,
|
||||||
};
|
};
|
||||||
use crate::error::{ExternalSnafu, InvalidRequestSnafu, NoAvailableFrontendSnafu, UnexpectedSnafu};
|
use crate::error::{ExternalSnafu, InvalidRequestSnafu, NoAvailableFrontendSnafu, UnexpectedSnafu};
|
||||||
use crate::{Error, FlowAuthHeader};
|
use crate::metrics::METRIC_FLOW_BATCHING_ENGINE_GUESS_FE_LOAD;
|
||||||
|
use crate::{Error, FlowAuthHeader, FlowId};
|
||||||
|
|
||||||
/// Just like [`GrpcQueryHandler`] but use BoxedError
|
/// Just like [`GrpcQueryHandler`] but use BoxedError
|
||||||
///
|
///
|
||||||
@@ -74,6 +76,105 @@ impl<
|
|||||||
|
|
||||||
type HandlerMutable = Arc<std::sync::Mutex<Option<Weak<dyn GrpcQueryHandlerWithBoxedError>>>>;
|
type HandlerMutable = Arc<std::sync::Mutex<Option<Weak<dyn GrpcQueryHandlerWithBoxedError>>>>;
|
||||||
|
|
||||||
|
/// Statistics, kept on the flownode, about queries currently running on this frontend
|
||||||
|
#[derive(Debug, Default, Clone)]
|
||||||
|
struct FrontendStat {
|
||||||
|
/// For each flow id, the timestamp since which its query has been running
|
||||||
|
since: HashMap<FlowId, Instant>,
|
||||||
|
/// The accumulated (count, total duration) of past queries for each flow id
|
||||||
|
/// Used to compute the average query time per flow id
|
||||||
|
past_query_avg: HashMap<FlowId, (usize, Duration)>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Default, Clone)]
|
||||||
|
pub struct FrontendStats {
|
||||||
|
/// The statistics for each frontend address
|
||||||
|
stats: Arc<Mutex<HashMap<String, FrontendStat>>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl FrontendStats {
|
||||||
|
pub fn observe(&self, frontend_addr: &str, flow_id: FlowId) -> FrontendStatsGuard {
|
||||||
|
let mut stats = self.stats.lock().expect("Failed to lock frontend stats");
|
||||||
|
let stat = stats.entry(frontend_addr.to_string()).or_default();
|
||||||
|
stat.since.insert(flow_id, Instant::now());
|
||||||
|
|
||||||
|
FrontendStatsGuard {
|
||||||
|
stats: self.stats.clone(),
|
||||||
|
frontend_addr: frontend_addr.to_string(),
|
||||||
|
cur: flow_id,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Return frontend addrs sorted by load, from lightest to heaviest
|
||||||
|
/// The load is the sum of the average past query time of each flow plus the elapsed time of all currently running queries
|
||||||
|
pub fn sort_by_load(&self) -> Vec<String> {
|
||||||
|
let stats = self.stats.lock().expect("Failed to lock frontend stats");
|
||||||
|
let fe_load_factor = stats
|
||||||
|
.iter()
|
||||||
|
.map(|(node_addr, stat)| {
|
||||||
|
// total expected avg running time for all currently running queries
|
||||||
|
let total_expect_avg_run_time = stat
|
||||||
|
.since
|
||||||
|
.keys()
|
||||||
|
.map(|f| {
|
||||||
|
let (count, total_duration) =
|
||||||
|
stat.past_query_avg.get(f).unwrap_or(&(0, Duration::ZERO));
|
||||||
|
if *count == 0 {
|
||||||
|
0.0
|
||||||
|
} else {
|
||||||
|
total_duration.as_secs_f64() / *count as f64
|
||||||
|
}
|
||||||
|
})
|
||||||
|
.sum::<f64>();
|
||||||
|
let total_cur_running_time = stat
|
||||||
|
.since
|
||||||
|
.values()
|
||||||
|
.map(|since| since.elapsed().as_secs_f64())
|
||||||
|
.sum::<f64>();
|
||||||
|
(
|
||||||
|
node_addr.to_string(),
|
||||||
|
total_expect_avg_run_time + total_cur_running_time,
|
||||||
|
)
|
||||||
|
})
|
||||||
|
.sorted_by(|(_, load_a), (_, load_b)| {
|
||||||
|
load_a
|
||||||
|
.partial_cmp(load_b)
|
||||||
|
.unwrap_or(std::cmp::Ordering::Equal)
|
||||||
|
})
|
||||||
|
.collect::<Vec<_>>();
|
||||||
|
debug!("Frontend load factor: {:?}", fe_load_factor);
|
||||||
|
for (node_addr, load) in &fe_load_factor {
|
||||||
|
METRIC_FLOW_BATCHING_ENGINE_GUESS_FE_LOAD
|
||||||
|
.with_label_values(&[&node_addr.to_string()])
|
||||||
|
.observe(*load);
|
||||||
|
}
|
||||||
|
fe_load_factor
|
||||||
|
.into_iter()
|
||||||
|
.map(|(addr, _)| addr)
|
||||||
|
.collect::<Vec<_>>()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub struct FrontendStatsGuard {
|
||||||
|
stats: Arc<Mutex<HashMap<String, FrontendStat>>>,
|
||||||
|
frontend_addr: String,
|
||||||
|
cur: FlowId,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Drop for FrontendStatsGuard {
|
||||||
|
fn drop(&mut self) {
|
||||||
|
let mut stats = self.stats.lock().expect("Failed to lock frontend stats");
|
||||||
|
if let Some(stat) = stats.get_mut(&self.frontend_addr) {
|
||||||
|
if let Some(since) = stat.since.remove(&self.cur) {
|
||||||
|
let elapsed = since.elapsed();
|
||||||
|
let (count, total_duration) = stat.past_query_avg.entry(self.cur).or_default();
|
||||||
|
*count += 1;
|
||||||
|
*total_duration += elapsed;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
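To make the load formula documented above concrete, a small self-contained sketch of the same arithmetic; only the formula is taken from sort_by_load, the numbers and the helper function are invented for the example.

// Toy illustration of: load = sum of per-flow average past query time
//                             + elapsed time of queries still running.
use std::time::Duration;

fn load(avg_past: &[(usize, Duration)], running_elapsed: &[Duration]) -> f64 {
    let avg: f64 = avg_past
        .iter()
        .map(|(count, total)| {
            if *count == 0 { 0.0 } else { total.as_secs_f64() / *count as f64 }
        })
        .sum();
    let running: f64 = running_elapsed.iter().map(|d| d.as_secs_f64()).sum();
    avg + running
}

fn main() {
    // frontend A: one flow averaging 2s over 3 runs, plus a query running for 5s
    let a = load(&[(3, Duration::from_secs(6))], &[Duration::from_secs(5)]);
    // frontend B: idle, with a single 1s historical run
    let b = load(&[(1, Duration::from_secs(1))], &[]);
    assert!(b < a); // B is the lighter node and would be tried first
}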
|
|
||||||
/// A simple frontend client able to execute SQL using the gRPC protocol
|
/// A simple frontend client able to execute SQL using the gRPC protocol
|
||||||
///
|
///
|
||||||
/// This is for computation-heavy queries which need to offload computation to the frontend, lifting the load from the flownode
|
/// This is for computation-heavy queries which need to offload computation to the frontend, lifting the load from the flownode
|
||||||
@@ -83,6 +184,7 @@ pub enum FrontendClient {
|
|||||||
meta_client: Arc<MetaClient>,
|
meta_client: Arc<MetaClient>,
|
||||||
chnl_mgr: ChannelManager,
|
chnl_mgr: ChannelManager,
|
||||||
auth: Option<FlowAuthHeader>,
|
auth: Option<FlowAuthHeader>,
|
||||||
|
fe_stats: FrontendStats,
|
||||||
},
|
},
|
||||||
Standalone {
|
Standalone {
|
||||||
/// for the sake of simplicity, still use gRPC even in standalone mode
|
/// for the sake of simplicity, still use gRPC even in standalone mode
|
||||||
@@ -114,6 +216,7 @@ impl FrontendClient {
|
|||||||
ChannelManager::with_config(cfg)
|
ChannelManager::with_config(cfg)
|
||||||
},
|
},
|
||||||
auth,
|
auth,
|
||||||
|
fe_stats: Default::default(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -183,7 +286,7 @@ impl FrontendClient {
|
|||||||
|
|
||||||
/// Get a frontend with a recent enough (less than one minute old) `last_activity_ts`
|
/// Get a frontend with a recent enough (less than one minute old) `last_activity_ts`
|
||||||
/// and that is able to process queries
|
/// and that is able to process queries
|
||||||
async fn get_random_active_frontend(
|
pub(crate) async fn get_random_active_frontend(
|
||||||
&self,
|
&self,
|
||||||
catalog: &str,
|
catalog: &str,
|
||||||
schema: &str,
|
schema: &str,
|
||||||
@@ -192,6 +295,7 @@ impl FrontendClient {
|
|||||||
meta_client: _,
|
meta_client: _,
|
||||||
chnl_mgr,
|
chnl_mgr,
|
||||||
auth,
|
auth,
|
||||||
|
fe_stats,
|
||||||
} = self
|
} = self
|
||||||
else {
|
else {
|
||||||
return UnexpectedSnafu {
|
return UnexpectedSnafu {
|
||||||
@@ -208,8 +312,21 @@ impl FrontendClient {
|
|||||||
.duration_since(SystemTime::UNIX_EPOCH)
|
.duration_since(SystemTime::UNIX_EPOCH)
|
||||||
.unwrap()
|
.unwrap()
|
||||||
.as_millis() as i64;
|
.as_millis() as i64;
|
||||||
// shuffle the frontends to avoid always pick the same one
|
let node_addrs_by_load = fe_stats.sort_by_load();
|
||||||
frontends.shuffle(&mut rng());
|
// index+1 gives ascending load ranks, so the lightest node gets rank 1 and a node absent from the stats gets rank 0
|
||||||
|
let addr2load = node_addrs_by_load
|
||||||
|
.iter()
|
||||||
|
.enumerate()
|
||||||
|
.map(|(i, id)| (id.clone(), i + 1))
|
||||||
|
.collect::<HashMap<_, _>>();
|
||||||
|
// sort frontends by load, from lightest to heaviest
|
||||||
|
frontends.sort_by(|(_, a), (_, b)| {
|
||||||
|
// if not present in the stats, treat it as load 0 since it has never been queried
|
||||||
|
let load_a = addr2load.get(&a.peer.addr).unwrap_or(&0);
|
||||||
|
let load_b = addr2load.get(&b.peer.addr).unwrap_or(&0);
|
||||||
|
load_a.cmp(load_b)
|
||||||
|
});
|
||||||
|
debug!("Frontend nodes sorted by load: {:?}", frontends);
|
||||||
|
|
||||||
// find the node with the maximum last_activity_ts
|
// find the node with the maximum last_activity_ts
|
||||||
for (_, node_info) in frontends
|
for (_, node_info) in frontends
|
||||||
@@ -257,6 +374,7 @@ impl FrontendClient {
|
|||||||
create: CreateTableExpr,
|
create: CreateTableExpr,
|
||||||
catalog: &str,
|
catalog: &str,
|
||||||
schema: &str,
|
schema: &str,
|
||||||
|
task: Option<&BatchingTask>,
|
||||||
) -> Result<u32, Error> {
|
) -> Result<u32, Error> {
|
||||||
self.handle(
|
self.handle(
|
||||||
Request::Ddl(api::v1::DdlRequest {
|
Request::Ddl(api::v1::DdlRequest {
|
||||||
@@ -264,7 +382,8 @@ impl FrontendClient {
|
|||||||
}),
|
}),
|
||||||
catalog,
|
catalog,
|
||||||
schema,
|
schema,
|
||||||
&mut None,
|
None,
|
||||||
|
task,
|
||||||
)
|
)
|
||||||
.await
|
.await
|
||||||
}
|
}
|
||||||
@@ -275,15 +394,31 @@ impl FrontendClient {
|
|||||||
req: api::v1::greptime_request::Request,
|
req: api::v1::greptime_request::Request,
|
||||||
catalog: &str,
|
catalog: &str,
|
||||||
schema: &str,
|
schema: &str,
|
||||||
peer_desc: &mut Option<PeerDesc>,
|
use_peer: Option<Peer>,
|
||||||
|
task: Option<&BatchingTask>,
|
||||||
) -> Result<u32, Error> {
|
) -> Result<u32, Error> {
|
||||||
match self {
|
match self {
|
||||||
FrontendClient::Distributed { .. } => {
|
FrontendClient::Distributed {
|
||||||
let db = self.get_random_active_frontend(catalog, schema).await?;
|
fe_stats, chnl_mgr, ..
|
||||||
|
} => {
|
||||||
|
let db = if let Some(peer) = use_peer {
|
||||||
|
DatabaseWithPeer::new(
|
||||||
|
Database::new(
|
||||||
|
catalog,
|
||||||
|
schema,
|
||||||
|
Client::with_manager_and_urls(
|
||||||
|
chnl_mgr.clone(),
|
||||||
|
vec![peer.addr.clone()],
|
||||||
|
),
|
||||||
|
),
|
||||||
|
peer,
|
||||||
|
)
|
||||||
|
} else {
|
||||||
|
self.get_random_active_frontend(catalog, schema).await?
|
||||||
|
};
|
||||||
|
|
||||||
*peer_desc = Some(PeerDesc::Dist {
|
let flow_id = task.map(|t| t.config.flow_id).unwrap_or_default();
|
||||||
peer: db.peer.clone(),
|
let _guard = fe_stats.observe(&db.peer.addr, flow_id);
|
||||||
});
|
|
||||||
|
|
||||||
db.database
|
db.database
|
||||||
.handle_with_retry(req.clone(), GRPC_MAX_RETRIES)
|
.handle_with_retry(req.clone(), GRPC_MAX_RETRIES)
|
||||||
|
|||||||
@@ -32,7 +32,7 @@ use crate::batching_mode::MIN_REFRESH_DURATION;
|
|||||||
use crate::error::{DatatypesSnafu, InternalSnafu, TimeSnafu, UnexpectedSnafu};
|
use crate::error::{DatatypesSnafu, InternalSnafu, TimeSnafu, UnexpectedSnafu};
|
||||||
use crate::metrics::{
|
use crate::metrics::{
|
||||||
METRIC_FLOW_BATCHING_ENGINE_QUERY_TIME_RANGE, METRIC_FLOW_BATCHING_ENGINE_QUERY_WINDOW_CNT,
|
METRIC_FLOW_BATCHING_ENGINE_QUERY_TIME_RANGE, METRIC_FLOW_BATCHING_ENGINE_QUERY_WINDOW_CNT,
|
||||||
METRIC_FLOW_BATCHING_ENGINE_WAIT_TIME,
|
METRIC_FLOW_BATCHING_ENGINE_STALLED_QUERY_WINDOW_CNT,
|
||||||
};
|
};
|
||||||
use crate::{Error, FlowId};
|
use crate::{Error, FlowId};
|
||||||
|
|
||||||
@@ -53,6 +53,13 @@ pub struct TaskState {
|
|||||||
pub(crate) shutdown_rx: oneshot::Receiver<()>,
|
pub(crate) shutdown_rx: oneshot::Receiver<()>,
|
||||||
/// Task handle
|
/// Task handle
|
||||||
pub(crate) task_handle: Option<tokio::task::JoinHandle<()>>,
|
pub(crate) task_handle: Option<tokio::task::JoinHandle<()>>,
|
||||||
|
/// Slow Query metrics update task handle
|
||||||
|
pub(crate) slow_query_metric_task: Option<tokio::task::JoinHandle<()>>,
|
||||||
|
|
||||||
|
/// min run interval in seconds
|
||||||
|
pub(crate) min_run_interval: Option<u64>,
|
||||||
|
/// max filter number per query
|
||||||
|
pub(crate) max_filter_num: Option<usize>,
|
||||||
}
|
}
|
||||||
impl TaskState {
|
impl TaskState {
|
||||||
pub fn new(query_ctx: QueryContextRef, shutdown_rx: oneshot::Receiver<()>) -> Self {
|
pub fn new(query_ctx: QueryContextRef, shutdown_rx: oneshot::Receiver<()>) -> Self {
|
||||||
@@ -64,6 +71,9 @@ impl TaskState {
|
|||||||
exec_state: ExecState::Idle,
|
exec_state: ExecState::Idle,
|
||||||
shutdown_rx,
|
shutdown_rx,
|
||||||
task_handle: None,
|
task_handle: None,
|
||||||
|
slow_query_metric_task: None,
|
||||||
|
min_run_interval: None,
|
||||||
|
max_filter_num: None,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -88,34 +98,20 @@ impl TaskState {
|
|||||||
pub fn get_next_start_query_time(
|
pub fn get_next_start_query_time(
|
||||||
&self,
|
&self,
|
||||||
flow_id: FlowId,
|
flow_id: FlowId,
|
||||||
time_window_size: &Option<Duration>,
|
_time_window_size: &Option<Duration>,
|
||||||
max_timeout: Option<Duration>,
|
max_timeout: Option<Duration>,
|
||||||
) -> Instant {
|
) -> Instant {
|
||||||
let last_duration = max_timeout
|
let next_duration = max_timeout
|
||||||
.unwrap_or(self.last_query_duration)
|
.unwrap_or(self.last_query_duration)
|
||||||
.min(self.last_query_duration)
|
.min(self.last_query_duration)
|
||||||
.max(MIN_REFRESH_DURATION);
|
.max(
|
||||||
|
self.min_run_interval
|
||||||
let next_duration = time_window_size
|
.map(Duration::from_secs)
|
||||||
.map(|t| {
|
.unwrap_or(MIN_REFRESH_DURATION),
|
||||||
let half = t / 2;
|
);
|
||||||
half.max(last_duration)
|
|
||||||
})
|
|
||||||
.unwrap_or(last_duration);
|
|
||||||
|
|
||||||
METRIC_FLOW_BATCHING_ENGINE_WAIT_TIME
|
|
||||||
.with_label_values(&[
|
|
||||||
flow_id.to_string().as_str(),
|
|
||||||
time_window_size
|
|
||||||
.unwrap_or_default()
|
|
||||||
.as_secs_f64()
|
|
||||||
.to_string()
|
|
||||||
.as_str(),
|
|
||||||
])
|
|
||||||
.observe(next_duration.as_secs_f64());
|
|
||||||
|
|
||||||
// if there are dirty time windows, execute immediately to clean them
|
// if there are dirty time windows, execute immediately to clean them
|
||||||
/*if self.dirty_time_windows.windows.is_empty() {
|
if self.dirty_time_windows.windows.is_empty() {
|
||||||
self.last_update_time + next_duration
|
self.last_update_time + next_duration
|
||||||
} else {
|
} else {
|
||||||
debug!(
|
debug!(
|
||||||
@@ -125,9 +121,7 @@ impl TaskState {
|
|||||||
self.dirty_time_windows.windows
|
self.dirty_time_windows.windows
|
||||||
);
|
);
|
||||||
Instant::now()
|
Instant::now()
|
||||||
}*/
|
}
|
||||||
|
|
||||||
self.last_update_time + next_duration
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
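A minimal sketch of the new wait computation, under the assumption that MIN_REFRESH_DURATION is a few seconds (its real value lives elsewhere in the crate); it shows how an adjusted min_run_interval floors the delay between runs.

// Sketch of get_next_start_query_time's duration math after this change.
use std::time::Duration;

const MIN_REFRESH_DURATION: Duration = Duration::from_secs(5); // assumed value for the example

fn next_duration(
    last_query_duration: Duration,
    max_timeout: Option<Duration>,
    min_run_interval: Option<u64>,
) -> Duration {
    max_timeout
        .unwrap_or(last_query_duration)
        .min(last_query_duration)
        .max(
            min_run_interval
                .map(Duration::from_secs)
                .unwrap_or(MIN_REFRESH_DURATION),
        )
}

fn main() {
    // A fast 2s query with adjust_flow(min_run_interval_secs = 30) waits 30s,
    // not the default floor it would otherwise get.
    assert_eq!(next_duration(Duration::from_secs(2), None, Some(30)), Duration::from_secs(30));
    assert_eq!(next_duration(Duration::from_secs(2), None, None), MIN_REFRESH_DURATION);
}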
@@ -269,9 +263,19 @@ impl DirtyTimeWindows {
|
|||||||
self.windows = new_windows;
|
self.windows = new_windows;
|
||||||
|
|
||||||
METRIC_FLOW_BATCHING_ENGINE_QUERY_WINDOW_CNT
|
METRIC_FLOW_BATCHING_ENGINE_QUERY_WINDOW_CNT
|
||||||
.with_label_values(&[flow_id.to_string().as_str()])
|
.with_label_values(&[
|
||||||
|
flow_id.to_string().as_str(),
|
||||||
|
format!("{}", window_size).as_str(),
|
||||||
|
])
|
||||||
.observe(to_be_query.len() as f64);
|
.observe(to_be_query.len() as f64);
|
||||||
|
|
||||||
|
METRIC_FLOW_BATCHING_ENGINE_STALLED_QUERY_WINDOW_CNT
|
||||||
|
.with_label_values(&[
|
||||||
|
flow_id.to_string().as_str(),
|
||||||
|
format!("{}", window_size).as_str(),
|
||||||
|
])
|
||||||
|
.observe(self.windows.len() as f64);
|
||||||
|
|
||||||
let full_time_range = to_be_query
|
let full_time_range = to_be_query
|
||||||
.iter()
|
.iter()
|
||||||
.fold(chrono::Duration::zero(), |acc, (start, end)| {
|
.fold(chrono::Duration::zero(), |acc, (start, end)| {
|
||||||
@@ -283,7 +287,10 @@ impl DirtyTimeWindows {
|
|||||||
})
|
})
|
||||||
.num_seconds() as f64;
|
.num_seconds() as f64;
|
||||||
METRIC_FLOW_BATCHING_ENGINE_QUERY_TIME_RANGE
|
METRIC_FLOW_BATCHING_ENGINE_QUERY_TIME_RANGE
|
||||||
.with_label_values(&[flow_id.to_string().as_str()])
|
.with_label_values(&[
|
||||||
|
flow_id.to_string().as_str(),
|
||||||
|
format!("{}", window_size).as_str(),
|
||||||
|
])
|
||||||
.observe(full_time_range);
|
.observe(full_time_range);
|
||||||
|
|
||||||
let mut expr_lst = vec![];
|
let mut expr_lst = vec![];
|
||||||
|
|||||||
@@ -62,8 +62,8 @@ use crate::error::{
|
|||||||
};
|
};
|
||||||
use crate::metrics::{
|
use crate::metrics::{
|
||||||
METRIC_FLOW_BATCHING_ENGINE_ERROR_CNT, METRIC_FLOW_BATCHING_ENGINE_QUERY_TIME,
|
METRIC_FLOW_BATCHING_ENGINE_ERROR_CNT, METRIC_FLOW_BATCHING_ENGINE_QUERY_TIME,
|
||||||
METRIC_FLOW_BATCHING_ENGINE_SLOW_QUERY, METRIC_FLOW_BATCHING_ENGINE_START_QUERY_CNT,
|
METRIC_FLOW_BATCHING_ENGINE_REAL_TIME_SLOW_QUERY_CNT, METRIC_FLOW_BATCHING_ENGINE_SLOW_QUERY,
|
||||||
METRIC_FLOW_ROWS,
|
METRIC_FLOW_BATCHING_ENGINE_START_QUERY_CNT, METRIC_FLOW_ROWS,
|
||||||
};
|
};
|
||||||
use crate::{Error, FlowId};
|
use crate::{Error, FlowId};
|
||||||
|
|
||||||
@@ -83,6 +83,14 @@ pub struct TaskConfig {
|
|||||||
query_type: QueryType,
|
query_type: QueryType,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl TaskConfig {
|
||||||
|
pub fn time_window_size(&self) -> Option<Duration> {
|
||||||
|
self.time_window_expr
|
||||||
|
.as_ref()
|
||||||
|
.and_then(|expr| *expr.time_window_size())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
fn determine_query_type(query: &str, query_ctx: &QueryContextRef) -> Result<QueryType, Error> {
|
fn determine_query_type(query: &str, query_ctx: &QueryContextRef) -> Result<QueryType, Error> {
|
||||||
let stmts =
|
let stmts =
|
||||||
ParserContext::create_with_dialect(query, query_ctx.sql_dialect(), ParseOptions::default())
|
ParserContext::create_with_dialect(query, query_ctx.sql_dialect(), ParseOptions::default())
|
||||||
@@ -146,6 +154,12 @@ impl BatchingTask {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub fn adjust(&self, min_run_interval_secs: u64, max_filter_num_per_query: usize) {
|
||||||
|
let mut state = self.state.write().unwrap();
|
||||||
|
state.min_run_interval = Some(min_run_interval_secs);
|
||||||
|
state.max_filter_num = Some(max_filter_num_per_query);
|
||||||
|
}
|
||||||
|
|
||||||
/// mark time window range (now - expire_after, now) as dirty (or (0, now) if expire_after not set)
|
/// mark time window range (now - expire_after, now) as dirty (or (0, now) if expire_after not set)
|
||||||
///
|
///
|
||||||
/// useful for flush_flow to flush the dirty time window range
|
/// useful for flush_flow to flush the dirty time window range
|
||||||
@@ -282,7 +296,7 @@ impl BatchingTask {
|
|||||||
let catalog = &self.config.sink_table_name[0];
|
let catalog = &self.config.sink_table_name[0];
|
||||||
let schema = &self.config.sink_table_name[1];
|
let schema = &self.config.sink_table_name[1];
|
||||||
frontend_client
|
frontend_client
|
||||||
.create(expr.clone(), catalog, schema)
|
.create(expr.clone(), catalog, schema, Some(self))
|
||||||
.await?;
|
.await?;
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
@@ -330,11 +344,53 @@ impl BatchingTask {
|
|||||||
})?;
|
})?;
|
||||||
|
|
||||||
let plan = expanded_plan;
|
let plan = expanded_plan;
|
||||||
let mut peer_desc = None;
|
|
||||||
|
let db = frontend_client
|
||||||
|
.get_random_active_frontend(catalog, schema)
|
||||||
|
.await?;
|
||||||
|
let peer_desc = db.peer.clone();
|
||||||
|
|
||||||
|
let (tx, mut rx) = oneshot::channel();
|
||||||
|
let peer_inner = peer_desc.clone();
|
||||||
|
let window_size_pretty = format!(
|
||||||
|
"{}s",
|
||||||
|
self.config.time_window_size().unwrap_or_default().as_secs()
|
||||||
|
);
|
||||||
|
let inner_window_size_pretty = window_size_pretty.clone();
|
||||||
|
let flow_id = self.config.flow_id;
|
||||||
|
let slow_query_metric_task = tokio::task::spawn(async move {
|
||||||
|
tokio::time::sleep(SLOW_QUERY_THRESHOLD).await;
|
||||||
|
METRIC_FLOW_BATCHING_ENGINE_REAL_TIME_SLOW_QUERY_CNT
|
||||||
|
.with_label_values(&[
|
||||||
|
flow_id.to_string().as_str(),
|
||||||
|
&peer_inner.to_string(),
|
||||||
|
inner_window_size_pretty.as_str(),
|
||||||
|
])
|
||||||
|
.add(1.0);
|
||||||
|
while rx.try_recv() == Err(TryRecvError::Empty) {
|
||||||
|
// sleep for a while before next update
|
||||||
|
tokio::time::sleep(MIN_REFRESH_DURATION).await;
|
||||||
|
}
|
||||||
|
METRIC_FLOW_BATCHING_ENGINE_REAL_TIME_SLOW_QUERY_CNT
|
||||||
|
.with_label_values(&[
|
||||||
|
flow_id.to_string().as_str(),
|
||||||
|
&peer_inner.to_string(),
|
||||||
|
inner_window_size_pretty.as_str(),
|
||||||
|
])
|
||||||
|
.sub(1.0);
|
||||||
|
});
|
||||||
|
self.state.write().unwrap().slow_query_metric_task = Some(slow_query_metric_task);
|
||||||
|
|
||||||
let res = {
|
let res = {
|
||||||
let _timer = METRIC_FLOW_BATCHING_ENGINE_QUERY_TIME
|
let _timer = METRIC_FLOW_BATCHING_ENGINE_QUERY_TIME
|
||||||
.with_label_values(&[flow_id.to_string().as_str()])
|
.with_label_values(&[
|
||||||
|
flow_id.to_string().as_str(),
|
||||||
|
format!(
|
||||||
|
"{}s",
|
||||||
|
self.config.time_window_size().unwrap_or_default().as_secs()
|
||||||
|
)
|
||||||
|
.as_str(),
|
||||||
|
])
|
||||||
.start_timer();
|
.start_timer();
|
||||||
|
|
||||||
// hack: special handling for the insert logical plan
|
// hack: special handling for the insert logical plan
|
||||||
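The slow-query bookkeeping in this hunk follows a spawn-then-cancel pattern: a helper task bumps a gauge once the query outlives the slow threshold and undoes it when the query signals completion over a oneshot channel. A simplified sketch, with a plain atomic counter standing in for the Prometheus gauge and stand-in thresholds:

// Sketch of the slow-query watcher pattern (values and counter are stand-ins).
use std::sync::atomic::{AtomicI64, Ordering};
use std::sync::Arc;
use std::time::Duration;
use tokio::sync::oneshot::{self, error::TryRecvError};

#[tokio::main]
async fn main() {
    let slow_queries = Arc::new(AtomicI64::new(0));
    let (tx, mut rx) = oneshot::channel::<()>();

    let gauge = slow_queries.clone();
    let watcher = tokio::spawn(async move {
        tokio::time::sleep(Duration::from_millis(50)).await; // SLOW_QUERY_THRESHOLD stand-in
        gauge.fetch_add(1, Ordering::Relaxed);
        while rx.try_recv() == Err(TryRecvError::Empty) {
            tokio::time::sleep(Duration::from_millis(10)).await; // MIN_REFRESH_DURATION stand-in
        }
        gauge.fetch_sub(1, Ordering::Relaxed);
    });

    tokio::time::sleep(Duration::from_millis(100)).await; // the "query" outlives the threshold
    let _ = tx.send(()); // signal the watcher that the query finished
    watcher.await.unwrap();
    assert_eq!(slow_queries.load(Ordering::Relaxed), 0);
}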
@@ -363,10 +419,12 @@ impl BatchingTask {
|
|||||||
};
|
};
|
||||||
|
|
||||||
frontend_client
|
frontend_client
|
||||||
.handle(req, catalog, schema, &mut peer_desc)
|
.handle(req, catalog, schema, Some(db.peer), Some(self))
|
||||||
.await
|
.await
|
||||||
};
|
};
|
||||||
|
|
||||||
|
// signal the slow query metric task to stop
|
||||||
|
let _ = tx.send(());
|
||||||
let elapsed = instant.elapsed();
|
let elapsed = instant.elapsed();
|
||||||
if let Ok(affected_rows) = &res {
|
if let Ok(affected_rows) = &res {
|
||||||
debug!(
|
debug!(
|
||||||
@@ -392,7 +450,12 @@ impl BatchingTask {
|
|||||||
METRIC_FLOW_BATCHING_ENGINE_SLOW_QUERY
|
METRIC_FLOW_BATCHING_ENGINE_SLOW_QUERY
|
||||||
.with_label_values(&[
|
.with_label_values(&[
|
||||||
flow_id.to_string().as_str(),
|
flow_id.to_string().as_str(),
|
||||||
&peer_desc.unwrap_or_default().to_string(),
|
&peer_desc.to_string(),
|
||||||
|
format!(
|
||||||
|
"{}s",
|
||||||
|
self.config.time_window_size().unwrap_or_default().as_secs()
|
||||||
|
)
|
||||||
|
.as_str(),
|
||||||
])
|
])
|
||||||
.observe(elapsed.as_secs_f64());
|
.observe(elapsed.as_secs_f64());
|
||||||
}
|
}
|
||||||
@@ -592,19 +655,20 @@ impl BatchingTask {
|
|||||||
),
|
),
|
||||||
})?;
|
})?;
|
||||||
|
|
||||||
let expr = self
|
let expr = {
|
||||||
.state
|
let mut state = self.state.write().unwrap();
|
||||||
.write()
|
let max_window_cnt = state
|
||||||
.unwrap()
|
.max_filter_num
|
||||||
.dirty_time_windows
|
.unwrap_or(DirtyTimeWindows::MAX_FILTER_NUM);
|
||||||
.gen_filter_exprs(
|
state.dirty_time_windows.gen_filter_exprs(
|
||||||
&col_name,
|
&col_name,
|
||||||
Some(l),
|
Some(l),
|
||||||
window_size,
|
window_size,
|
||||||
DirtyTimeWindows::MAX_FILTER_NUM,
|
max_window_cnt,
|
||||||
self.config.flow_id,
|
self.config.flow_id,
|
||||||
Some(self),
|
Some(self),
|
||||||
)?;
|
)?
|
||||||
|
};
|
||||||
|
|
||||||
debug!(
|
debug!(
|
||||||
"Flow id={:?}, Generated filter expr: {:?}",
|
"Flow id={:?}, Generated filter expr: {:?}",
|
||||||
|
|||||||
@@ -31,29 +31,37 @@ lazy_static! {
|
|||||||
pub static ref METRIC_FLOW_BATCHING_ENGINE_QUERY_TIME: HistogramVec = register_histogram_vec!(
|
pub static ref METRIC_FLOW_BATCHING_ENGINE_QUERY_TIME: HistogramVec = register_histogram_vec!(
|
||||||
"greptime_flow_batching_engine_query_time_secs",
|
"greptime_flow_batching_engine_query_time_secs",
|
||||||
"flow batching engine query time(seconds)",
|
"flow batching engine query time(seconds)",
|
||||||
&["flow_id"],
|
&["flow_id", "time_window_granularity"],
|
||||||
vec![0.0, 5., 10., 20., 40., 80., 160., 320., 640.,]
|
|
||||||
)
|
|
||||||
.unwrap();
|
|
||||||
pub static ref METRIC_FLOW_BATCHING_ENGINE_WAIT_TIME: HistogramVec = register_histogram_vec!(
|
|
||||||
"greptime_flow_batching_engine_wait_time_secs",
|
|
||||||
"flow batching engine wait time between query(seconds)",
|
|
||||||
&["flow_id", "time_window_size"],
|
|
||||||
vec![0.0, 5., 10., 20., 40., 80., 160., 320., 640.,]
|
vec![0.0, 5., 10., 20., 40., 80., 160., 320., 640.,]
|
||||||
)
|
)
|
||||||
.unwrap();
|
.unwrap();
|
||||||
pub static ref METRIC_FLOW_BATCHING_ENGINE_SLOW_QUERY: HistogramVec = register_histogram_vec!(
|
pub static ref METRIC_FLOW_BATCHING_ENGINE_SLOW_QUERY: HistogramVec = register_histogram_vec!(
|
||||||
"greptime_flow_batching_engine_slow_query_secs",
|
"greptime_flow_batching_engine_slow_query_secs",
|
||||||
"flow batching engine slow query(seconds)",
|
"flow batching engine slow query(seconds), updated after query finished",
|
||||||
&["flow_id", "peer"],
|
&["flow_id", "peer", "time_window_granularity"],
|
||||||
vec![60., 2. * 60., 3. * 60., 5. * 60., 10. * 60.]
|
vec![60., 2. * 60., 3. * 60., 5. * 60., 10. * 60.]
|
||||||
)
|
)
|
||||||
.unwrap();
|
.unwrap();
|
||||||
|
pub static ref METRIC_FLOW_BATCHING_ENGINE_REAL_TIME_SLOW_QUERY_CNT: GaugeVec =
|
||||||
|
register_gauge_vec!(
|
||||||
|
"greptime_flow_batching_engine_real_time_slow_query_number",
|
||||||
|
"flow batching engine real time slow query number, updated in real time",
|
||||||
|
&["flow_id", "peer", "time_window_granularity"],
|
||||||
|
)
|
||||||
|
.unwrap();
|
||||||
|
pub static ref METRIC_FLOW_BATCHING_ENGINE_STALLED_QUERY_WINDOW_CNT: HistogramVec =
|
||||||
|
register_histogram_vec!(
|
||||||
|
"greptime_flow_batching_engine_stalled_query_window_cnt",
|
||||||
|
"flow batching engine stalled query time window count",
|
||||||
|
&["flow_id", "time_window_granularity"],
|
||||||
|
vec![0.0, 5., 10., 20., 40.]
|
||||||
|
)
|
||||||
|
.unwrap();
|
||||||
pub static ref METRIC_FLOW_BATCHING_ENGINE_QUERY_WINDOW_CNT: HistogramVec =
|
pub static ref METRIC_FLOW_BATCHING_ENGINE_QUERY_WINDOW_CNT: HistogramVec =
|
||||||
register_histogram_vec!(
|
register_histogram_vec!(
|
||||||
"greptime_flow_batching_engine_query_window_cnt",
|
"greptime_flow_batching_engine_query_window_cnt",
|
||||||
"flow batching engine query time window count",
|
"flow batching engine query time window count",
|
||||||
&["flow_id"],
|
&["flow_id", "time_window_granularity"],
|
||||||
vec![0.0, 5., 10., 20., 40.]
|
vec![0.0, 5., 10., 20., 40.]
|
||||||
)
|
)
|
||||||
.unwrap();
|
.unwrap();
|
||||||
@@ -61,7 +69,15 @@ lazy_static! {
|
|||||||
register_histogram_vec!(
|
register_histogram_vec!(
|
||||||
"greptime_flow_batching_engine_query_time_range_secs",
|
"greptime_flow_batching_engine_query_time_range_secs",
|
||||||
"flow batching engine query time range(seconds)",
|
"flow batching engine query time range(seconds)",
|
||||||
&["flow_id"],
|
&["flow_id", "time_window_granularity"],
|
||||||
|
vec![60., 4. * 60., 16. * 60., 64. * 60., 256. * 60.]
|
||||||
|
)
|
||||||
|
.unwrap();
|
||||||
|
pub static ref METRIC_FLOW_BATCHING_ENGINE_GUESS_FE_LOAD: HistogramVec =
|
||||||
|
register_histogram_vec!(
|
||||||
|
"greptime_flow_batching_engine_guess_fe_load",
|
||||||
|
"flow batching engine guessed frontend load",
|
||||||
|
&["fe_addr"],
|
||||||
vec![60., 4. * 60., 16. * 60., 64. * 60., 256. * 60.]
|
vec![60., 4. * 60., 16. * 60., 64. * 60., 256. * 60.]
|
||||||
)
|
)
|
||||||
.unwrap();
|
.unwrap();
|
||||||
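Illustrative only: recording into one of the histograms above once the extra "time_window_granularity" label is present. The metric name, help text, labels and buckets are taken from this hunk; the label values are made up.

// Sketch of a call site using the relabeled histogram.
use prometheus::{register_histogram_vec, HistogramVec};

fn main() {
    let query_window_cnt: HistogramVec = register_histogram_vec!(
        "greptime_flow_batching_engine_query_window_cnt",
        "flow batching engine query time window count",
        &["flow_id", "time_window_granularity"],
        vec![0.0, 5., 10., 20., 40.]
    )
    .unwrap();

    // e.g. flow 42 with a 300s time window queried 3 dirty windows this tick
    query_window_cnt
        .with_label_values(&["42", "300s"])
        .observe(3.0);
}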
|
|||||||
@@ -12,7 +12,7 @@
|
|||||||
// See the License for the specific language governing permissions and
|
// See the License for the specific language governing permissions and
|
||||||
// limitations under the License.
|
// limitations under the License.
|
||||||
|
|
||||||
use api::v1::flow::FlowRequestHeader;
|
use api::v1::flow::{AdjustFlow, FlowRequestHeader};
|
||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
use common_error::ext::BoxedError;
|
use common_error::ext::BoxedError;
|
||||||
use common_function::handlers::FlowServiceHandler;
|
use common_function::handlers::FlowServiceHandler;
|
||||||
@@ -22,6 +22,7 @@ use common_query::error::Result;
|
|||||||
use common_telemetry::tracing_context::TracingContext;
|
use common_telemetry::tracing_context::TracingContext;
|
||||||
use futures::stream::FuturesUnordered;
|
use futures::stream::FuturesUnordered;
|
||||||
use futures::StreamExt;
|
use futures::StreamExt;
|
||||||
|
use serde_json::json;
|
||||||
use session::context::QueryContextRef;
|
use session::context::QueryContextRef;
|
||||||
use snafu::{OptionExt, ResultExt};
|
use snafu::{OptionExt, ResultExt};
|
||||||
|
|
||||||
@@ -57,9 +58,96 @@ impl FlowServiceHandler for FlowServiceOperator {
|
|||||||
) -> Result<api::v1::flow::FlowResponse> {
|
) -> Result<api::v1::flow::FlowResponse> {
|
||||||
self.flush_inner(catalog, flow, ctx).await
|
self.flush_inner(catalog, flow, ctx).await
|
||||||
}
|
}
|
||||||
|
|
||||||
|
async fn adjust(
|
||||||
|
&self,
|
||||||
|
catalog: &str,
|
||||||
|
flow: &str,
|
||||||
|
min_run_interval_secs: u64,
|
||||||
|
max_filter_num_per_query: usize,
|
||||||
|
ctx: QueryContextRef,
|
||||||
|
) -> Result<api::v1::flow::FlowResponse> {
|
||||||
|
self.adjust_inner(
|
||||||
|
catalog,
|
||||||
|
flow,
|
||||||
|
min_run_interval_secs,
|
||||||
|
max_filter_num_per_query,
|
||||||
|
ctx,
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl FlowServiceOperator {
|
impl FlowServiceOperator {
|
||||||
|
async fn adjust_inner(
|
||||||
|
&self,
|
||||||
|
catalog: &str,
|
||||||
|
flow: &str,
|
||||||
|
min_run_interval_secs: u64,
|
||||||
|
max_filter_num_per_query: usize,
|
||||||
|
ctx: QueryContextRef,
|
||||||
|
) -> Result<api::v1::flow::FlowResponse> {
|
||||||
|
let id = self
|
||||||
|
.flow_metadata_manager
|
||||||
|
.flow_name_manager()
|
||||||
|
.get(catalog, flow)
|
||||||
|
.await
|
||||||
|
.map_err(BoxedError::new)
|
||||||
|
.context(common_query::error::ExecuteSnafu)?
|
||||||
|
.context(common_meta::error::FlowNotFoundSnafu {
|
||||||
|
flow_name: format!("{}.{}", catalog, flow),
|
||||||
|
})
|
||||||
|
.map_err(BoxedError::new)
|
||||||
|
.context(common_query::error::ExecuteSnafu)?
|
||||||
|
.flow_id();
|
||||||
|
|
||||||
|
let all_flownode_peers = self
|
||||||
|
.flow_metadata_manager
|
||||||
|
.flow_route_manager()
|
||||||
|
.routes(id)
|
||||||
|
.await
|
||||||
|
.map_err(BoxedError::new)
|
||||||
|
.context(common_query::error::ExecuteSnafu)?;
|
||||||
|
|
||||||
|
// order of flownodes doesn't matter here
|
||||||
|
let all_flow_nodes = FuturesUnordered::from_iter(
|
||||||
|
all_flownode_peers
|
||||||
|
.iter()
|
||||||
|
.map(|(_key, peer)| self.node_manager.flownode(peer.peer())),
|
||||||
|
)
|
||||||
|
.collect::<Vec<_>>()
|
||||||
|
.await;
|
||||||
|
|
||||||
|
// TODO(discord9): use proper type for flow options
|
||||||
|
let options = json!({
|
||||||
|
"min_run_interval_secs": min_run_interval_secs,
|
||||||
|
"max_filter_num_per_query": max_filter_num_per_query,
|
||||||
|
});
|
||||||
|
|
||||||
|
for node in all_flow_nodes {
|
||||||
|
let _res = {
|
||||||
|
use api::v1::flow::{flow_request, FlowRequest};
|
||||||
|
let flush_req = FlowRequest {
|
||||||
|
header: Some(FlowRequestHeader {
|
||||||
|
tracing_context: TracingContext::from_current_span().to_w3c(),
|
||||||
|
query_context: Some(
|
||||||
|
common_meta::rpc::ddl::QueryContext::from(ctx.clone()).into(),
|
||||||
|
),
|
||||||
|
}),
|
||||||
|
body: Some(flow_request::Body::Adjust(AdjustFlow {
|
||||||
|
flow_id: Some(api::v1::FlowId { id }),
|
||||||
|
options: options.to_string(),
|
||||||
|
})),
|
||||||
|
};
|
||||||
|
node.handle(flush_req)
|
||||||
|
.await
|
||||||
|
.map_err(BoxedError::new)
|
||||||
|
.context(common_query::error::ExecuteSnafu)?
|
||||||
|
};
|
||||||
|
}
|
||||||
|
Ok(Default::default())
|
||||||
|
}
|
||||||
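The broadcast in adjust_inner above sends the same request to every flownode route and, as the comment notes, ignores completion order. A generic sketch of that FuturesUnordered pattern with hypothetical peer names (not the GreptimeDB node manager API):

// Sketch of fanning out one request to all peers, order-insensitive.
use futures::stream::{FuturesUnordered, StreamExt};

#[tokio::main]
async fn main() {
    let peers = vec!["node-a", "node-b", "node-c"]; // hypothetical peers
    let mut pending: FuturesUnordered<_> = peers
        .into_iter()
        .map(|p| async move { format!("adjusted {p}") })
        .collect();
    while let Some(result) = pending.next().await {
        println!("{result}");
    }
}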
|
|
||||||
/// Flush the flownodes according to the flow id.
|
/// Flush the flownodes according to the flow id.
|
||||||
async fn flush_inner(
|
async fn flush_inner(
|
||||||
&self,
|
&self,
|
||||||
|
|||||||
@@ -13,17 +13,20 @@
|
|||||||
// limitations under the License.
|
// limitations under the License.
|
||||||
|
|
||||||
use clap::Parser;
|
use clap::Parser;
|
||||||
use cli::{BenchTableMetadataCommand, DataCommand, MetaCommand, Tool};
|
use cli::{
|
||||||
|
BenchTableMetadataCommand, ExportCommand, ImportCommand, MetaRestoreCommand,
|
||||||
|
MetaSnapshotCommand, Tool,
|
||||||
|
};
|
||||||
use common_error::ext::BoxedError;
|
use common_error::ext::BoxedError;
|
||||||
|
|
||||||
#[derive(Parser)]
|
#[derive(Parser)]
|
||||||
pub enum SubCommand {
|
pub enum SubCommand {
|
||||||
// Attach(AttachCommand),
|
// Attach(AttachCommand),
|
||||||
Bench(BenchTableMetadataCommand),
|
Bench(BenchTableMetadataCommand),
|
||||||
#[clap(subcommand)]
|
Export(ExportCommand),
|
||||||
Data(DataCommand),
|
Import(ImportCommand),
|
||||||
#[clap(subcommand)]
|
MetaSnapshot(MetaSnapshotCommand),
|
||||||
Meta(MetaCommand),
|
MetaRestore(MetaRestoreCommand),
|
||||||
}
|
}
|
||||||
|
|
||||||
impl SubCommand {
|
impl SubCommand {
|
||||||
@@ -31,8 +34,10 @@ impl SubCommand {
|
|||||||
match self {
|
match self {
|
||||||
// SubCommand::Attach(cmd) => cmd.build().await,
|
// SubCommand::Attach(cmd) => cmd.build().await,
|
||||||
SubCommand::Bench(cmd) => cmd.build().await,
|
SubCommand::Bench(cmd) => cmd.build().await,
|
||||||
SubCommand::Data(cmd) => cmd.build().await,
|
SubCommand::Export(cmd) => cmd.build().await,
|
||||||
SubCommand::Meta(cmd) => cmd.build().await,
|
SubCommand::Import(cmd) => cmd.build().await,
|
||||||
|
SubCommand::MetaSnapshot(cmd) => cmd.build().await,
|
||||||
|
SubCommand::MetaRestore(cmd) => cmd.build().await,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -2444,7 +2444,7 @@ impl PromPlanner {
|
|||||||
LogicalPlanBuilder::from(left)
|
LogicalPlanBuilder::from(left)
|
||||||
.alias(left_table_ref)
|
.alias(left_table_ref)
|
||||||
.context(DataFusionPlanningSnafu)?
|
.context(DataFusionPlanningSnafu)?
|
||||||
.join_detailed(
|
.join(
|
||||||
right,
|
right,
|
||||||
JoinType::Inner,
|
JoinType::Inner,
|
||||||
(
|
(
|
||||||
@@ -2458,7 +2458,6 @@ impl PromPlanner {
|
|||||||
.collect::<Vec<_>>(),
|
.collect::<Vec<_>>(),
|
||||||
),
|
),
|
||||||
None,
|
None,
|
||||||
true,
|
|
||||||
)
|
)
|
||||||
.context(DataFusionPlanningSnafu)?
|
.context(DataFusionPlanningSnafu)?
|
||||||
.build()
|
.build()
|
||||||
|
|||||||
@@ -139,11 +139,11 @@ impl GrpcOptions {
|
|||||||
#[serde(rename_all = "snake_case")]
|
#[serde(rename_all = "snake_case")]
|
||||||
pub enum FlightCompression {
|
pub enum FlightCompression {
|
||||||
/// Disable all compression in Arrow Flight service.
|
/// Disable all compression in Arrow Flight service.
|
||||||
#[default]
|
|
||||||
None,
|
None,
|
||||||
/// Enable only transport layer compression (zstd).
|
/// Enable only transport layer compression (zstd).
|
||||||
Transport,
|
Transport,
|
||||||
/// Enable only payload compression (lz4)
|
/// Enable only payload compression (lz4)
|
||||||
|
#[default]
|
||||||
ArrowIpc,
|
ArrowIpc,
|
||||||
/// Enable all compression.
|
/// Enable all compression.
|
||||||
All,
|
All,
|
||||||
|
|||||||
@@ -675,16 +675,11 @@ insert into cache_miss_with_null_label values
|
|||||||
Affected Rows: 4
|
Affected Rows: 4
|
||||||
|
|
||||||
-- SQLNESS SORT_RESULT 3 1
|
-- SQLNESS SORT_RESULT 3 1
|
||||||
|
-- null != null, so it returns the empty set.
|
||||||
tql eval (3, 4, '1s') cache_hit_with_null_label / (cache_miss_with_null_label + cache_hit_with_null_label);
|
tql eval (3, 4, '1s') cache_hit_with_null_label / (cache_miss_with_null_label + cache_hit_with_null_label);
|
||||||
|
|
||||||
+-------+------------+---------------------+---------------------------------------------------------------------------------------------------------------+
|
++
|
||||||
| job | null_label | ts | lhs.greptime_value / rhs.cache_miss_with_null_label.greptime_value + cache_hit_with_null_label.greptime_value |
|
++
|
||||||
+-------+------------+---------------------+---------------------------------------------------------------------------------------------------------------+
|
|
||||||
| read | | 1970-01-01T00:00:03 | 0.5 |
|
|
||||||
| read | | 1970-01-01T00:00:04 | 0.75 |
|
|
||||||
| write | | 1970-01-01T00:00:03 | 0.5 |
|
|
||||||
| write | | 1970-01-01T00:00:04 | 0.6666666666666666 |
|
|
||||||
+-------+------------+---------------------+---------------------------------------------------------------------------------------------------------------+
|
|
||||||
|
|
||||||
-- SQLNESS SORT_RESULT 3 1
|
-- SQLNESS SORT_RESULT 3 1
|
||||||
tql eval (3, 4, '1s') cache_hit_with_null_label / ignoring(null_label) (cache_miss_with_null_label + ignoring(null_label) cache_hit_with_null_label);
|
tql eval (3, 4, '1s') cache_hit_with_null_label / ignoring(null_label) (cache_miss_with_null_label + ignoring(null_label) cache_hit_with_null_label);
|
||||||
|
|||||||
@@ -325,6 +325,7 @@ insert into cache_miss_with_null_label values
|
|||||||
(4000, "write", null, 2.0);
|
(4000, "write", null, 2.0);
|
||||||
|
|
||||||
-- SQLNESS SORT_RESULT 3 1
|
-- SQLNESS SORT_RESULT 3 1
|
||||||
|
-- null != null, so it returns the empty set.
|
||||||
tql eval (3, 4, '1s') cache_hit_with_null_label / (cache_miss_with_null_label + cache_hit_with_null_label);
|
tql eval (3, 4, '1s') cache_hit_with_null_label / (cache_miss_with_null_label + cache_hit_with_null_label);
|
||||||
|
|
||||||
-- SQLNESS SORT_RESULT 3 1
|
-- SQLNESS SORT_RESULT 3 1
|
||||||
|
|||||||