Mirror of https://github.com/GreptimeTeam/greptimedb.git, synced 2025-12-28 00:42:56 +00:00
Compare commits: oldrelease...v0.14.1 (4 commits)
| Author | SHA1 | Date |
|---|---|---|
| | e2df38d0d1 | |
| | 66e2242e46 | |
| | 489b16ae30 | |
| | 85d564b0fb | |

Cargo.lock (generated): 231 changes
@@ -185,7 +185,7 @@ checksum = "d301b3b94cb4b2f23d7917810addbbaff90738e0ca2be692bd027e70d7e0330c"
 
 [[package]]
 name = "api"
-version = "0.14.0"
+version = "0.14.1"
 dependencies = [
  "common-base",
  "common-decimal",
@@ -915,7 +915,7 @@ dependencies = [
 
 [[package]]
 name = "auth"
-version = "0.14.0"
+version = "0.14.1"
 dependencies = [
  "api",
  "async-trait",
@@ -1537,7 +1537,7 @@ dependencies = [
 
 [[package]]
 name = "cache"
-version = "0.14.0"
+version = "0.14.1"
 dependencies = [
  "catalog",
  "common-error",
@@ -1561,7 +1561,7 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"
 
 [[package]]
 name = "catalog"
-version = "0.14.0"
+version = "0.14.1"
 dependencies = [
  "api",
  "arrow 54.2.1",
@@ -1619,9 +1619,9 @@ dependencies = [
 
 [[package]]
 name = "cc"
-version = "1.1.24"
+version = "1.2.20"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "812acba72f0a070b003d3697490d2b55b837230ae7c6c6497f05cc2ddbb8d938"
+checksum = "04da6a0d40b948dfc4fa8f5bbf402b0fc1a64a28dbf7d12ffd683550f2c1b63a"
 dependencies = [
  "jobserver",
  "libc",
@@ -1874,7 +1874,7 @@ checksum = "1462739cb27611015575c0c11df5df7601141071f07518d56fcc1be504cbec97"
 
 [[package]]
 name = "cli"
-version = "0.14.0"
+version = "0.14.1"
 dependencies = [
  "async-trait",
  "auth",
@@ -1917,7 +1917,7 @@ dependencies = [
  "session",
  "snafu 0.8.5",
  "store-api",
- "substrait 0.14.0",
+ "substrait 0.14.1",
  "table",
  "tempfile",
  "tokio",
@@ -1926,7 +1926,7 @@ dependencies = [
 
 [[package]]
 name = "client"
-version = "0.14.0"
+version = "0.14.1"
 dependencies = [
  "api",
  "arc-swap",
@@ -1955,7 +1955,7 @@ dependencies = [
  "rand 0.9.0",
  "serde_json",
  "snafu 0.8.5",
- "substrait 0.14.0",
+ "substrait 0.14.1",
  "substrait 0.37.3",
  "tokio",
  "tokio-stream",
@@ -1996,7 +1996,7 @@ dependencies = [
 
 [[package]]
 name = "cmd"
-version = "0.14.0"
+version = "0.14.1"
 dependencies = [
  "async-trait",
  "auth",
@@ -2056,7 +2056,7 @@ dependencies = [
  "similar-asserts",
  "snafu 0.8.5",
  "store-api",
- "substrait 0.14.0",
+ "substrait 0.14.1",
  "table",
  "temp-env",
  "tempfile",
@@ -2102,7 +2102,7 @@ checksum = "55b672471b4e9f9e95499ea597ff64941a309b2cdbffcc46f2cc5e2d971fd335"
 
 [[package]]
 name = "common-base"
-version = "0.14.0"
+version = "0.14.1"
 dependencies = [
  "anymap2",
  "async-trait",
@@ -2124,11 +2124,11 @@ dependencies = [
 
 [[package]]
 name = "common-catalog"
-version = "0.14.0"
+version = "0.14.1"
 
 [[package]]
 name = "common-config"
-version = "0.14.0"
+version = "0.14.1"
 dependencies = [
  "common-base",
  "common-error",
@@ -2153,7 +2153,7 @@ dependencies = [
 
 [[package]]
 name = "common-datasource"
-version = "0.14.0"
+version = "0.14.1"
 dependencies = [
  "arrow 54.2.1",
  "arrow-schema 54.3.1",
@@ -2190,7 +2190,7 @@ dependencies = [
 
 [[package]]
 name = "common-decimal"
-version = "0.14.0"
+version = "0.14.1"
 dependencies = [
  "bigdecimal 0.4.8",
  "common-error",
@@ -2203,7 +2203,7 @@ dependencies = [
 
 [[package]]
 name = "common-error"
-version = "0.14.0"
+version = "0.14.1"
 dependencies = [
  "common-macro",
  "http 1.1.0",
@@ -2214,7 +2214,7 @@ dependencies = [
 
 [[package]]
 name = "common-frontend"
-version = "0.14.0"
+version = "0.14.1"
 dependencies = [
  "async-trait",
  "common-error",
@@ -2224,7 +2224,7 @@ dependencies = [
 
 [[package]]
 name = "common-function"
-version = "0.14.0"
+version = "0.14.1"
 dependencies = [
  "ahash 0.8.11",
  "api",
@@ -2277,7 +2277,7 @@ dependencies = [
 
 [[package]]
 name = "common-greptimedb-telemetry"
-version = "0.14.0"
+version = "0.14.1"
 dependencies = [
  "async-trait",
  "common-runtime",
@@ -2294,7 +2294,7 @@ dependencies = [
 
 [[package]]
 name = "common-grpc"
-version = "0.14.0"
+version = "0.14.1"
 dependencies = [
  "api",
  "arrow-flight",
@@ -2325,7 +2325,7 @@ dependencies = [
 
 [[package]]
 name = "common-grpc-expr"
-version = "0.14.0"
+version = "0.14.1"
 dependencies = [
  "api",
  "common-base",
@@ -2344,7 +2344,7 @@ dependencies = [
 
 [[package]]
 name = "common-macro"
-version = "0.14.0"
+version = "0.14.1"
 dependencies = [
  "arc-swap",
  "common-query",
@@ -2358,7 +2358,7 @@ dependencies = [
 
 [[package]]
 name = "common-mem-prof"
-version = "0.14.0"
+version = "0.14.1"
 dependencies = [
  "common-error",
  "common-macro",
@@ -2371,7 +2371,7 @@ dependencies = [
 
 [[package]]
 name = "common-meta"
-version = "0.14.0"
+version = "0.14.1"
 dependencies = [
  "anymap2",
  "api",
@@ -2432,7 +2432,7 @@ dependencies = [
 
 [[package]]
 name = "common-options"
-version = "0.14.0"
+version = "0.14.1"
 dependencies = [
  "common-grpc",
  "humantime-serde",
@@ -2441,11 +2441,11 @@ dependencies = [
 
 [[package]]
 name = "common-plugins"
-version = "0.14.0"
+version = "0.14.1"
 
 [[package]]
 name = "common-pprof"
-version = "0.14.0"
+version = "0.14.1"
 dependencies = [
  "common-error",
  "common-macro",
@@ -2457,7 +2457,7 @@ dependencies = [
 
 [[package]]
 name = "common-procedure"
-version = "0.14.0"
+version = "0.14.1"
 dependencies = [
  "async-stream",
  "async-trait",
@@ -2484,7 +2484,7 @@ dependencies = [
 
 [[package]]
 name = "common-procedure-test"
-version = "0.14.0"
+version = "0.14.1"
 dependencies = [
  "async-trait",
  "common-procedure",
@@ -2493,7 +2493,7 @@ dependencies = [
 
 [[package]]
 name = "common-query"
-version = "0.14.0"
+version = "0.14.1"
 dependencies = [
  "api",
  "async-trait",
@@ -2510,7 +2510,7 @@ dependencies = [
  "futures-util",
  "serde",
  "snafu 0.8.5",
- "sqlparser 0.54.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=e98e6b322426a9d397a71efef17075966223c089)",
+ "sqlparser 0.54.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0cf6c04490d59435ee965edd2078e8855bd8471e)",
  "sqlparser_derive 0.1.1",
  "statrs",
  "store-api",
@@ -2519,7 +2519,7 @@ dependencies = [
 
 [[package]]
 name = "common-recordbatch"
-version = "0.14.0"
+version = "0.14.1"
 dependencies = [
  "arc-swap",
  "common-error",
@@ -2539,7 +2539,7 @@ dependencies = [
 
 [[package]]
 name = "common-runtime"
-version = "0.14.0"
+version = "0.14.1"
 dependencies = [
  "async-trait",
  "clap 4.5.19",
@@ -2569,14 +2569,14 @@ dependencies = [
 
 [[package]]
 name = "common-session"
-version = "0.14.0"
+version = "0.14.1"
 dependencies = [
  "strum 0.27.1",
 ]
 
 [[package]]
 name = "common-telemetry"
-version = "0.14.0"
+version = "0.14.1"
 dependencies = [
  "atty",
  "backtrace",
@@ -2604,7 +2604,7 @@ dependencies = [
 
 [[package]]
 name = "common-test-util"
-version = "0.14.0"
+version = "0.14.1"
 dependencies = [
  "client",
  "common-query",
@@ -2616,7 +2616,7 @@ dependencies = [
 
 [[package]]
 name = "common-time"
-version = "0.14.0"
+version = "0.14.1"
 dependencies = [
  "arrow 54.2.1",
  "chrono",
@@ -2634,7 +2634,7 @@ dependencies = [
 
 [[package]]
 name = "common-version"
-version = "0.14.0"
+version = "0.14.1"
 dependencies = [
  "build-data",
  "const_format",
@@ -2644,7 +2644,7 @@ dependencies = [
 
 [[package]]
 name = "common-wal"
-version = "0.14.0"
+version = "0.14.1"
 dependencies = [
  "common-base",
  "common-error",
@@ -2946,9 +2946,9 @@ dependencies = [
 
 [[package]]
 name = "crossbeam-channel"
-version = "0.5.13"
+version = "0.5.15"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "33480d6946193aa8033910124896ca395333cae7e2d1113d1fef6c3272217df2"
+checksum = "82b8f8f868b36967f9606790d1903570de9ceaf870a7bf9fbbd3016d636a2cb2"
 dependencies = [
  "crossbeam-utils",
 ]
@@ -3117,7 +3117,7 @@ checksum = "e8566979429cf69b49a5c740c60791108e86440e8be149bbea4fe54d2c32d6e2"
 [[package]]
 name = "datafusion"
 version = "45.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5bbedc6704162afb03478f56ffb629405a4e1220#5bbedc6704162afb03478f56ffb629405a4e1220"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
 dependencies = [
  "arrow 54.2.1",
  "arrow-array 54.2.1",
@@ -3168,7 +3168,7 @@ dependencies = [
 [[package]]
 name = "datafusion-catalog"
 version = "45.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5bbedc6704162afb03478f56ffb629405a4e1220#5bbedc6704162afb03478f56ffb629405a4e1220"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
 dependencies = [
  "arrow 54.2.1",
  "async-trait",
@@ -3188,7 +3188,7 @@ dependencies = [
 [[package]]
 name = "datafusion-catalog-listing"
 version = "45.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5bbedc6704162afb03478f56ffb629405a4e1220#5bbedc6704162afb03478f56ffb629405a4e1220"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
 dependencies = [
  "arrow 54.2.1",
  "arrow-schema 54.3.1",
@@ -3211,7 +3211,7 @@ dependencies = [
 [[package]]
 name = "datafusion-common"
 version = "45.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5bbedc6704162afb03478f56ffb629405a4e1220#5bbedc6704162afb03478f56ffb629405a4e1220"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
 dependencies = [
  "ahash 0.8.11",
  "arrow 54.2.1",
@@ -3236,7 +3236,7 @@ dependencies = [
 [[package]]
 name = "datafusion-common-runtime"
 version = "45.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5bbedc6704162afb03478f56ffb629405a4e1220#5bbedc6704162afb03478f56ffb629405a4e1220"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
 dependencies = [
  "log",
  "tokio",
@@ -3245,12 +3245,12 @@ dependencies = [
 [[package]]
 name = "datafusion-doc"
 version = "45.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5bbedc6704162afb03478f56ffb629405a4e1220#5bbedc6704162afb03478f56ffb629405a4e1220"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
 
 [[package]]
 name = "datafusion-execution"
 version = "45.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5bbedc6704162afb03478f56ffb629405a4e1220#5bbedc6704162afb03478f56ffb629405a4e1220"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
 dependencies = [
  "arrow 54.2.1",
  "dashmap",
@@ -3268,7 +3268,7 @@ dependencies = [
 [[package]]
 name = "datafusion-expr"
 version = "45.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5bbedc6704162afb03478f56ffb629405a4e1220#5bbedc6704162afb03478f56ffb629405a4e1220"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
 dependencies = [
  "arrow 54.2.1",
  "chrono",
@@ -3288,7 +3288,7 @@ dependencies = [
 [[package]]
 name = "datafusion-expr-common"
 version = "45.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5bbedc6704162afb03478f56ffb629405a4e1220#5bbedc6704162afb03478f56ffb629405a4e1220"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
 dependencies = [
  "arrow 54.2.1",
  "datafusion-common",
@@ -3299,7 +3299,7 @@ dependencies = [
 [[package]]
 name = "datafusion-functions"
 version = "45.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5bbedc6704162afb03478f56ffb629405a4e1220#5bbedc6704162afb03478f56ffb629405a4e1220"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
 dependencies = [
  "arrow 54.2.1",
  "arrow-buffer 54.3.1",
@@ -3328,7 +3328,7 @@ dependencies = [
 [[package]]
 name = "datafusion-functions-aggregate"
 version = "45.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5bbedc6704162afb03478f56ffb629405a4e1220#5bbedc6704162afb03478f56ffb629405a4e1220"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
 dependencies = [
  "ahash 0.8.11",
  "arrow 54.2.1",
@@ -3349,7 +3349,7 @@ dependencies = [
 [[package]]
 name = "datafusion-functions-aggregate-common"
 version = "45.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5bbedc6704162afb03478f56ffb629405a4e1220#5bbedc6704162afb03478f56ffb629405a4e1220"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
 dependencies = [
  "ahash 0.8.11",
  "arrow 54.2.1",
@@ -3361,7 +3361,7 @@ dependencies = [
 [[package]]
 name = "datafusion-functions-nested"
 version = "45.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5bbedc6704162afb03478f56ffb629405a4e1220#5bbedc6704162afb03478f56ffb629405a4e1220"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
 dependencies = [
  "arrow 54.2.1",
  "arrow-array 54.2.1",
@@ -3383,7 +3383,7 @@ dependencies = [
 [[package]]
 name = "datafusion-functions-table"
 version = "45.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5bbedc6704162afb03478f56ffb629405a4e1220#5bbedc6704162afb03478f56ffb629405a4e1220"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
 dependencies = [
  "arrow 54.2.1",
  "async-trait",
@@ -3398,7 +3398,7 @@ dependencies = [
 [[package]]
 name = "datafusion-functions-window"
 version = "45.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5bbedc6704162afb03478f56ffb629405a4e1220#5bbedc6704162afb03478f56ffb629405a4e1220"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
 dependencies = [
  "datafusion-common",
  "datafusion-doc",
@@ -3414,7 +3414,7 @@ dependencies = [
 [[package]]
 name = "datafusion-functions-window-common"
 version = "45.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5bbedc6704162afb03478f56ffb629405a4e1220#5bbedc6704162afb03478f56ffb629405a4e1220"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
 dependencies = [
  "datafusion-common",
  "datafusion-physical-expr-common",
@@ -3423,7 +3423,7 @@ dependencies = [
 [[package]]
 name = "datafusion-macros"
 version = "45.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5bbedc6704162afb03478f56ffb629405a4e1220#5bbedc6704162afb03478f56ffb629405a4e1220"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
 dependencies = [
  "datafusion-expr",
  "quote",
@@ -3433,7 +3433,7 @@ dependencies = [
 [[package]]
 name = "datafusion-optimizer"
 version = "45.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5bbedc6704162afb03478f56ffb629405a4e1220#5bbedc6704162afb03478f56ffb629405a4e1220"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
 dependencies = [
  "arrow 54.2.1",
  "chrono",
@@ -3451,7 +3451,7 @@ dependencies = [
 [[package]]
 name = "datafusion-physical-expr"
 version = "45.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5bbedc6704162afb03478f56ffb629405a4e1220#5bbedc6704162afb03478f56ffb629405a4e1220"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
 dependencies = [
  "ahash 0.8.11",
  "arrow 54.2.1",
@@ -3474,7 +3474,7 @@ dependencies = [
 [[package]]
 name = "datafusion-physical-expr-common"
 version = "45.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5bbedc6704162afb03478f56ffb629405a4e1220#5bbedc6704162afb03478f56ffb629405a4e1220"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
 dependencies = [
  "ahash 0.8.11",
  "arrow 54.2.1",
@@ -3487,7 +3487,7 @@ dependencies = [
 [[package]]
 name = "datafusion-physical-optimizer"
 version = "45.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5bbedc6704162afb03478f56ffb629405a4e1220#5bbedc6704162afb03478f56ffb629405a4e1220"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
 dependencies = [
  "arrow 54.2.1",
  "arrow-schema 54.3.1",
@@ -3508,7 +3508,7 @@ dependencies = [
 [[package]]
 name = "datafusion-physical-plan"
 version = "45.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5bbedc6704162afb03478f56ffb629405a4e1220#5bbedc6704162afb03478f56ffb629405a4e1220"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
 dependencies = [
  "ahash 0.8.11",
  "arrow 54.2.1",
@@ -3538,7 +3538,7 @@ dependencies = [
 [[package]]
 name = "datafusion-sql"
 version = "45.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5bbedc6704162afb03478f56ffb629405a4e1220#5bbedc6704162afb03478f56ffb629405a4e1220"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
 dependencies = [
  "arrow 54.2.1",
  "arrow-array 54.2.1",
@@ -3556,7 +3556,7 @@ dependencies = [
 [[package]]
 name = "datafusion-substrait"
 version = "45.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5bbedc6704162afb03478f56ffb629405a4e1220#5bbedc6704162afb03478f56ffb629405a4e1220"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=e104c7cf62b11dd5fe41461b82514978234326b4#e104c7cf62b11dd5fe41461b82514978234326b4"
 dependencies = [
  "async-recursion",
  "async-trait",
@@ -3572,7 +3572,7 @@ dependencies = [
 
 [[package]]
 name = "datanode"
-version = "0.14.0"
+version = "0.14.1"
 dependencies = [
  "api",
  "arrow-flight",
@@ -3624,7 +3624,7 @@ dependencies = [
  "session",
  "snafu 0.8.5",
  "store-api",
- "substrait 0.14.0",
+ "substrait 0.14.1",
  "table",
  "tokio",
  "toml 0.8.19",
@@ -3633,7 +3633,7 @@ dependencies = [
 
 [[package]]
 name = "datatypes"
-version = "0.14.0"
+version = "0.14.1"
 dependencies = [
  "arrow 54.2.1",
  "arrow-array 54.2.1",
@@ -3656,7 +3656,7 @@ dependencies = [
  "serde",
  "serde_json",
  "snafu 0.8.5",
- "sqlparser 0.54.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=e98e6b322426a9d397a71efef17075966223c089)",
+ "sqlparser 0.54.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0cf6c04490d59435ee965edd2078e8855bd8471e)",
  "sqlparser_derive 0.1.1",
 ]
 
@@ -4259,7 +4259,7 @@ dependencies = [
 
 [[package]]
 name = "file-engine"
-version = "0.14.0"
+version = "0.14.1"
 dependencies = [
  "api",
  "async-trait",
@@ -4382,7 +4382,7 @@ checksum = "8bf7cc16383c4b8d58b9905a8509f02926ce3058053c056376248d958c9df1e8"
 
 [[package]]
 name = "flow"
-version = "0.14.0"
+version = "0.14.1"
 dependencies = [
  "api",
  "arrow 54.2.1",
@@ -4444,7 +4444,7 @@ dependencies = [
  "snafu 0.8.5",
  "store-api",
  "strum 0.27.1",
- "substrait 0.14.0",
+ "substrait 0.14.1",
  "table",
  "tokio",
  "tonic 0.12.3",
@@ -4499,7 +4499,7 @@ checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa"
 
 [[package]]
 name = "frontend"
-version = "0.14.0"
+version = "0.14.1"
 dependencies = [
  "api",
  "arc-swap",
@@ -4553,10 +4553,10 @@ dependencies = [
  "session",
  "snafu 0.8.5",
  "sql",
- "sqlparser 0.54.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=e98e6b322426a9d397a71efef17075966223c089)",
+ "sqlparser 0.54.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0cf6c04490d59435ee965edd2078e8855bd8471e)",
  "store-api",
  "strfmt",
- "substrait 0.14.0",
+ "substrait 0.14.1",
  "table",
  "tokio",
  "toml 0.8.19",
@@ -5795,7 +5795,7 @@ dependencies = [
 
 [[package]]
 name = "index"
-version = "0.14.0"
+version = "0.14.1"
 dependencies = [
  "async-trait",
  "asynchronous-codec",
@@ -6509,7 +6509,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "4979f22fdb869068da03c9f7528f8297c6fd2606bc3a4affe42e6a823fdb8da4"
 dependencies = [
  "cfg-if",
- "windows-targets 0.52.6",
+ "windows-targets 0.48.5",
 ]
 
 [[package]]
@@ -6605,7 +6605,7 @@ checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24"
 
 [[package]]
 name = "log-query"
-version = "0.14.0"
+version = "0.14.1"
 dependencies = [
  "chrono",
  "common-error",
@@ -6617,7 +6617,7 @@ dependencies = [
 
 [[package]]
 name = "log-store"
-version = "0.14.0"
+version = "0.14.1"
 dependencies = [
  "async-stream",
  "async-trait",
@@ -6911,7 +6911,7 @@ dependencies = [
 
 [[package]]
 name = "meta-client"
-version = "0.14.0"
+version = "0.14.1"
 dependencies = [
  "api",
  "async-trait",
@@ -6939,7 +6939,7 @@ dependencies = [
 
 [[package]]
 name = "meta-srv"
-version = "0.14.0"
+version = "0.14.1"
 dependencies = [
  "api",
  "async-trait",
@@ -7029,7 +7029,7 @@ dependencies = [
 
 [[package]]
 name = "metric-engine"
-version = "0.14.0"
+version = "0.14.1"
 dependencies = [
  "api",
  "aquamarine",
@@ -7118,7 +7118,7 @@ dependencies = [
 
 [[package]]
 name = "mito2"
-version = "0.14.0"
+version = "0.14.1"
 dependencies = [
  "api",
  "aquamarine",
@@ -7824,7 +7824,7 @@ dependencies = [
 
 [[package]]
 name = "object-store"
-version = "0.14.0"
+version = "0.14.1"
 dependencies = [
  "anyhow",
  "bytes",
@@ -8119,7 +8119,7 @@ dependencies = [
 
 [[package]]
 name = "operator"
-version = "0.14.0"
+version = "0.14.1"
 dependencies = [
  "ahash 0.8.11",
  "api",
@@ -8166,9 +8166,9 @@ dependencies = [
  "session",
  "snafu 0.8.5",
  "sql",
- "sqlparser 0.54.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=e98e6b322426a9d397a71efef17075966223c089)",
+ "sqlparser 0.54.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0cf6c04490d59435ee965edd2078e8855bd8471e)",
  "store-api",
- "substrait 0.14.0",
+ "substrait 0.14.1",
  "table",
  "tokio",
  "tokio-util",
@@ -8423,7 +8423,7 @@ dependencies = [
 
 [[package]]
 name = "partition"
-version = "0.14.0"
+version = "0.14.1"
 dependencies = [
  "api",
  "async-trait",
@@ -8443,7 +8443,7 @@ dependencies = [
  "session",
  "snafu 0.8.5",
  "sql",
- "sqlparser 0.54.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=e98e6b322426a9d397a71efef17075966223c089)",
+ "sqlparser 0.54.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0cf6c04490d59435ee965edd2078e8855bd8471e)",
  "store-api",
  "table",
 ]
@@ -8705,7 +8705,7 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"
 
 [[package]]
 name = "pipeline"
-version = "0.14.0"
+version = "0.14.1"
 dependencies = [
  "ahash 0.8.11",
  "api",
@@ -8847,7 +8847,7 @@ dependencies = [
 
 [[package]]
 name = "plugins"
-version = "0.14.0"
+version = "0.14.1"
 dependencies = [
  "auth",
  "clap 4.5.19",
@@ -9127,7 +9127,7 @@ dependencies = [
 
 [[package]]
 name = "promql"
-version = "0.14.0"
+version = "0.14.1"
 dependencies = [
  "ahash 0.8.11",
  "async-trait",
@@ -9373,7 +9373,7 @@ dependencies = [
 
 [[package]]
 name = "puffin"
-version = "0.14.0"
+version = "0.14.1"
 dependencies = [
  "async-compression 0.4.13",
  "async-trait",
@@ -9414,7 +9414,7 @@ dependencies = [
 
 [[package]]
 name = "query"
-version = "0.14.0"
+version = "0.14.1"
 dependencies = [
  "ahash 0.8.11",
  "api",
@@ -9477,10 +9477,10 @@ dependencies = [
  "session",
  "snafu 0.8.5",
  "sql",
- "sqlparser 0.54.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=e98e6b322426a9d397a71efef17075966223c089)",
+ "sqlparser 0.54.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0cf6c04490d59435ee965edd2078e8855bd8471e)",
  "statrs",
  "store-api",
- "substrait 0.14.0",
+ "substrait 0.14.1",
  "table",
  "tokio",
  "tokio-stream",
@@ -10005,15 +10005,14 @@ dependencies = [
 
 [[package]]
 name = "ring"
-version = "0.17.8"
+version = "0.17.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d"
+checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7"
 dependencies = [
  "cc",
  "cfg-if",
  "getrandom 0.2.15",
  "libc",
- "spin",
  "untrusted",
  "windows-sys 0.52.0",
 ]
@@ -10831,7 +10830,7 @@ dependencies = [
 
 [[package]]
 name = "servers"
-version = "0.14.0"
+version = "0.14.1"
 dependencies = [
  "ahash 0.8.11",
  "api",
@@ -10951,7 +10950,7 @@ dependencies = [
 
 [[package]]
 name = "session"
-version = "0.14.0"
+version = "0.14.1"
 dependencies = [
  "api",
  "arc-swap",
@@ -11276,7 +11275,7 @@ dependencies = [
 
 [[package]]
 name = "sql"
-version = "0.14.0"
+version = "0.14.1"
 dependencies = [
  "api",
  "chrono",
@@ -11304,7 +11303,7 @@ dependencies = [
  "serde",
  "serde_json",
  "snafu 0.8.5",
- "sqlparser 0.54.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=e98e6b322426a9d397a71efef17075966223c089)",
+ "sqlparser 0.54.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0cf6c04490d59435ee965edd2078e8855bd8471e)",
  "sqlparser_derive 0.1.1",
  "store-api",
  "table",
@@ -11331,7 +11330,7 @@ dependencies = [
 
 [[package]]
 name = "sqlness-runner"
-version = "0.14.0"
+version = "0.14.1"
 dependencies = [
  "async-trait",
  "clap 4.5.19",
@@ -11373,7 +11372,7 @@ dependencies = [
 [[package]]
 name = "sqlparser"
 version = "0.54.0"
-source = "git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=e98e6b322426a9d397a71efef17075966223c089#e98e6b322426a9d397a71efef17075966223c089"
+source = "git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0cf6c04490d59435ee965edd2078e8855bd8471e#0cf6c04490d59435ee965edd2078e8855bd8471e"
 dependencies = [
  "lazy_static",
  "log",
@@ -11381,7 +11380,7 @@ dependencies = [
  "regex",
  "serde",
  "sqlparser 0.54.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "sqlparser_derive 0.3.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=e98e6b322426a9d397a71efef17075966223c089)",
+ "sqlparser_derive 0.3.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0cf6c04490d59435ee965edd2078e8855bd8471e)",
 ]
 
 [[package]]
@@ -11409,7 +11408,7 @@ dependencies = [
 [[package]]
 name = "sqlparser_derive"
 version = "0.3.0"
-source = "git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=e98e6b322426a9d397a71efef17075966223c089#e98e6b322426a9d397a71efef17075966223c089"
+source = "git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0cf6c04490d59435ee965edd2078e8855bd8471e#0cf6c04490d59435ee965edd2078e8855bd8471e"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -11650,7 +11649,7 @@ dependencies = [
 
 [[package]]
 name = "store-api"
-version = "0.14.0"
+version = "0.14.1"
 dependencies = [
  "api",
  "aquamarine",
@@ -11799,7 +11798,7 @@ dependencies = [
 
 [[package]]
 name = "substrait"
-version = "0.14.0"
+version = "0.14.1"
 dependencies = [
  "async-trait",
  "bytes",
@@ -11979,7 +11978,7 @@ dependencies = [
 
 [[package]]
 name = "table"
-version = "0.14.0"
+version = "0.14.1"
 dependencies = [
  "api",
  "async-trait",
@@ -12230,7 +12229,7 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76"
 
 [[package]]
 name = "tests-fuzz"
-version = "0.14.0"
+version = "0.14.1"
 dependencies = [
  "arbitrary",
  "async-trait",
@@ -12264,7 +12263,7 @@ dependencies = [
  "serde_yaml",
  "snafu 0.8.5",
  "sql",
- "sqlparser 0.54.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=e98e6b322426a9d397a71efef17075966223c089)",
+ "sqlparser 0.54.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0cf6c04490d59435ee965edd2078e8855bd8471e)",
  "sqlx",
  "store-api",
  "strum 0.27.1",
@@ -12274,7 +12273,7 @@ dependencies = [
 
 [[package]]
 name = "tests-integration"
-version = "0.14.0"
+version = "0.14.1"
 dependencies = [
  "api",
  "arrow-flight",
@@ -12341,7 +12340,7 @@ dependencies = [
  "sql",
  "sqlx",
  "store-api",
- "substrait 0.14.0",
+ "substrait 0.14.1",
  "table",
  "tempfile",
  "time",
22
Cargo.toml
22
Cargo.toml
@@ -68,7 +68,7 @@ members = [
 resolver = "2"
 
 [workspace.package]
-version = "0.14.0"
+version = "0.14.1"
 edition = "2021"
 license = "Apache-2.0"
 
@@ -112,15 +112,15 @@ clap = { version = "4.4", features = ["derive"] }
 config = "0.13.0"
 crossbeam-utils = "0.8"
 dashmap = "6.1"
-datafusion = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "5bbedc6704162afb03478f56ffb629405a4e1220" }
-datafusion-common = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "5bbedc6704162afb03478f56ffb629405a4e1220" }
-datafusion-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "5bbedc6704162afb03478f56ffb629405a4e1220" }
-datafusion-functions = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "5bbedc6704162afb03478f56ffb629405a4e1220" }
-datafusion-optimizer = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "5bbedc6704162afb03478f56ffb629405a4e1220" }
-datafusion-physical-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "5bbedc6704162afb03478f56ffb629405a4e1220" }
-datafusion-physical-plan = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "5bbedc6704162afb03478f56ffb629405a4e1220" }
-datafusion-sql = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "5bbedc6704162afb03478f56ffb629405a4e1220" }
-datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "5bbedc6704162afb03478f56ffb629405a4e1220" }
+datafusion = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "e104c7cf62b11dd5fe41461b82514978234326b4" }
+datafusion-common = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "e104c7cf62b11dd5fe41461b82514978234326b4" }
+datafusion-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "e104c7cf62b11dd5fe41461b82514978234326b4" }
+datafusion-functions = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "e104c7cf62b11dd5fe41461b82514978234326b4" }
+datafusion-optimizer = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "e104c7cf62b11dd5fe41461b82514978234326b4" }
+datafusion-physical-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "e104c7cf62b11dd5fe41461b82514978234326b4" }
+datafusion-physical-plan = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "e104c7cf62b11dd5fe41461b82514978234326b4" }
+datafusion-sql = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "e104c7cf62b11dd5fe41461b82514978234326b4" }
+datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "e104c7cf62b11dd5fe41461b82514978234326b4" }
 deadpool = "0.12"
 deadpool-postgres = "0.14"
 derive_builder = "0.20"
@@ -191,7 +191,7 @@ simd-json = "0.15"
 similar-asserts = "1.6.0"
 smallvec = { version = "1", features = ["serde"] }
 snafu = "0.8"
-sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "e98e6b322426a9d397a71efef17075966223c089", features = [
+sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "0cf6c04490d59435ee965edd2078e8855bd8471e", features = [
  "visitor",
  "serde",
 ] } # branch = "v0.54.x"
@@ -13,10 +13,8 @@
 // limitations under the License.
 
 use std::sync::Arc;
-mod greatest;
 mod to_unixtime;
 
-use greatest::GreatestFunction;
 use to_unixtime::ToUnixtimeFunction;
 
 use crate::function_registry::FunctionRegistry;
@@ -26,6 +24,5 @@ pub(crate) struct TimestampFunction;
 impl TimestampFunction {
     pub fn register(registry: &FunctionRegistry) {
         registry.register(Arc::new(ToUnixtimeFunction));
-        registry.register(Arc::new(GreatestFunction));
     }
 }
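The hunks above detach the `greatest` scalar function from the timestamp module, leaving only `to_unixtime` wired into the registry. For readers unfamiliar with this register-into-a-shared-registry shape, here is a minimal, self-contained sketch of it; the `Function` trait and `FunctionRegistry` below are simplified stand-ins for illustration, not GreptimeDB's actual definitions:

```rust
use std::collections::HashMap;
use std::sync::{Arc, RwLock};

// Simplified stand-ins for the crate's Function / FunctionRegistry types.
trait Function: Send + Sync {
    fn name(&self) -> &str;
}

#[derive(Default)]
struct FunctionRegistry {
    // Name -> implementation map behind a lock, shared across the engine.
    functions: RwLock<HashMap<String, Arc<dyn Function>>>,
}

impl FunctionRegistry {
    fn register(&self, func: Arc<dyn Function>) {
        self.functions
            .write()
            .unwrap()
            .insert(func.name().to_string(), func);
    }
}

struct ToUnixtimeFunction;
impl Function for ToUnixtimeFunction {
    fn name(&self) -> &str {
        "to_unixtime"
    }
}

// After the change above, only ToUnixtimeFunction is registered here.
struct TimestampFunction;
impl TimestampFunction {
    fn register(registry: &FunctionRegistry) {
        registry.register(Arc::new(ToUnixtimeFunction));
    }
}

fn main() {
    let registry = FunctionRegistry::default();
    TimestampFunction::register(&registry);
    assert!(registry.functions.read().unwrap().contains_key("to_unixtime"));
}
```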
@@ -1,328 +0,0 @@
-// Copyright 2023 Greptime Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-use std::fmt::{self};
-
-use common_query::error::{
-    self, ArrowComputeSnafu, InvalidFuncArgsSnafu, Result, UnsupportedInputDataTypeSnafu,
-};
-use common_query::prelude::{Signature, Volatility};
-use datafusion::arrow::compute::kernels::cmp::gt;
-use datatypes::arrow::array::AsArray;
-use datatypes::arrow::compute::cast;
-use datatypes::arrow::compute::kernels::zip;
-use datatypes::arrow::datatypes::{
-    DataType as ArrowDataType, Date32Type, TimeUnit, TimestampMicrosecondType,
-    TimestampMillisecondType, TimestampNanosecondType, TimestampSecondType,
-};
-use datatypes::prelude::ConcreteDataType;
-use datatypes::types::TimestampType;
-use datatypes::vectors::{Helper, VectorRef};
-use snafu::{ensure, ResultExt};
-
-use crate::function::{Function, FunctionContext};
-
-#[derive(Clone, Debug, Default)]
-pub struct GreatestFunction;
-
-const NAME: &str = "greatest";
-
-macro_rules! gt_time_types {
-    ($ty: ident, $columns:expr) => {{
-        let column1 = $columns[0].to_arrow_array();
-        let column2 = $columns[1].to_arrow_array();
-
-        let column1 = column1.as_primitive::<$ty>();
-        let column2 = column2.as_primitive::<$ty>();
-        let boolean_array = gt(&column1, &column2).context(ArrowComputeSnafu)?;
-
-        let result = zip::zip(&boolean_array, &column1, &column2).context(ArrowComputeSnafu)?;
-        Helper::try_into_vector(&result).context(error::FromArrowArraySnafu)
-    }};
-}
-
-impl Function for GreatestFunction {
-    fn name(&self) -> &str {
-        NAME
-    }
-
-    fn return_type(&self, input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
-        ensure!(
-            input_types.len() == 2,
-            InvalidFuncArgsSnafu {
-                err_msg: format!(
-                    "The length of the args is not correct, expect exactly two, have: {}",
-                    input_types.len()
-                )
-            }
-        );
-
-        match &input_types[0] {
-            ConcreteDataType::String(_) => Ok(ConcreteDataType::timestamp_millisecond_datatype()),
-            ConcreteDataType::Date(_) => Ok(ConcreteDataType::date_datatype()),
-            ConcreteDataType::Timestamp(ts_type) => Ok(ConcreteDataType::Timestamp(*ts_type)),
-            _ => UnsupportedInputDataTypeSnafu {
-                function: NAME,
-                datatypes: input_types,
-            }
-            .fail(),
-        }
-    }
-
-    fn signature(&self) -> Signature {
-        Signature::uniform(
-            2,
-            vec![
-                ConcreteDataType::string_datatype(),
-                ConcreteDataType::date_datatype(),
-                ConcreteDataType::timestamp_nanosecond_datatype(),
-                ConcreteDataType::timestamp_microsecond_datatype(),
-                ConcreteDataType::timestamp_millisecond_datatype(),
-                ConcreteDataType::timestamp_second_datatype(),
-            ],
-            Volatility::Immutable,
-        )
-    }
-
-    fn eval(&self, _func_ctx: &FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
-        ensure!(
-            columns.len() == 2,
-            InvalidFuncArgsSnafu {
-                err_msg: format!(
-                    "The length of the args is not correct, expect exactly two, have: {}",
-                    columns.len()
-                ),
-            }
-        );
-        match columns[0].data_type() {
-            ConcreteDataType::String(_) => {
-                let column1 = cast(
-                    &columns[0].to_arrow_array(),
-                    &ArrowDataType::Timestamp(TimeUnit::Millisecond, None),
-                )
-                .context(ArrowComputeSnafu)?;
-                let column1 = column1.as_primitive::<TimestampMillisecondType>();
-                let column2 = cast(
-                    &columns[1].to_arrow_array(),
-                    &ArrowDataType::Timestamp(TimeUnit::Millisecond, None),
-                )
-                .context(ArrowComputeSnafu)?;
-                let column2 = column2.as_primitive::<TimestampMillisecondType>();
-                let boolean_array = gt(&column1, &column2).context(ArrowComputeSnafu)?;
-                let result =
-                    zip::zip(&boolean_array, &column1, &column2).context(ArrowComputeSnafu)?;
-                Ok(Helper::try_into_vector(&result).context(error::FromArrowArraySnafu)?)
-            }
-            ConcreteDataType::Date(_) => gt_time_types!(Date32Type, columns),
-            ConcreteDataType::Timestamp(ts_type) => match ts_type {
-                TimestampType::Second(_) => gt_time_types!(TimestampSecondType, columns),
-                TimestampType::Millisecond(_) => {
-                    gt_time_types!(TimestampMillisecondType, columns)
-                }
-                TimestampType::Microsecond(_) => {
-                    gt_time_types!(TimestampMicrosecondType, columns)
-                }
-                TimestampType::Nanosecond(_) => {
-                    gt_time_types!(TimestampNanosecondType, columns)
-                }
-            },
-            _ => UnsupportedInputDataTypeSnafu {
-                function: NAME,
-                datatypes: columns.iter().map(|c| c.data_type()).collect::<Vec<_>>(),
-            }
-            .fail(),
-        }
-    }
-}
-
-impl fmt::Display for GreatestFunction {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        write!(f, "GREATEST")
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use std::sync::Arc;
-
-    use common_time::timestamp::TimeUnit;
-    use common_time::{Date, Timestamp};
-    use datatypes::types::{
-        DateType, TimestampMicrosecondType, TimestampMillisecondType, TimestampNanosecondType,
-        TimestampSecondType,
-    };
-    use datatypes::value::Value;
-    use datatypes::vectors::{
-        DateVector, StringVector, TimestampMicrosecondVector, TimestampMillisecondVector,
-        TimestampNanosecondVector, TimestampSecondVector, Vector,
-    };
-    use paste::paste;
-
-    use super::*;
-
-    #[test]
-    fn test_greatest_takes_string_vector() {
-        let function = GreatestFunction;
-        assert_eq!(
-            function
-                .return_type(&[
-                    ConcreteDataType::string_datatype(),
-                    ConcreteDataType::string_datatype()
-                ])
-                .unwrap(),
-            ConcreteDataType::timestamp_millisecond_datatype()
-        );
-        let columns = vec![
-            Arc::new(StringVector::from(vec![
-                "1970-01-01".to_string(),
-                "2012-12-23".to_string(),
-            ])) as _,
-            Arc::new(StringVector::from(vec![
-                "2001-02-01".to_string(),
-                "1999-01-01".to_string(),
-            ])) as _,
-        ];
-
-        let result = function
-            .eval(&FunctionContext::default(), &columns)
-            .unwrap();
-        let result = result
-            .as_any()
-            .downcast_ref::<TimestampMillisecondVector>()
-            .unwrap();
-        assert_eq!(result.len(), 2);
-        assert_eq!(
-            result.get(0),
-            Value::Timestamp(Timestamp::from_str("2001-02-01 00:00:00", None).unwrap())
-        );
-        assert_eq!(
-            result.get(1),
-            Value::Timestamp(Timestamp::from_str("2012-12-23 00:00:00", None).unwrap())
-        );
-    }
-
-    #[test]
-    fn test_greatest_takes_date_vector() {
-        let function = GreatestFunction;
-        assert_eq!(
-            function
-                .return_type(&[
-                    ConcreteDataType::date_datatype(),
-                    ConcreteDataType::date_datatype()
-                ])
-                .unwrap(),
-            ConcreteDataType::Date(DateType)
-        );
-
-        let columns = vec![
-            Arc::new(DateVector::from_slice(vec![-1, 2])) as _,
-            Arc::new(DateVector::from_slice(vec![0, 1])) as _,
-        ];
-
-        let result = function
-            .eval(&FunctionContext::default(), &columns)
-            .unwrap();
-        let result = result.as_any().downcast_ref::<DateVector>().unwrap();
-        assert_eq!(result.len(), 2);
-        assert_eq!(
-            result.get(0),
-            Value::Date(Date::from_str_utc("1970-01-01").unwrap())
-        );
-        assert_eq!(
-            result.get(1),
-            Value::Date(Date::from_str_utc("1970-01-03").unwrap())
-        );
-    }
-
-    #[test]
-    fn test_greatest_takes_datetime_vector() {
-        let function = GreatestFunction;
-        assert_eq!(
-            function
-                .return_type(&[
-                    ConcreteDataType::timestamp_millisecond_datatype(),
-                    ConcreteDataType::timestamp_millisecond_datatype()
-                ])
-                .unwrap(),
-            ConcreteDataType::timestamp_millisecond_datatype()
-        );
-
-        let columns = vec![
-            Arc::new(TimestampMillisecondVector::from_slice(vec![-1, 2])) as _,
-            Arc::new(TimestampMillisecondVector::from_slice(vec![0, 1])) as _,
-        ];
-
-        let result = function
-            .eval(&FunctionContext::default(), &columns)
-            .unwrap();
-        let result = result
-            .as_any()
-            .downcast_ref::<TimestampMillisecondVector>()
-            .unwrap();
-        assert_eq!(result.len(), 2);
-        assert_eq!(
-            result.get(0),
-            Value::Timestamp(Timestamp::from_str("1970-01-01 00:00:00", None).unwrap())
-        );
-        assert_eq!(
-            result.get(1),
-            Value::Timestamp(Timestamp::from_str("1970-01-01 00:00:00.002", None).unwrap())
-        );
-    }
-
-    macro_rules! test_timestamp {
-        ($type: expr,$unit: ident) => {
-            paste! {
-                #[test]
-                fn [<test_greatest_takes_ $unit:lower _vector>]() {
-                    let function = GreatestFunction;
-                    assert_eq!(
-                        function.return_type(&[$type, $type]).unwrap(),
-                        ConcreteDataType::Timestamp(TimestampType::$unit([<Timestamp $unit Type>]))
-                    );
-
-                    let columns = vec![
-                        Arc::new([<Timestamp $unit Vector>]::from_slice(vec![-1, 2])) as _,
-                        Arc::new([<Timestamp $unit Vector>]::from_slice(vec![0, 1])) as _,
-                    ];
-
-                    let result = function.eval(&FunctionContext::default(), &columns).unwrap();
-                    let result = result.as_any().downcast_ref::<[<Timestamp $unit Vector>]>().unwrap();
-                    assert_eq!(result.len(), 2);
-                    assert_eq!(
-                        result.get(0),
-                        Value::Timestamp(Timestamp::new(0, TimeUnit::$unit))
-                    );
-                    assert_eq!(
-                        result.get(1),
-                        Value::Timestamp(Timestamp::new(2, TimeUnit::$unit))
-                    );
-                }
-            }
-        }
-    }
-
-    test_timestamp!(
-        ConcreteDataType::timestamp_nanosecond_datatype(),
-        Nanosecond
-    );
-    test_timestamp!(
-        ConcreteDataType::timestamp_microsecond_datatype(),
-        Microsecond
-    );
-    test_timestamp!(
-        ConcreteDataType::timestamp_millisecond_datatype(),
-        Millisecond
-    );
-    test_timestamp!(ConcreteDataType::timestamp_second_datatype(), Second);
-}
@@ -115,6 +115,13 @@ impl Function for UddSketchCalcFunction {
             }
         };
 
+        // Check if the sketch is empty, if so, return null
+        // This is important to avoid panics when calling estimate_quantile on an empty sketch
+        // In practice, this will happen if input is all null
+        if sketch.bucket_iter().count() == 0 {
+            builder.push_null();
+            continue;
+        }
         // Compute the estimated quantile from the sketch
         let result = sketch.estimate_quantile(perc);
         builder.push(Some(result));
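The comments introduced above spell out the reasoning: `estimate_quantile` can panic when the sketch holds no buckets (which happens when the input is all null), so the code probes for emptiness and pushes a null instead. A minimal runnable sketch of the same guard, with a plain `Vec<f64>` standing in for the UDDSketch state (the names below are illustrative, not the real crate API):

```rust
/// Quantile estimate that returns None instead of panicking on empty input.
/// `samples` stands in for the sketch's buckets; `q` must be in [0.0, 1.0].
fn estimate_quantile_opt(samples: &mut Vec<f64>, q: f64) -> Option<f64> {
    // Mirror of the diff's `bucket_iter().count() == 0` guard: bail out early
    // (the caller pushes a null) rather than index into an empty collection.
    if samples.is_empty() {
        return None;
    }
    samples.sort_by(|a, b| a.partial_cmp(b).unwrap());
    let idx = ((samples.len() - 1) as f64 * q).round() as usize;
    Some(samples[idx])
}

fn main() {
    let mut empty: Vec<f64> = vec![];
    assert_eq!(estimate_quantile_opt(&mut empty, 0.5), None); // no panic
    let mut data = vec![3.0, 1.0, 2.0];
    assert_eq!(estimate_quantile_opt(&mut data, 0.5), Some(2.0));
}
```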
@@ -32,3 +32,9 @@ pub const SLOW_QUERY_THRESHOLD: Duration = Duration::from_secs(60);
 
 /// The minimum duration between two queries execution by batching mode task
 const MIN_REFRESH_DURATION: Duration = Duration::new(5, 0);
+
+/// Grpc connection timeout
+const GRPC_CONN_TIMEOUT: Duration = Duration::from_secs(5);
+
+/// Grpc max retry number
+const GRPC_MAX_RETRIES: u32 = 3;
@@ -25,12 +25,15 @@ use common_meta::cluster::{NodeInfo, NodeInfoKey, Role};
 use common_meta::peer::Peer;
 use common_meta::rpc::store::RangeRequest;
 use common_query::Output;
+use common_telemetry::warn;
 use meta_client::client::MetaClient;
 use servers::query_handler::grpc::GrpcQueryHandler;
 use session::context::{QueryContextBuilder, QueryContextRef};
 use snafu::{OptionExt, ResultExt};
 
-use crate::batching_mode::DEFAULT_BATCHING_ENGINE_QUERY_TIMEOUT;
+use crate::batching_mode::{
+    DEFAULT_BATCHING_ENGINE_QUERY_TIMEOUT, GRPC_CONN_TIMEOUT, GRPC_MAX_RETRIES,
+};
 use crate::error::{ExternalSnafu, InvalidRequestSnafu, UnexpectedSnafu};
 use crate::Error;
 
@@ -99,7 +102,9 @@ impl FrontendClient {
         Self::Distributed {
             meta_client,
             chnl_mgr: {
-                let cfg = ChannelConfig::new().timeout(DEFAULT_BATCHING_ENGINE_QUERY_TIMEOUT);
+                let cfg = ChannelConfig::new()
+                    .connect_timeout(GRPC_CONN_TIMEOUT)
+                    .timeout(DEFAULT_BATCHING_ENGINE_QUERY_TIMEOUT);
                 ChannelManager::with_config(cfg)
             },
         }
@@ -223,12 +228,32 @@ impl FrontendClient {
                     peer: db.peer.clone(),
                 });
 
-                db.database
-                    .handle(req.clone())
-                    .await
-                    .with_context(|_| InvalidRequestSnafu {
-                        context: format!("Failed to handle request: {:?}", req),
-                    })
+                let mut retry = 0;
+
+                loop {
+                    let ret = db.database.handle(req.clone()).await.with_context(|_| {
+                        InvalidRequestSnafu {
+                            context: format!("Failed to handle request: {:?}", req),
+                        }
+                    });
+                    if let Err(err) = ret {
+                        if retry < GRPC_MAX_RETRIES {
+                            retry += 1;
+                            warn!(
+                                "Failed to send request to grpc handle at Peer={:?}, retry = {}, error = {:?}",
+                                db.peer, retry, err
+                            );
+                            continue;
+                        } else {
+                            common_telemetry::error!(
+                                "Failed to send request to grpc handle at Peer={:?} after {} retries, error = {:?}",
+                                db.peer, retry, err
+                            );
+                            return Err(err);
+                        }
+                    }
+                    return ret;
+                }
             }
             FrontendClient::Standalone { database_client } => {
                 let ctx = QueryContextBuilder::default()
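The loop added above retries a failed request up to `GRPC_MAX_RETRIES` times, logging a warning per attempt and surfacing the last error once the budget is spent. A stripped-down, runnable sketch of the same retry-with-limit shape; `handle` below is a stub standing in for `db.database.handle`, not the real client API:

```rust
const GRPC_MAX_RETRIES: u32 = 3;

/// Stub fallible call; fails twice, then succeeds.
fn handle(attempt: u32) -> Result<&'static str, String> {
    if attempt < 2 {
        Err(format!("transient failure on attempt {attempt}"))
    } else {
        Ok("response")
    }
}

fn send_with_retries() -> Result<&'static str, String> {
    let mut retry = 0;
    loop {
        match handle(retry) {
            Ok(resp) => return Ok(resp),
            Err(err) if retry < GRPC_MAX_RETRIES => {
                retry += 1;
                // warn! in the real code; plain stderr here.
                eprintln!("request failed (retry = {retry}): {err}");
            }
            // Budget exhausted: surface the last error, as the diff does.
            Err(err) => return Err(err),
        }
    }
}

fn main() {
    assert_eq!(send_with_retries(), Ok("response"));
}
```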
@@ -53,6 +53,7 @@ use crate::batching_mode::utils::{
|
||||
use crate::batching_mode::{
|
||||
DEFAULT_BATCHING_ENGINE_QUERY_TIMEOUT, MIN_REFRESH_DURATION, SLOW_QUERY_THRESHOLD,
|
||||
};
|
||||
use crate::df_optimizer::apply_df_optimizer;
|
||||
use crate::error::{
|
||||
ConvertColumnSchemaSnafu, DatafusionSnafu, ExternalSnafu, InvalidQuerySnafu,
|
||||
SubstraitEncodeLogicalPlanSnafu, UnexpectedSnafu,
|
||||
@@ -541,7 +542,10 @@ impl BatchingTask {
|
||||
.clone()
|
||||
.rewrite(&mut add_auto_column)
|
||||
.with_context(|_| DatafusionSnafu {
|
||||
context: format!("Failed to rewrite plan {:?}", self.config.plan),
|
||||
context: format!(
|
||||
"Failed to rewrite plan:\n {}\n",
|
||||
self.config.plan
|
||||
),
|
||||
})?
|
||||
.data;
|
||||
let schema_len = plan.schema().fields().len();
|
||||
@@ -573,16 +577,19 @@ impl BatchingTask {
|
||||
|
||||
let mut add_filter = AddFilterRewriter::new(expr);
|
||||
let mut add_auto_column = AddAutoColumnRewriter::new(sink_table_schema.clone());
|
||||
// make a not optimized plan for clearer unparse
|
||||
|
||||
let plan = sql_to_df_plan(query_ctx.clone(), engine.clone(), &self.config.query, false)
|
||||
.await?;
|
||||
plan.clone()
|
||||
let rewrite = plan
|
||||
.clone()
|
||||
.rewrite(&mut add_filter)
|
||||
.and_then(|p| p.data.rewrite(&mut add_auto_column))
|
||||
.with_context(|_| DatafusionSnafu {
|
||||
context: format!("Failed to rewrite plan {plan:?}"),
|
||||
context: format!("Failed to rewrite plan:\n {}\n", plan),
|
||||
})?
|
||||
.data
|
||||
.data;
|
||||
// only apply optimize after complex rewrite is done
|
||||
apply_df_optimizer(rewrite).await?
|
||||
};
|
||||
|
||||
Ok(Some((new_plan, schema_len)))

@@ -704,6 +704,28 @@ mod test {
),
"SELECT arrow_cast(date_bin(INTERVAL '1 MINS', numbers_with_ts.ts), 'Timestamp(Second, None)') AS time_window FROM numbers_with_ts WHERE ((ts >= CAST('2025-02-24 10:48:00' AS TIMESTAMP)) AND (ts <= CAST('2025-02-24 10:49:00' AS TIMESTAMP))) GROUP BY arrow_cast(date_bin(INTERVAL '1 MINS', numbers_with_ts.ts), 'Timestamp(Second, None)')"
),
// complex time window index with where
(
"SELECT arrow_cast(date_bin(INTERVAL '1 MINS', numbers_with_ts.ts), 'Timestamp(Second, None)') AS time_window FROM numbers_with_ts WHERE number in (2, 3, 4) GROUP BY time_window;",
Timestamp::new(1740394109, TimeUnit::Second),
(
"ts".to_string(),
Some(Timestamp::new(1740394080, TimeUnit::Second)),
Some(Timestamp::new(1740394140, TimeUnit::Second)),
),
"SELECT arrow_cast(date_bin(INTERVAL '1 MINS', numbers_with_ts.ts), 'Timestamp(Second, None)') AS time_window FROM numbers_with_ts WHERE numbers_with_ts.number IN (2, 3, 4) AND ((ts >= CAST('2025-02-24 10:48:00' AS TIMESTAMP)) AND (ts <= CAST('2025-02-24 10:49:00' AS TIMESTAMP))) GROUP BY arrow_cast(date_bin(INTERVAL '1 MINS', numbers_with_ts.ts), 'Timestamp(Second, None)')"
),
// complex time window index with between and
(
"SELECT arrow_cast(date_bin(INTERVAL '1 MINS', numbers_with_ts.ts), 'Timestamp(Second, None)') AS time_window FROM numbers_with_ts WHERE number BETWEEN 2 AND 4 GROUP BY time_window;",
Timestamp::new(1740394109, TimeUnit::Second),
(
"ts".to_string(),
Some(Timestamp::new(1740394080, TimeUnit::Second)),
Some(Timestamp::new(1740394140, TimeUnit::Second)),
),
"SELECT arrow_cast(date_bin(INTERVAL '1 MINS', numbers_with_ts.ts), 'Timestamp(Second, None)') AS time_window FROM numbers_with_ts WHERE (numbers_with_ts.number BETWEEN 2 AND 4) AND ((ts >= CAST('2025-02-24 10:48:00' AS TIMESTAMP)) AND (ts <= CAST('2025-02-24 10:49:00' AS TIMESTAMP))) GROUP BY arrow_cast(date_bin(INTERVAL '1 MINS', numbers_with_ts.ts), 'Timestamp(Second, None)')"
),
// no time index
(
"SELECT date_bin('5 minutes', ts) FROM numbers_with_ts;",

@@ -342,8 +342,8 @@ impl TreeNodeRewriter for AddAutoColumnRewriter {
}
} else {
return Err(DataFusionError::Plan(format!(
"Expect table have 0,1 or 2 columns more than query columns, found {} query columns {:?}, {} table columns {:?} at node {:?}",
query_col_cnt, exprs, table_col_cnt, self.schema.column_schemas(), node
"Expect table have 0,1 or 2 columns more than query columns, found {} query columns {:?}, {} table columns {:?}",
query_col_cnt, exprs, table_col_cnt, self.schema.column_schemas()
)));
}

@@ -406,7 +409,9 @@ mod test {
use datatypes::prelude::ConcreteDataType;
use datatypes::schema::{ColumnSchema, Schema};
use pretty_assertions::assert_eq;
use query::query_engine::DefaultSerializer;
use session::context::QueryContext;
use substrait::{DFLogicalSubstraitConvertor, SubstraitPlan};

use super::*;
use crate::test_utils::create_test_query_engine;
@@ -701,4 +703,18 @@ mod test {
);
}
}

#[tokio::test]
async fn test_null_cast() {
let query_engine = create_test_query_engine();
let ctx = QueryContext::arc();
let sql = "SELECT NULL::DOUBLE FROM numbers_with_ts";
let plan = sql_to_df_plan(ctx, query_engine.clone(), sql, false)
.await
.unwrap();

let _sub_plan = DFLogicalSubstraitConvertor {}
.encode(&plan, DefaultSerializer)
.unwrap();
}
}

@@ -25,7 +25,6 @@ use datafusion::config::ConfigOptions;
use datafusion::error::DataFusionError;
use datafusion::functions_aggregate::count::count_udaf;
use datafusion::functions_aggregate::sum::sum_udaf;
use datafusion::optimizer::analyzer::count_wildcard_rule::CountWildcardRule;
use datafusion::optimizer::analyzer::type_coercion::TypeCoercion;
use datafusion::optimizer::common_subexpr_eliminate::CommonSubexprEliminate;
use datafusion::optimizer::optimize_projections::OptimizeProjections;
@@ -42,6 +41,7 @@ use datafusion_expr::{
BinaryExpr, ColumnarValue, Expr, Operator, Projection, ScalarFunctionArgs, ScalarUDFImpl,
Signature, TypeSignature, Volatility,
};
use query::optimizer::count_wildcard::CountWildcardToTimeIndexRule;
use query::parser::QueryLanguageParser;
use query::query_engine::DefaultSerializer;
use query::QueryEngine;
@@ -61,9 +61,9 @@ pub async fn apply_df_optimizer(
) -> Result<datafusion_expr::LogicalPlan, Error> {
let cfg = ConfigOptions::new();
let analyzer = Analyzer::with_rules(vec![
Arc::new(CountWildcardRule::new()),
Arc::new(AvgExpandRule::new()),
Arc::new(TumbleExpandRule::new()),
Arc::new(CountWildcardToTimeIndexRule),
Arc::new(AvgExpandRule),
Arc::new(TumbleExpandRule),
Arc::new(CheckGroupByRule::new()),
Arc::new(TypeCoercion::new()),
]);
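
A minimal sketch (assuming DataFusion's `Analyzer` API, not code from this PR) of how such a rule list is driven; rules run in the given order, which is why `TypeCoercion` stays last:

use std::sync::Arc;

use datafusion::config::ConfigOptions;
use datafusion::optimizer::analyzer::type_coercion::TypeCoercion;
use datafusion::optimizer::analyzer::Analyzer;
use datafusion_expr::LogicalPlan;

fn analyze(plan: LogicalPlan) -> datafusion_common::Result<LogicalPlan> {
    let cfg = ConfigOptions::new();
    // One built-in rule for brevity; the list above also holds custom rules.
    let analyzer = Analyzer::with_rules(vec![Arc::new(TypeCoercion::new())]);
    // Applies every rule in order, then validates the resulting plan.
    analyzer.execute_and_check(plan, &cfg, |_plan, _rule| {})
}
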
@@ -128,13 +128,7 @@ pub async fn sql_to_flow_plan(
}

#[derive(Debug)]
struct AvgExpandRule {}

impl AvgExpandRule {
pub fn new() -> Self {
Self {}
}
}
struct AvgExpandRule;

impl AnalyzerRule for AvgExpandRule {
fn analyze(
@@ -331,13 +325,7 @@ impl TreeNodeRewriter for ExpandAvgRewriter<'_> {

/// expand tumble in aggr expr to tumble_start and tumble_end with column name like `window_start`
#[derive(Debug)]
struct TumbleExpandRule {}

impl TumbleExpandRule {
pub fn new() -> Self {
Self {}
}
}
struct TumbleExpandRule;

impl AnalyzerRule for TumbleExpandRule {
fn analyze(

@@ -302,7 +302,10 @@ impl PartitionTreeMemtable {
fn update_stats(&self, metrics: &WriteMetrics) {
// Only let the tracker track value bytes.
self.alloc_tracker.on_allocation(metrics.value_bytes);
metrics.update_timestamp_range(&self.max_timestamp, &self.min_timestamp);
self.max_timestamp
.fetch_max(metrics.max_ts, Ordering::SeqCst);
self.min_timestamp
.fetch_min(metrics.min_ts, Ordering::SeqCst);
}
}
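
The change above works because `AtomicI64::fetch_max`/`fetch_min` already perform the compare-and-swap loop internally, which is what makes the hand-rolled `update_timestamp_range` loop removed in the next hunk unnecessary. A minimal sketch of the replacement pattern (illustrative names, not code from this PR):

use std::sync::atomic::{AtomicI64, Ordering};

fn observe_timestamp(min_ts: &AtomicI64, max_ts: &AtomicI64, ts: i64) {
    // Each call atomically keeps the stored minimum/maximum up to date.
    min_ts.fetch_min(ts, Ordering::SeqCst);
    max_ts.fetch_max(ts, Ordering::SeqCst);
}
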


@@ -14,8 +14,6 @@

//! Internal metrics of the memtable.

use std::sync::atomic::{AtomicI64, Ordering};

/// Metrics of writing memtables.
pub(crate) struct WriteMetrics {
/// Size allocated by keys.
@@ -28,51 +26,6 @@ pub(crate) struct WriteMetrics {
pub(crate) max_ts: i64,
}

impl WriteMetrics {
/// Update the min/max timestamp range according to current write metric.
pub(crate) fn update_timestamp_range(&self, prev_max_ts: &AtomicI64, prev_min_ts: &AtomicI64) {
loop {
let current_min = prev_min_ts.load(Ordering::Relaxed);
if self.min_ts >= current_min {
break;
}

let Err(updated) = prev_min_ts.compare_exchange(
current_min,
self.min_ts,
Ordering::Relaxed,
Ordering::Relaxed,
) else {
break;
};

if updated == self.min_ts {
break;
}
}

loop {
let current_max = prev_max_ts.load(Ordering::Relaxed);
if self.max_ts <= current_max {
break;
}

let Err(updated) = prev_max_ts.compare_exchange(
current_max,
self.max_ts,
Ordering::Relaxed,
Ordering::Relaxed,
) else {
break;
};

if updated == self.max_ts {
break;
}
}
}
}

impl Default for WriteMetrics {
fn default() -> Self {
Self {

@@ -147,7 +147,8 @@ impl TimeSeriesMemtable {
fn update_stats(&self, stats: WriteMetrics) {
self.alloc_tracker
.on_allocation(stats.key_bytes + stats.value_bytes);
stats.update_timestamp_range(&self.max_timestamp, &self.min_timestamp);
self.max_timestamp.fetch_max(stats.max_ts, Ordering::SeqCst);
self.min_timestamp.fetch_min(stats.min_ts, Ordering::SeqCst);
}

fn write_key_value(&self, kv: KeyValue, stats: &mut WriteMetrics) -> Result<()> {

@@ -322,13 +322,10 @@ impl ScanRegion {
let memtables: Vec<_> = memtables
.into_iter()
.filter(|mem| {
if mem.is_empty() {
return false;
}
let stats = mem.stats();
// Safety: the memtable is not empty.
let (start, end) = stats.time_range().unwrap();
// check if memtable is empty by reading stats.
let Some((start, end)) = mem.stats().time_range() else {
return false;
};
// The time range of the memtable is inclusive.
let memtable_range = TimestampRange::new_inclusive(Some(start), Some(end));
memtable_range.intersects(&time_range)

@@ -134,6 +134,7 @@ impl WriteFormat {

/// Helper for reading the SST format.
pub struct ReadFormat {
/// The metadata stored in the SST.
metadata: RegionMetadataRef,
/// SST file schema.
arrow_schema: SchemaRef,
@@ -305,17 +306,23 @@ impl ReadFormat {
&self,
row_groups: &[impl Borrow<RowGroupMetaData>],
column_id: ColumnId,
) -> Option<ArrayRef> {
let column = self.metadata.column_by_id(column_id)?;
) -> StatValues {
let Some(column) = self.metadata.column_by_id(column_id) else {
// No such column in the SST.
return StatValues::NoColumn;
};
match column.semantic_type {
SemanticType::Tag => self.tag_values(row_groups, column, true),
SemanticType::Field => {
let index = self.field_id_to_index.get(&column_id)?;
Self::column_values(row_groups, column, *index, true)
// Safety: `field_id_to_index` is initialized by the semantic type.
let index = self.field_id_to_index.get(&column_id).unwrap();
let stats = Self::column_values(row_groups, column, *index, true);
StatValues::from_stats_opt(stats)
}
SemanticType::Timestamp => {
let index = self.time_index_position();
Self::column_values(row_groups, column, index, true)
let stats = Self::column_values(row_groups, column, index, true);
StatValues::from_stats_opt(stats)
}
}
}
@@ -325,17 +332,23 @@ impl ReadFormat {
&self,
row_groups: &[impl Borrow<RowGroupMetaData>],
column_id: ColumnId,
) -> Option<ArrayRef> {
let column = self.metadata.column_by_id(column_id)?;
) -> StatValues {
let Some(column) = self.metadata.column_by_id(column_id) else {
// No such column in the SST.
return StatValues::NoColumn;
};
match column.semantic_type {
SemanticType::Tag => self.tag_values(row_groups, column, false),
SemanticType::Field => {
let index = self.field_id_to_index.get(&column_id)?;
Self::column_values(row_groups, column, *index, false)
// Safety: `field_id_to_index` is initialized by the semantic type.
let index = self.field_id_to_index.get(&column_id).unwrap();
let stats = Self::column_values(row_groups, column, *index, false);
StatValues::from_stats_opt(stats)
}
SemanticType::Timestamp => {
let index = self.time_index_position();
Self::column_values(row_groups, column, index, false)
let stats = Self::column_values(row_groups, column, index, false);
StatValues::from_stats_opt(stats)
}
}
}
@@ -345,17 +358,23 @@ impl ReadFormat {
&self,
row_groups: &[impl Borrow<RowGroupMetaData>],
column_id: ColumnId,
) -> Option<ArrayRef> {
let column = self.metadata.column_by_id(column_id)?;
) -> StatValues {
let Some(column) = self.metadata.column_by_id(column_id) else {
// No such column in the SST.
return StatValues::NoColumn;
};
match column.semantic_type {
SemanticType::Tag => None,
SemanticType::Tag => StatValues::NoStats,
SemanticType::Field => {
let index = self.field_id_to_index.get(&column_id)?;
Self::column_null_counts(row_groups, *index)
// Safety: `field_id_to_index` is initialized by the semantic type.
let index = self.field_id_to_index.get(&column_id).unwrap();
let stats = Self::column_null_counts(row_groups, *index);
StatValues::from_stats_opt(stats)
}
SemanticType::Timestamp => {
let index = self.time_index_position();
Self::column_null_counts(row_groups, index)
let stats = Self::column_null_counts(row_groups, index);
StatValues::from_stats_opt(stats)
}
}
}
@@ -390,8 +409,7 @@ impl ReadFormat {
row_groups: &[impl Borrow<RowGroupMetaData>],
column: &ColumnMetadata,
is_min: bool,
) -> Option<ArrayRef> {
let primary_key_encoding = self.metadata.primary_key_encoding;
) -> StatValues {
let is_first_tag = self
.metadata
.primary_key
@@ -400,9 +418,28 @@ impl ReadFormat {
.unwrap_or(false);
if !is_first_tag {
// Only the min-max of the first tag is available in the primary key.
return None;
return StatValues::NoStats;
}

StatValues::from_stats_opt(self.first_tag_values(row_groups, column, is_min))
}

/// Returns min/max values of the first tag.
/// Returns None if the tag does not have statistics.
fn first_tag_values(
&self,
row_groups: &[impl Borrow<RowGroupMetaData>],
column: &ColumnMetadata,
is_min: bool,
) -> Option<ArrayRef> {
debug_assert!(self
.metadata
.primary_key
.first()
.map(|id| *id == column.column_id)
.unwrap_or(false));

let primary_key_encoding = self.metadata.primary_key_encoding;
let converter = build_primary_key_codec_with_fields(
primary_key_encoding,
[(
@@ -452,6 +489,7 @@ impl ReadFormat {
}

/// Returns min/max values of specific non-tag columns.
/// Returns None if the column does not have statistics.
fn column_values(
row_groups: &[impl Borrow<RowGroupMetaData>],
column: &ColumnMetadata,
@@ -544,6 +582,29 @@ impl ReadFormat {
}
}

/// Values of column statistics of the SST.
///
/// It also distinguishes the case where the column is not found from the
/// case where the column exists but has no statistics.
pub enum StatValues {
/// Values of each row group.
Values(ArrayRef),
/// No such column.
NoColumn,
/// Column exists but has no statistics.
NoStats,
}

impl StatValues {
/// Creates a new `StatValues` instance from optional statistics.
pub fn from_stats_opt(stats: Option<ArrayRef>) -> Self {
match stats {
Some(stats) => StatValues::Values(stats),
None => StatValues::NoStats,
}
}
}
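
A hedged sketch (illustrative helper, not code from this PR) of how a caller can fold `StatValues` back into the `Option<ArrayRef>` DataFusion expects, applying a fallback only when the column is genuinely missing from the SST; the pruning-statistics hunks below do exactly this:

use datafusion::arrow::array::ArrayRef;

fn to_pruning_array(stats: StatValues, fallback: Option<ArrayRef>) -> Option<ArrayRef> {
    match stats {
        StatValues::Values(values) => Some(values),
        // Column added after the SST was written: prune using its default value.
        StatValues::NoColumn => fallback,
        // Column exists but carries no statistics: pruning must not guess.
        StatValues::NoStats => None,
    }
}
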

#[cfg(test)]
impl ReadFormat {
/// Creates a helper with existing `metadata` and all columns.

@@ -25,7 +25,7 @@ use parquet::file::metadata::RowGroupMetaData;
use store_api::metadata::RegionMetadataRef;
use store_api::storage::ColumnId;

use crate::sst::parquet::format::ReadFormat;
use crate::sst::parquet::format::{ReadFormat, StatValues};

/// Statistics for pruning row groups.
pub(crate) struct RowGroupPruningStats<'a, T> {
@@ -100,16 +100,18 @@ impl<T: Borrow<RowGroupMetaData>> PruningStatistics for RowGroupPruningStats<'_,
fn min_values(&self, column: &Column) -> Option<ArrayRef> {
let column_id = self.column_id_to_prune(&column.name)?;
match self.read_format.min_values(self.row_groups, column_id) {
Some(values) => Some(values),
None => self.compat_default_value(&column.name),
StatValues::Values(values) => Some(values),
StatValues::NoColumn => self.compat_default_value(&column.name),
StatValues::NoStats => None,
}
}

fn max_values(&self, column: &Column) -> Option<ArrayRef> {
let column_id = self.column_id_to_prune(&column.name)?;
match self.read_format.max_values(self.row_groups, column_id) {
Some(values) => Some(values),
None => self.compat_default_value(&column.name),
StatValues::Values(values) => Some(values),
StatValues::NoColumn => self.compat_default_value(&column.name),
StatValues::NoStats => None,
}
}

@@ -118,10 +120,12 @@ impl<T: Borrow<RowGroupMetaData>> PruningStatistics for RowGroupPruningStats<'_,
}

fn null_counts(&self, column: &Column) -> Option<ArrayRef> {
let Some(column_id) = self.column_id_to_prune(&column.name) else {
return self.compat_null_count(&column.name);
};
self.read_format.null_counts(self.row_groups, column_id)
let column_id = self.column_id_to_prune(&column.name)?;
match self.read_format.null_counts(self.row_groups, column_id) {
StatValues::Values(values) => Some(values),
StatValues::NoColumn => self.compat_null_count(&column.name),
StatValues::NoStats => None,
}
}

fn row_counts(&self, _column: &Column) -> Option<ArrayRef> {
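
For reference, a minimal sketch (assuming DataFusion's pruning API, not code from this PR) of how an implementation like `RowGroupPruningStats` is consumed: `PruningPredicate::prune` returns one keep/skip flag per row group.

use datafusion::physical_optimizer::pruning::{PruningPredicate, PruningStatistics};

fn prune_row_groups<S: PruningStatistics>(
    predicate: &PruningPredicate,
    stats: &S,
) -> datafusion_common::Result<Vec<bool>> {
    // `false` entries mark row groups whose statistics prove they can be skipped.
    predicate.prune(stats)
}
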

@@ -28,7 +28,7 @@ pub mod error;
pub mod executor;
pub mod log_query;
pub mod metrics;
mod optimizer;
pub mod optimizer;
pub mod options;
pub mod parser;
mod part_sort;

269 tests/cases/standalone/common/flow/flow_step_aggr.result Normal file
@@ -0,0 +1,269 @@
CREATE TABLE access_log (
"url" STRING,
user_id BIGINT,
ts TIMESTAMP TIME INDEX,
PRIMARY KEY ("url", user_id)
);

Affected Rows: 0

CREATE TABLE access_log_10s (
"url" STRING,
time_window timestamp time INDEX,
state BINARY,
PRIMARY KEY ("url")
);

Affected Rows: 0

CREATE FLOW calc_access_log_10s SINK TO access_log_10s
AS
SELECT
"url",
date_bin('10s'::INTERVAL, ts) AS time_window,
hll(user_id) AS state
FROM
access_log
GROUP BY
"url",
time_window;

Affected Rows: 0

-- insert 5 rows of data
INSERT INTO access_log VALUES
("/dashboard", 1, "2025-03-04 00:00:00"),
("/dashboard", 1, "2025-03-04 00:00:01"),
("/dashboard", 2, "2025-03-04 00:00:05"),
("/not_found", 3, "2025-03-04 00:00:11"),
("/dashboard", 4, "2025-03-04 00:00:15");

Affected Rows: 5

-- SQLNESS REPLACE (ADMIN\sFLUSH_FLOW\('\w+'\)\s+\|\n\+-+\+\n\|\s+)[0-9]+\s+\| $1 FLOW_FLUSHED |
ADMIN FLUSH_FLOW('calc_access_log_10s');

+-----------------------------------------+
| ADMIN FLUSH_FLOW('calc_access_log_10s') |
+-----------------------------------------+
| FLOW_FLUSHED |
+-----------------------------------------+

-- query should return 3 rows
-- SQLNESS SORT_RESULT 3 1
SELECT "url", time_window FROM access_log_10s
ORDER BY
time_window;

+------------+---------------------+
| url | time_window |
+------------+---------------------+
| /dashboard | 2025-03-04T00:00:00 |
| /dashboard | 2025-03-04T00:00:10 |
| /not_found | 2025-03-04T00:00:10 |
+------------+---------------------+

-- use hll_count to query the approximate data in access_log_10s
-- SQLNESS SORT_RESULT 3 1
SELECT "url", time_window, hll_count(state) FROM access_log_10s
ORDER BY
time_window;

+------------+---------------------+---------------------------------+
| url | time_window | hll_count(access_log_10s.state) |
+------------+---------------------+---------------------------------+
| /dashboard | 2025-03-04T00:00:00 | 2 |
| /dashboard | 2025-03-04T00:00:10 | 1 |
| /not_found | 2025-03-04T00:00:10 | 1 |
+------------+---------------------+---------------------------------+

-- further, we can aggregate 10 seconds of data to every minute, by using hll_merge to merge 10 seconds of hyperloglog state
-- SQLNESS SORT_RESULT 3 1
SELECT
"url",
date_bin('1 minute'::INTERVAL, time_window) AS time_window_1m,
hll_count(hll_merge(state)) as uv_per_min
FROM
access_log_10s
GROUP BY
"url",
time_window_1m
ORDER BY
time_window_1m;

+------------+---------------------+------------+
| url | time_window_1m | uv_per_min |
+------------+---------------------+------------+
| /dashboard | 2025-03-04T00:00:00 | 3 |
| /not_found | 2025-03-04T00:00:00 | 1 |
+------------+---------------------+------------+

DROP FLOW calc_access_log_10s;

Affected Rows: 0

DROP TABLE access_log_10s;

Affected Rows: 0

DROP TABLE access_log;

Affected Rows: 0

CREATE TABLE percentile_base (
"id" INT PRIMARY KEY,
"value" DOUBLE,
ts timestamp(0) time index
);

Affected Rows: 0

CREATE TABLE percentile_5s (
"percentile_state" BINARY,
time_window timestamp(0) time index
);

Affected Rows: 0

CREATE FLOW calc_percentile_5s SINK TO percentile_5s
AS
SELECT
uddsketch_state(128, 0.01, "value") AS "value",
date_bin('5 seconds'::INTERVAL, ts) AS time_window
FROM
percentile_base
WHERE
"value" > 0 AND "value" < 70
GROUP BY
time_window;

Affected Rows: 0

INSERT INTO percentile_base ("id", "value", ts) VALUES
(1, 10.0, 1),
(2, 20.0, 2),
(3, 30.0, 3),
(4, 40.0, 4),
(5, 50.0, 5),
(6, 60.0, 6),
(7, 70.0, 7),
(8, 80.0, 8),
(9, 90.0, 9),
(10, 100.0, 10);

Affected Rows: 10

-- SQLNESS REPLACE (ADMIN\sFLUSH_FLOW\('\w+'\)\s+\|\n\+-+\+\n\|\s+)[0-9]+\s+\| $1 FLOW_FLUSHED |
ADMIN FLUSH_FLOW('calc_percentile_5s');

+----------------------------------------+
| ADMIN FLUSH_FLOW('calc_percentile_5s') |
+----------------------------------------+
| FLOW_FLUSHED |
+----------------------------------------+

SELECT
time_window,
uddsketch_calc(0.99, `percentile_state`) AS p99
FROM
percentile_5s
ORDER BY
time_window;

+---------------------+--------------------+
| time_window | p99 |
+---------------------+--------------------+
| 1970-01-01T00:00:00 | 40.04777053326359 |
| 1970-01-01T00:00:05 | 59.745049810145126 |
+---------------------+--------------------+

DROP FLOW calc_percentile_5s;

Affected Rows: 0

DROP TABLE percentile_5s;

Affected Rows: 0

DROP TABLE percentile_base;

Affected Rows: 0

CREATE TABLE percentile_base (
"id" INT PRIMARY KEY,
"value" DOUBLE,
ts timestamp(0) time index
);

Affected Rows: 0

CREATE TABLE percentile_5s (
"percentile_state" BINARY,
time_window timestamp(0) time index
);

Affected Rows: 0

CREATE FLOW calc_percentile_5s SINK TO percentile_5s
AS
SELECT
uddsketch_state(128, 0.01, CASE WHEN "value" > 0 AND "value" < 70 THEN "value" ELSE NULL END) AS "value",
date_bin('5 seconds'::INTERVAL, ts) AS time_window
FROM
percentile_base
GROUP BY
time_window;

Affected Rows: 0

INSERT INTO percentile_base ("id", "value", ts) VALUES
(1, 10.0, 1),
(2, 20.0, 2),
(3, 30.0, 3),
(4, 40.0, 4),
(5, 50.0, 5),
(6, 60.0, 6),
(7, 70.0, 7),
(8, 80.0, 8),
(9, 90.0, 9),
(10, 100.0, 10);

Affected Rows: 10

-- SQLNESS REPLACE (ADMIN\sFLUSH_FLOW\('\w+'\)\s+\|\n\+-+\+\n\|\s+)[0-9]+\s+\| $1 FLOW_FLUSHED |
ADMIN FLUSH_FLOW('calc_percentile_5s');

+----------------------------------------+
| ADMIN FLUSH_FLOW('calc_percentile_5s') |
+----------------------------------------+
| FLOW_FLUSHED |
+----------------------------------------+

SELECT
time_window,
uddsketch_calc(0.99, percentile_state) AS p99
FROM
percentile_5s
ORDER BY
time_window;

+---------------------+--------------------+
| time_window | p99 |
+---------------------+--------------------+
| 1970-01-01T00:00:00 | 40.04777053326359 |
| 1970-01-01T00:00:05 | 59.745049810145126 |
| 1970-01-01T00:00:10 | |
+---------------------+--------------------+

DROP FLOW calc_percentile_5s;

Affected Rows: 0

DROP TABLE percentile_5s;

Affected Rows: 0

DROP TABLE percentile_base;

Affected Rows: 0

164 tests/cases/standalone/common/flow/flow_step_aggr.sql Normal file
@@ -0,0 +1,164 @@
CREATE TABLE access_log (
"url" STRING,
user_id BIGINT,
ts TIMESTAMP TIME INDEX,
PRIMARY KEY ("url", user_id)
);

CREATE TABLE access_log_10s (
"url" STRING,
time_window timestamp time INDEX,
state BINARY,
PRIMARY KEY ("url")
);

CREATE FLOW calc_access_log_10s SINK TO access_log_10s
AS
SELECT
"url",
date_bin('10s'::INTERVAL, ts) AS time_window,
hll(user_id) AS state
FROM
access_log
GROUP BY
"url",
time_window;

-- insert 5 rows of data
INSERT INTO access_log VALUES
("/dashboard", 1, "2025-03-04 00:00:00"),
("/dashboard", 1, "2025-03-04 00:00:01"),
("/dashboard", 2, "2025-03-04 00:00:05"),
("/not_found", 3, "2025-03-04 00:00:11"),
("/dashboard", 4, "2025-03-04 00:00:15");

-- SQLNESS REPLACE (ADMIN\sFLUSH_FLOW\('\w+'\)\s+\|\n\+-+\+\n\|\s+)[0-9]+\s+\| $1 FLOW_FLUSHED |
ADMIN FLUSH_FLOW('calc_access_log_10s');

-- query should return 3 rows
-- SQLNESS SORT_RESULT 3 1
SELECT "url", time_window FROM access_log_10s
ORDER BY
time_window;

-- use hll_count to query the approximate data in access_log_10s
-- SQLNESS SORT_RESULT 3 1
SELECT "url", time_window, hll_count(state) FROM access_log_10s
ORDER BY
time_window;

-- further, we can aggregate 10 seconds of data to every minute, by using hll_merge to merge 10 seconds of hyperloglog state
-- SQLNESS SORT_RESULT 3 1
SELECT
"url",
date_bin('1 minute'::INTERVAL, time_window) AS time_window_1m,
hll_count(hll_merge(state)) as uv_per_min
FROM
access_log_10s
GROUP BY
"url",
time_window_1m
ORDER BY
time_window_1m;

DROP FLOW calc_access_log_10s;
DROP TABLE access_log_10s;
DROP TABLE access_log;

CREATE TABLE percentile_base (
"id" INT PRIMARY KEY,
"value" DOUBLE,
ts timestamp(0) time index
);

CREATE TABLE percentile_5s (
"percentile_state" BINARY,
time_window timestamp(0) time index
);

CREATE FLOW calc_percentile_5s SINK TO percentile_5s
AS
SELECT
uddsketch_state(128, 0.01, "value") AS "value",
date_bin('5 seconds'::INTERVAL, ts) AS time_window
FROM
percentile_base
WHERE
"value" > 0 AND "value" < 70
GROUP BY
time_window;

INSERT INTO percentile_base ("id", "value", ts) VALUES
(1, 10.0, 1),
(2, 20.0, 2),
(3, 30.0, 3),
(4, 40.0, 4),
(5, 50.0, 5),
(6, 60.0, 6),
(7, 70.0, 7),
(8, 80.0, 8),
(9, 90.0, 9),
(10, 100.0, 10);

-- SQLNESS REPLACE (ADMIN\sFLUSH_FLOW\('\w+'\)\s+\|\n\+-+\+\n\|\s+)[0-9]+\s+\| $1 FLOW_FLUSHED |
ADMIN FLUSH_FLOW('calc_percentile_5s');

SELECT
time_window,
uddsketch_calc(0.99, `percentile_state`) AS p99
FROM
percentile_5s
ORDER BY
time_window;

DROP FLOW calc_percentile_5s;
DROP TABLE percentile_5s;
DROP TABLE percentile_base;

CREATE TABLE percentile_base (
"id" INT PRIMARY KEY,
"value" DOUBLE,
ts timestamp(0) time index
);

CREATE TABLE percentile_5s (
"percentile_state" BINARY,
time_window timestamp(0) time index
);

CREATE FLOW calc_percentile_5s SINK TO percentile_5s
AS
SELECT
uddsketch_state(128, 0.01, CASE WHEN "value" > 0 AND "value" < 70 THEN "value" ELSE NULL END) AS "value",
date_bin('5 seconds'::INTERVAL, ts) AS time_window
FROM
percentile_base
GROUP BY
time_window;

INSERT INTO percentile_base ("id", "value", ts) VALUES
(1, 10.0, 1),
(2, 20.0, 2),
(3, 30.0, 3),
(4, 40.0, 4),
(5, 50.0, 5),
(6, 60.0, 6),
(7, 70.0, 7),
(8, 80.0, 8),
(9, 90.0, 9),
(10, 100.0, 10);

-- SQLNESS REPLACE (ADMIN\sFLUSH_FLOW\('\w+'\)\s+\|\n\+-+\+\n\|\s+)[0-9]+\s+\| $1 FLOW_FLUSHED |
ADMIN FLUSH_FLOW('calc_percentile_5s');

SELECT
time_window,
uddsketch_calc(0.99, percentile_state) AS p99
FROM
percentile_5s
ORDER BY
time_window;

DROP FLOW calc_percentile_5s;
DROP TABLE percentile_5s;
DROP TABLE percentile_base;
@@ -9,7 +9,7 @@ select GREATEST('1999-01-30', '2023-03-01');
+-------------------------------------------------+
| greatest(Utf8("1999-01-30"),Utf8("2023-03-01")) |
+-------------------------------------------------+
| 2023-03-01T00:00:00 |
| 2023-03-01 |
+-------------------------------------------------+

select GREATEST('2000-02-11'::Date, '2020-12-30'::Date);

@@ -54,7 +54,11 @@ Error: 2000(InvalidSyntax), Invalid SQL syntax: sql parser error: Can't use the
-- 2.2 no align param
SELECT min(val) RANGE '5s' FROM host;

Error: 3000(PlanQuery), Error during planning: Missing argument in range select query
Error: 2000(InvalidSyntax), Invalid SQL syntax: sql parser error: ALIGN argument cannot be omitted in the range select query

SELECT min(val) RANGE '5s' FILL PREV FROM host;

Error: 2000(InvalidSyntax), Invalid SQL syntax: sql parser error: ALIGN argument cannot be omitted in the range select query

-- 2.3 type mismatch
SELECT covar(ceil(val), floor(val)) RANGE '20s' FROM host ALIGN '10s';

@@ -40,6 +40,8 @@ SELECT 1 RANGE '10s' FILL NULL FROM host ALIGN '1h' FILL NULL;

SELECT min(val) RANGE '5s' FROM host;

SELECT min(val) RANGE '5s' FILL PREV FROM host;

-- 2.3 type mismatch

SELECT covar(ceil(val), floor(val)) RANGE '20s' FROM host ALIGN '10s';

158 tests/cases/standalone/common/select/prune_pk.result Normal file
@@ -0,0 +1,158 @@
CREATE TABLE IF NOT EXISTS `test_multi_pk_filter` ( `namespace` STRING NULL, `env` STRING NULL DEFAULT 'NULL', `flag` INT NULL, `total` BIGINT NULL, `greptime_timestamp` TIMESTAMP(9) NOT NULL, TIME INDEX (`greptime_timestamp`), PRIMARY KEY (`namespace`, `env`, `flag`) ) ENGINE=mito;

Affected Rows: 0

INSERT INTO test_multi_pk_filter
(namespace, env, flag, total, greptime_timestamp)
VALUES ('thermostat_v2', 'production', 1, 5289, '2023-05-15 10:00:00');

Affected Rows: 1

INSERT INTO test_multi_pk_filter
(namespace, env, flag, total, greptime_timestamp)
VALUES ('thermostat_v2', 'production', 0, 421, '2023-05-15 10:05:00');

Affected Rows: 1

INSERT INTO test_multi_pk_filter
(namespace, env, flag, total, greptime_timestamp)
VALUES ('thermostat_v2', 'dev', 1, 356, '2023-05-15 10:10:00');

Affected Rows: 1

ADMIN FLUSH_TABLE('test_multi_pk_filter');

+-------------------------------------------+
| ADMIN FLUSH_TABLE('test_multi_pk_filter') |
+-------------------------------------------+
| 0 |
+-------------------------------------------+

INSERT INTO test_multi_pk_filter
(namespace, env, flag, total, greptime_timestamp)
VALUES ('thermostat_v2', 'dev', 1, 412, '2023-05-15 10:15:00');

Affected Rows: 1

INSERT INTO test_multi_pk_filter
(namespace, env, flag, total, greptime_timestamp)
VALUES ('thermostat_v2', 'dev', 1, 298, '2023-05-15 10:20:00');

Affected Rows: 1

INSERT INTO test_multi_pk_filter
(namespace, env, flag, total, greptime_timestamp)
VALUES ('thermostat_v2', 'production', 1, 5289, '2023-05-15 10:25:00');

Affected Rows: 1

INSERT INTO test_multi_pk_filter
(namespace, env, flag, total, greptime_timestamp)
VALUES ('thermostat_v2', 'production', 1, 5874, '2023-05-15 10:30:00');

Affected Rows: 1

ADMIN FLUSH_TABLE('test_multi_pk_filter');

+-------------------------------------------+
| ADMIN FLUSH_TABLE('test_multi_pk_filter') |
+-------------------------------------------+
| 0 |
+-------------------------------------------+

INSERT INTO test_multi_pk_filter
(namespace, env, flag, total, greptime_timestamp)
VALUES ('thermostat_v2', 'production', 1, 6132, '2023-05-15 10:35:00');

Affected Rows: 1

INSERT INTO test_multi_pk_filter
(namespace, env, flag, total, greptime_timestamp)
VALUES ('thermostat_v2', 'testing', 1, 1287, '2023-05-15 10:40:00');

Affected Rows: 1

INSERT INTO test_multi_pk_filter
(namespace, env, flag, total, greptime_timestamp)
VALUES ('thermostat_v2', 'testing', 1, 1432, '2023-05-15 10:45:00');

Affected Rows: 1

INSERT INTO test_multi_pk_filter
(namespace, env, flag, total, greptime_timestamp)
VALUES ('thermostat_v2', 'testing', 1, 1056, '2023-05-15 10:50:00');

Affected Rows: 1

SELECT greptime_timestamp, namespace, env, total FROM test_multi_pk_filter WHERE
greptime_timestamp BETWEEN '2023-05-15 10:00:00' AND '2023-05-15 11:00:00' AND flag = 1 AND namespace = 'thermostat_v2'
ORDER BY greptime_timestamp;

+---------------------+---------------+------------+-------+
| greptime_timestamp | namespace | env | total |
+---------------------+---------------+------------+-------+
| 2023-05-15T10:00:00 | thermostat_v2 | production | 5289 |
| 2023-05-15T10:10:00 | thermostat_v2 | dev | 356 |
| 2023-05-15T10:15:00 | thermostat_v2 | dev | 412 |
| 2023-05-15T10:20:00 | thermostat_v2 | dev | 298 |
| 2023-05-15T10:25:00 | thermostat_v2 | production | 5289 |
| 2023-05-15T10:30:00 | thermostat_v2 | production | 5874 |
| 2023-05-15T10:35:00 | thermostat_v2 | production | 6132 |
| 2023-05-15T10:40:00 | thermostat_v2 | testing | 1287 |
| 2023-05-15T10:45:00 | thermostat_v2 | testing | 1432 |
| 2023-05-15T10:50:00 | thermostat_v2 | testing | 1056 |
+---------------------+---------------+------------+-------+

SELECT greptime_timestamp, namespace, env, total FROM test_multi_pk_filter WHERE
greptime_timestamp BETWEEN '2023-05-15 10:00:00' AND '2023-05-15 11:00:00' AND flag = 1 AND namespace = 'thermostat_v2' AND env='dev'
ORDER BY greptime_timestamp;

+---------------------+---------------+-----+-------+
| greptime_timestamp | namespace | env | total |
+---------------------+---------------+-----+-------+
| 2023-05-15T10:10:00 | thermostat_v2 | dev | 356 |
| 2023-05-15T10:15:00 | thermostat_v2 | dev | 412 |
| 2023-05-15T10:20:00 | thermostat_v2 | dev | 298 |
+---------------------+---------------+-----+-------+

DROP TABLE test_multi_pk_filter;

Affected Rows: 0

CREATE TABLE IF NOT EXISTS `test_multi_pk_null` ( `namespace` STRING NULL, `env` STRING NULL DEFAULT 'NULL', `total` BIGINT NULL, `greptime_timestamp` TIMESTAMP(9) NOT NULL, TIME INDEX (`greptime_timestamp`), PRIMARY KEY (`namespace`, `env`) ) ENGINE=mito;

Affected Rows: 0

INSERT INTO test_multi_pk_null
(namespace, env, total, greptime_timestamp)
VALUES ('thermostat_v2', 'production', 5289, '2023-05-15 10:00:00');

Affected Rows: 1

INSERT INTO test_multi_pk_null
(namespace, env, total, greptime_timestamp)
VALUES ('thermostat_v2', 'production', 421, '2023-05-15 10:05:00');

Affected Rows: 1

ADMIN FLUSH_TABLE('test_multi_pk_null');

+-----------------------------------------+
| ADMIN FLUSH_TABLE('test_multi_pk_null') |
+-----------------------------------------+
| 0 |
+-----------------------------------------+

SELECT * FROM test_multi_pk_null WHERE env IS NOT NULL;

+---------------+------------+-------+---------------------+
| namespace | env | total | greptime_timestamp |
+---------------+------------+-------+---------------------+
| thermostat_v2 | production | 5289 | 2023-05-15T10:00:00 |
| thermostat_v2 | production | 421 | 2023-05-15T10:05:00 |
+---------------+------------+-------+---------------------+

DROP TABLE test_multi_pk_null;

Affected Rows: 0

66 tests/cases/standalone/common/select/prune_pk.sql Normal file
@@ -0,0 +1,66 @@
CREATE TABLE IF NOT EXISTS `test_multi_pk_filter` ( `namespace` STRING NULL, `env` STRING NULL DEFAULT 'NULL', `flag` INT NULL, `total` BIGINT NULL, `greptime_timestamp` TIMESTAMP(9) NOT NULL, TIME INDEX (`greptime_timestamp`), PRIMARY KEY (`namespace`, `env`, `flag`) ) ENGINE=mito;

INSERT INTO test_multi_pk_filter
(namespace, env, flag, total, greptime_timestamp)
VALUES ('thermostat_v2', 'production', 1, 5289, '2023-05-15 10:00:00');
INSERT INTO test_multi_pk_filter
(namespace, env, flag, total, greptime_timestamp)
VALUES ('thermostat_v2', 'production', 0, 421, '2023-05-15 10:05:00');
INSERT INTO test_multi_pk_filter
(namespace, env, flag, total, greptime_timestamp)
VALUES ('thermostat_v2', 'dev', 1, 356, '2023-05-15 10:10:00');

ADMIN FLUSH_TABLE('test_multi_pk_filter');

INSERT INTO test_multi_pk_filter
(namespace, env, flag, total, greptime_timestamp)
VALUES ('thermostat_v2', 'dev', 1, 412, '2023-05-15 10:15:00');
INSERT INTO test_multi_pk_filter
(namespace, env, flag, total, greptime_timestamp)
VALUES ('thermostat_v2', 'dev', 1, 298, '2023-05-15 10:20:00');
INSERT INTO test_multi_pk_filter
(namespace, env, flag, total, greptime_timestamp)
VALUES ('thermostat_v2', 'production', 1, 5289, '2023-05-15 10:25:00');
INSERT INTO test_multi_pk_filter
(namespace, env, flag, total, greptime_timestamp)
VALUES ('thermostat_v2', 'production', 1, 5874, '2023-05-15 10:30:00');

ADMIN FLUSH_TABLE('test_multi_pk_filter');

INSERT INTO test_multi_pk_filter
(namespace, env, flag, total, greptime_timestamp)
VALUES ('thermostat_v2', 'production', 1, 6132, '2023-05-15 10:35:00');
INSERT INTO test_multi_pk_filter
(namespace, env, flag, total, greptime_timestamp)
VALUES ('thermostat_v2', 'testing', 1, 1287, '2023-05-15 10:40:00');
INSERT INTO test_multi_pk_filter
(namespace, env, flag, total, greptime_timestamp)
VALUES ('thermostat_v2', 'testing', 1, 1432, '2023-05-15 10:45:00');
INSERT INTO test_multi_pk_filter
(namespace, env, flag, total, greptime_timestamp)
VALUES ('thermostat_v2', 'testing', 1, 1056, '2023-05-15 10:50:00');

SELECT greptime_timestamp, namespace, env, total FROM test_multi_pk_filter WHERE
greptime_timestamp BETWEEN '2023-05-15 10:00:00' AND '2023-05-15 11:00:00' AND flag = 1 AND namespace = 'thermostat_v2'
ORDER BY greptime_timestamp;

SELECT greptime_timestamp, namespace, env, total FROM test_multi_pk_filter WHERE
greptime_timestamp BETWEEN '2023-05-15 10:00:00' AND '2023-05-15 11:00:00' AND flag = 1 AND namespace = 'thermostat_v2' AND env='dev'
ORDER BY greptime_timestamp;

DROP TABLE test_multi_pk_filter;

CREATE TABLE IF NOT EXISTS `test_multi_pk_null` ( `namespace` STRING NULL, `env` STRING NULL DEFAULT 'NULL', `total` BIGINT NULL, `greptime_timestamp` TIMESTAMP(9) NOT NULL, TIME INDEX (`greptime_timestamp`), PRIMARY KEY (`namespace`, `env`) ) ENGINE=mito;

INSERT INTO test_multi_pk_null
(namespace, env, total, greptime_timestamp)
VALUES ('thermostat_v2', 'production', 5289, '2023-05-15 10:00:00');
INSERT INTO test_multi_pk_null
(namespace, env, total, greptime_timestamp)
VALUES ('thermostat_v2', 'production', 421, '2023-05-15 10:05:00');

ADMIN FLUSH_TABLE('test_multi_pk_null');

SELECT * FROM test_multi_pk_null WHERE env IS NOT NULL;

DROP TABLE test_multi_pk_null;