Mirror of https://github.com/GreptimeTeam/greptimedb.git, synced 2025-12-24 07:00:00 +00:00

Compare commits (11 commits): test/scan-...feat/prefi
Commits (SHA1): ab1928d5fd, 09dacc8e9b, dec439db2b, dc76571166, 3e17f8c426, a5df3954f3, 32fd850c20, 2bfdae4f8f, fcb898e9a4, 8fa2fdfc42, 4dc1a1d60f
.github/workflows/release.yml (vendored): 2 changed lines
@@ -91,7 +91,7 @@ env:
   # The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nigthly-20230313;
   NIGHTLY_RELEASE_PREFIX: nightly
   # Note: The NEXT_RELEASE_VERSION should be modified manually by every formal release.
-  NEXT_RELEASE_VERSION: v0.13.0
+  NEXT_RELEASE_VERSION: v0.14.0

 jobs:
   allocate-runners:
.gitignore (vendored): 3 changed lines
@@ -54,3 +54,6 @@ tests-fuzz/corpus/
 # Nix
 .direnv
 .envrc
+
+## default data home
+greptimedb_data
Cargo.lock (generated): 166 changed lines
@@ -185,7 +185,7 @@ checksum = "d301b3b94cb4b2f23d7917810addbbaff90738e0ca2be692bd027e70d7e0330c"
|
||||
|
||||
[[package]]
|
||||
name = "api"
|
||||
version = "0.13.0"
|
||||
version = "0.14.0"
|
||||
dependencies = [
|
||||
"common-base",
|
||||
"common-decimal",
|
||||
@@ -710,7 +710,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "auth"
|
||||
version = "0.13.0"
|
||||
version = "0.14.0"
|
||||
dependencies = [
|
||||
"api",
|
||||
"async-trait",
|
||||
@@ -1324,7 +1324,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "cache"
|
||||
version = "0.13.0"
|
||||
version = "0.14.0"
|
||||
dependencies = [
|
||||
"catalog",
|
||||
"common-error",
|
||||
@@ -1348,7 +1348,7 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"
|
||||
|
||||
[[package]]
|
||||
name = "catalog"
|
||||
version = "0.13.0"
|
||||
version = "0.14.0"
|
||||
dependencies = [
|
||||
"api",
|
||||
"arrow",
|
||||
@@ -1661,7 +1661,7 @@ checksum = "1462739cb27611015575c0c11df5df7601141071f07518d56fcc1be504cbec97"
|
||||
|
||||
[[package]]
|
||||
name = "cli"
|
||||
version = "0.13.0"
|
||||
version = "0.14.0"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"auth",
|
||||
@@ -1704,7 +1704,7 @@ dependencies = [
|
||||
"session",
|
||||
"snafu 0.8.5",
|
||||
"store-api",
|
||||
"substrait 0.13.0",
|
||||
"substrait 0.14.0",
|
||||
"table",
|
||||
"tempfile",
|
||||
"tokio",
|
||||
@@ -1713,7 +1713,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "client"
|
||||
version = "0.13.0"
|
||||
version = "0.14.0"
|
||||
dependencies = [
|
||||
"api",
|
||||
"arc-swap",
|
||||
@@ -1740,7 +1740,7 @@ dependencies = [
|
||||
"rand",
|
||||
"serde_json",
|
||||
"snafu 0.8.5",
|
||||
"substrait 0.13.0",
|
||||
"substrait 0.14.0",
|
||||
"substrait 0.37.3",
|
||||
"tokio",
|
||||
"tokio-stream",
|
||||
@@ -1781,7 +1781,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "cmd"
|
||||
version = "0.13.0"
|
||||
version = "0.14.0"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"auth",
|
||||
@@ -1842,7 +1842,7 @@ dependencies = [
|
||||
"similar-asserts",
|
||||
"snafu 0.8.5",
|
||||
"store-api",
|
||||
"substrait 0.13.0",
|
||||
"substrait 0.14.0",
|
||||
"table",
|
||||
"temp-env",
|
||||
"tempfile",
|
||||
@@ -1888,7 +1888,7 @@ checksum = "55b672471b4e9f9e95499ea597ff64941a309b2cdbffcc46f2cc5e2d971fd335"
|
||||
|
||||
[[package]]
|
||||
name = "common-base"
|
||||
version = "0.13.0"
|
||||
version = "0.14.0"
|
||||
dependencies = [
|
||||
"anymap2",
|
||||
"async-trait",
|
||||
@@ -1910,11 +1910,11 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-catalog"
|
||||
version = "0.13.0"
|
||||
version = "0.14.0"
|
||||
|
||||
[[package]]
|
||||
name = "common-config"
|
||||
version = "0.13.0"
|
||||
version = "0.14.0"
|
||||
dependencies = [
|
||||
"common-base",
|
||||
"common-error",
|
||||
@@ -1939,7 +1939,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-datasource"
|
||||
version = "0.13.0"
|
||||
version = "0.14.0"
|
||||
dependencies = [
|
||||
"arrow",
|
||||
"arrow-schema",
|
||||
@@ -1975,7 +1975,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-decimal"
|
||||
version = "0.13.0"
|
||||
version = "0.14.0"
|
||||
dependencies = [
|
||||
"bigdecimal 0.4.5",
|
||||
"common-error",
|
||||
@@ -1988,7 +1988,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-error"
|
||||
version = "0.13.0"
|
||||
version = "0.14.0"
|
||||
dependencies = [
|
||||
"http 1.1.0",
|
||||
"snafu 0.8.5",
|
||||
@@ -1998,7 +1998,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-frontend"
|
||||
version = "0.13.0"
|
||||
version = "0.14.0"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"common-error",
|
||||
@@ -2008,7 +2008,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-function"
|
||||
version = "0.13.0"
|
||||
version = "0.14.0"
|
||||
dependencies = [
|
||||
"ahash 0.8.11",
|
||||
"api",
|
||||
@@ -2059,7 +2059,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-greptimedb-telemetry"
|
||||
version = "0.13.0"
|
||||
version = "0.14.0"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"common-runtime",
|
||||
@@ -2076,7 +2076,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-grpc"
|
||||
version = "0.13.0"
|
||||
version = "0.14.0"
|
||||
dependencies = [
|
||||
"api",
|
||||
"arrow-flight",
|
||||
@@ -2104,7 +2104,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-grpc-expr"
|
||||
version = "0.13.0"
|
||||
version = "0.14.0"
|
||||
dependencies = [
|
||||
"api",
|
||||
"common-base",
|
||||
@@ -2123,7 +2123,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-macro"
|
||||
version = "0.13.0"
|
||||
version = "0.14.0"
|
||||
dependencies = [
|
||||
"arc-swap",
|
||||
"common-query",
|
||||
@@ -2137,7 +2137,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-mem-prof"
|
||||
version = "0.13.0"
|
||||
version = "0.14.0"
|
||||
dependencies = [
|
||||
"common-error",
|
||||
"common-macro",
|
||||
@@ -2150,7 +2150,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-meta"
|
||||
version = "0.13.0"
|
||||
version = "0.14.0"
|
||||
dependencies = [
|
||||
"anymap2",
|
||||
"api",
|
||||
@@ -2211,7 +2211,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-options"
|
||||
version = "0.13.0"
|
||||
version = "0.14.0"
|
||||
dependencies = [
|
||||
"common-grpc",
|
||||
"humantime-serde",
|
||||
@@ -2220,11 +2220,11 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-plugins"
|
||||
version = "0.13.0"
|
||||
version = "0.14.0"
|
||||
|
||||
[[package]]
|
||||
name = "common-pprof"
|
||||
version = "0.13.0"
|
||||
version = "0.14.0"
|
||||
dependencies = [
|
||||
"common-error",
|
||||
"common-macro",
|
||||
@@ -2236,7 +2236,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-procedure"
|
||||
version = "0.13.0"
|
||||
version = "0.14.0"
|
||||
dependencies = [
|
||||
"async-stream",
|
||||
"async-trait",
|
||||
@@ -2263,7 +2263,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-procedure-test"
|
||||
version = "0.13.0"
|
||||
version = "0.14.0"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"common-procedure",
|
||||
@@ -2271,7 +2271,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-query"
|
||||
version = "0.13.0"
|
||||
version = "0.14.0"
|
||||
dependencies = [
|
||||
"api",
|
||||
"async-trait",
|
||||
@@ -2297,7 +2297,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-recordbatch"
|
||||
version = "0.13.0"
|
||||
version = "0.14.0"
|
||||
dependencies = [
|
||||
"arc-swap",
|
||||
"common-error",
|
||||
@@ -2316,7 +2316,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-runtime"
|
||||
version = "0.13.0"
|
||||
version = "0.14.0"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"clap 4.5.19",
|
||||
@@ -2346,7 +2346,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-telemetry"
|
||||
version = "0.13.0"
|
||||
version = "0.14.0"
|
||||
dependencies = [
|
||||
"atty",
|
||||
"backtrace",
|
||||
@@ -2374,7 +2374,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-test-util"
|
||||
version = "0.13.0"
|
||||
version = "0.14.0"
|
||||
dependencies = [
|
||||
"client",
|
||||
"common-query",
|
||||
@@ -2386,7 +2386,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-time"
|
||||
version = "0.13.0"
|
||||
version = "0.14.0"
|
||||
dependencies = [
|
||||
"arrow",
|
||||
"chrono",
|
||||
@@ -2404,7 +2404,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-version"
|
||||
version = "0.13.0"
|
||||
version = "0.14.0"
|
||||
dependencies = [
|
||||
"build-data",
|
||||
"const_format",
|
||||
@@ -2414,7 +2414,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-wal"
|
||||
version = "0.13.0"
|
||||
version = "0.14.0"
|
||||
dependencies = [
|
||||
"common-base",
|
||||
"common-error",
|
||||
@@ -3345,7 +3345,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "datanode"
|
||||
version = "0.13.0"
|
||||
version = "0.14.0"
|
||||
dependencies = [
|
||||
"api",
|
||||
"arrow-flight",
|
||||
@@ -3397,7 +3397,7 @@ dependencies = [
|
||||
"session",
|
||||
"snafu 0.8.5",
|
||||
"store-api",
|
||||
"substrait 0.13.0",
|
||||
"substrait 0.14.0",
|
||||
"table",
|
||||
"tokio",
|
||||
"toml 0.8.19",
|
||||
@@ -3406,7 +3406,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "datatypes"
|
||||
version = "0.13.0"
|
||||
version = "0.14.0"
|
||||
dependencies = [
|
||||
"arrow",
|
||||
"arrow-array",
|
||||
@@ -4050,7 +4050,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "file-engine"
|
||||
version = "0.13.0"
|
||||
version = "0.14.0"
|
||||
dependencies = [
|
||||
"api",
|
||||
"async-trait",
|
||||
@@ -4119,11 +4119,12 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "flate2"
|
||||
version = "1.0.34"
|
||||
version = "1.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a1b589b4dc103969ad3cf85c950899926ec64300a1a46d76c03a6072957036f0"
|
||||
checksum = "11faaf5a5236997af9848be0bef4db95824b1d534ebc64d0f0c6cf3e67bd38dc"
|
||||
dependencies = [
|
||||
"crc32fast",
|
||||
"libz-rs-sys",
|
||||
"libz-sys",
|
||||
"miniz_oxide",
|
||||
]
|
||||
@@ -4160,7 +4161,7 @@ checksum = "8bf7cc16383c4b8d58b9905a8509f02926ce3058053c056376248d958c9df1e8"
|
||||
|
||||
[[package]]
|
||||
name = "flow"
|
||||
version = "0.13.0"
|
||||
version = "0.14.0"
|
||||
dependencies = [
|
||||
"api",
|
||||
"arrow",
|
||||
@@ -4222,7 +4223,7 @@ dependencies = [
|
||||
"snafu 0.8.5",
|
||||
"store-api",
|
||||
"strum 0.25.0",
|
||||
"substrait 0.13.0",
|
||||
"substrait 0.14.0",
|
||||
"table",
|
||||
"tokio",
|
||||
"tonic 0.12.3",
|
||||
@@ -4277,7 +4278,7 @@ checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa"
|
||||
|
||||
[[package]]
|
||||
name = "frontend"
|
||||
version = "0.13.0"
|
||||
version = "0.14.0"
|
||||
dependencies = [
|
||||
"api",
|
||||
"arc-swap",
|
||||
@@ -5545,7 +5546,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "index"
|
||||
version = "0.13.0"
|
||||
version = "0.14.0"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"asynchronous-codec",
|
||||
@@ -6278,6 +6279,15 @@ dependencies = [
|
||||
"vcpkg",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "libz-rs-sys"
|
||||
version = "0.4.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "902bc563b5d65ad9bba616b490842ef0651066a1a1dc3ce1087113ffcb873c8d"
|
||||
dependencies = [
|
||||
"zlib-rs",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "libz-sys"
|
||||
version = "1.1.20"
|
||||
@@ -6338,7 +6348,7 @@ checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24"
|
||||
|
||||
[[package]]
|
||||
name = "log-query"
|
||||
version = "0.13.0"
|
||||
version = "0.14.0"
|
||||
dependencies = [
|
||||
"chrono",
|
||||
"common-error",
|
||||
@@ -6350,7 +6360,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "log-store"
|
||||
version = "0.13.0"
|
||||
version = "0.14.0"
|
||||
dependencies = [
|
||||
"async-stream",
|
||||
"async-trait",
|
||||
@@ -6643,7 +6653,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "meta-client"
|
||||
version = "0.13.0"
|
||||
version = "0.14.0"
|
||||
dependencies = [
|
||||
"api",
|
||||
"async-trait",
|
||||
@@ -6670,7 +6680,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "meta-srv"
|
||||
version = "0.13.0"
|
||||
version = "0.14.0"
|
||||
dependencies = [
|
||||
"api",
|
||||
"async-trait",
|
||||
@@ -6757,7 +6767,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "metric-engine"
|
||||
version = "0.13.0"
|
||||
version = "0.14.0"
|
||||
dependencies = [
|
||||
"api",
|
||||
"aquamarine",
|
||||
@@ -6822,9 +6832,9 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a"
|
||||
|
||||
[[package]]
|
||||
name = "miniz_oxide"
|
||||
version = "0.8.0"
|
||||
version = "0.8.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e2d80299ef12ff69b16a84bb182e3b9df68b5a91574d3d4fa6e41b65deec4df1"
|
||||
checksum = "8e3e04debbb59698c15bacbb6d93584a8c0ca9cc3213cb423d31f760d8843ce5"
|
||||
dependencies = [
|
||||
"adler2",
|
||||
]
|
||||
@@ -6855,7 +6865,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "mito2"
|
||||
version = "0.13.0"
|
||||
version = "0.14.0"
|
||||
dependencies = [
|
||||
"api",
|
||||
"aquamarine",
|
||||
@@ -7552,7 +7562,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "object-store"
|
||||
version = "0.13.0"
|
||||
version = "0.14.0"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"bytes",
|
||||
@@ -7801,7 +7811,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "operator"
|
||||
version = "0.13.0"
|
||||
version = "0.14.0"
|
||||
dependencies = [
|
||||
"ahash 0.8.11",
|
||||
"api",
|
||||
@@ -7849,7 +7859,7 @@ dependencies = [
|
||||
"sql",
|
||||
"sqlparser 0.52.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=71dd86058d2af97b9925093d40c4e03360403170)",
|
||||
"store-api",
|
||||
"substrait 0.13.0",
|
||||
"substrait 0.14.0",
|
||||
"table",
|
||||
"tokio",
|
||||
"tokio-util",
|
||||
@@ -8086,7 +8096,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "partition"
|
||||
version = "0.13.0"
|
||||
version = "0.14.0"
|
||||
dependencies = [
|
||||
"api",
|
||||
"async-trait",
|
||||
@@ -8354,7 +8364,7 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"
|
||||
|
||||
[[package]]
|
||||
name = "pipeline"
|
||||
version = "0.13.0"
|
||||
version = "0.14.0"
|
||||
dependencies = [
|
||||
"ahash 0.8.11",
|
||||
"api",
|
||||
@@ -8494,7 +8504,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "plugins"
|
||||
version = "0.13.0"
|
||||
version = "0.14.0"
|
||||
dependencies = [
|
||||
"auth",
|
||||
"clap 4.5.19",
|
||||
@@ -8756,7 +8766,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "promql"
|
||||
version = "0.13.0"
|
||||
version = "0.14.0"
|
||||
dependencies = [
|
||||
"ahash 0.8.11",
|
||||
"async-trait",
|
||||
@@ -9003,7 +9013,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "puffin"
|
||||
version = "0.13.0"
|
||||
version = "0.14.0"
|
||||
dependencies = [
|
||||
"async-compression 0.4.13",
|
||||
"async-trait",
|
||||
@@ -9044,7 +9054,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "query"
|
||||
version = "0.13.0"
|
||||
version = "0.14.0"
|
||||
dependencies = [
|
||||
"ahash 0.8.11",
|
||||
"api",
|
||||
@@ -9109,7 +9119,7 @@ dependencies = [
|
||||
"sqlparser 0.52.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=71dd86058d2af97b9925093d40c4e03360403170)",
|
||||
"statrs",
|
||||
"store-api",
|
||||
"substrait 0.13.0",
|
||||
"substrait 0.14.0",
|
||||
"table",
|
||||
"tokio",
|
||||
"tokio-stream",
|
||||
@@ -10464,7 +10474,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "servers"
|
||||
version = "0.13.0"
|
||||
version = "0.14.0"
|
||||
dependencies = [
|
||||
"ahash 0.8.11",
|
||||
"api",
|
||||
@@ -10581,7 +10591,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "session"
|
||||
version = "0.13.0"
|
||||
version = "0.14.0"
|
||||
dependencies = [
|
||||
"api",
|
||||
"arc-swap",
|
||||
@@ -10890,7 +10900,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "sql"
|
||||
version = "0.13.0"
|
||||
version = "0.14.0"
|
||||
dependencies = [
|
||||
"api",
|
||||
"chrono",
|
||||
@@ -10944,7 +10954,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "sqlness-runner"
|
||||
version = "0.13.0"
|
||||
version = "0.14.0"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"clap 4.5.19",
|
||||
@@ -11261,7 +11271,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "store-api"
|
||||
version = "0.13.0"
|
||||
version = "0.14.0"
|
||||
dependencies = [
|
||||
"api",
|
||||
"aquamarine",
|
||||
@@ -11391,7 +11401,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "substrait"
|
||||
version = "0.13.0"
|
||||
version = "0.14.0"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"bytes",
|
||||
@@ -11572,7 +11582,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "table"
|
||||
version = "0.13.0"
|
||||
version = "0.14.0"
|
||||
dependencies = [
|
||||
"api",
|
||||
"async-trait",
|
||||
@@ -11823,7 +11833,7 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76"
|
||||
|
||||
[[package]]
|
||||
name = "tests-fuzz"
|
||||
version = "0.13.0"
|
||||
version = "0.14.0"
|
||||
dependencies = [
|
||||
"arbitrary",
|
||||
"async-trait",
|
||||
@@ -11867,7 +11877,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "tests-integration"
|
||||
version = "0.13.0"
|
||||
version = "0.14.0"
|
||||
dependencies = [
|
||||
"api",
|
||||
"arrow-flight",
|
||||
@@ -11933,7 +11943,7 @@ dependencies = [
|
||||
"sql",
|
||||
"sqlx",
|
||||
"store-api",
|
||||
"substrait 0.13.0",
|
||||
"substrait 0.14.0",
|
||||
"table",
|
||||
"tempfile",
|
||||
"time",
|
||||
@@ -13954,6 +13964,12 @@ dependencies = [
|
||||
"syn 2.0.96",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "zlib-rs"
|
||||
version = "0.4.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8b20717f0917c908dc63de2e44e97f1e6b126ca58d0e391cee86d504eb8fbd05"
|
||||
|
||||
[[package]]
|
||||
name = "zstd"
|
||||
version = "0.11.2+zstd.1.5.2"
|
||||
|
||||
@@ -67,7 +67,7 @@ members = [
 resolver = "2"

 [workspace.package]
-version = "0.13.0"
+version = "0.14.0"
 edition = "2021"
 license = "Apache-2.0"

@@ -126,6 +126,7 @@ deadpool-postgres = "0.12"
 derive_builder = "0.12"
 dotenv = "0.15"
 etcd-client = "0.14"
+flate2 = { version = "1.1.0", default-features = false, features = ["zlib-rs"] }
 fst = "0.4.7"
 futures = "0.3"
 futures-util = "0.3"
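The `flate2` workspace dependency above moves to 1.1.0 with default features off and the `zlib-rs` feature on, selecting the pure-Rust zlib backend (the matching `libz-rs-sys` and `zlib-rs` entries appear in the `Cargo.lock` hunks). Calling code is unaffected by the backend switch; below is a minimal round-trip sketch of the unchanged `flate2` API, standalone and outside this repository:

```rust
use std::io::{Read, Write};

use flate2::read::ZlibDecoder;
use flate2::write::ZlibEncoder;
use flate2::Compression;

fn main() -> std::io::Result<()> {
    // Compress a small payload. The zlib-rs backend is chosen at build time
    // by the Cargo feature above, not by anything in the calling code.
    let mut encoder = ZlibEncoder::new(Vec::new(), Compression::default());
    encoder.write_all(b"greptimedb")?;
    let compressed = encoder.finish()?;

    // Decompress and check the round trip.
    let mut decoder = ZlibDecoder::new(compressed.as_slice());
    let mut output = String::new();
    decoder.read_to_string(&mut output)?;
    assert_eq!(output, "greptimedb");
    Ok(())
}
```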
@@ -112,7 +112,7 @@ Start a GreptimeDB container with:

 ```shell
 docker run -p 127.0.0.1:4000-4003:4000-4003 \
-  -v "$(pwd)/greptimedb:/tmp/greptimedb" \
+  -v "$(pwd)/greptimedb:./greptimedb_data" \
   --name greptime --rm \
   greptime/greptimedb:latest standalone start \
   --http-addr 0.0.0.0:4000 \
@@ -101,7 +101,7 @@
|
||||
| `flow` | -- | -- | flow engine options. |
|
||||
| `flow.num_workers` | Integer | `0` | The number of flow worker in flownode.<br/>Not setting(or set to 0) this value will use the number of CPU cores divided by 2. |
|
||||
| `storage` | -- | -- | The data storage options. |
|
||||
| `storage.data_home` | String | `/tmp/greptimedb/` | The working home directory. |
|
||||
| `storage.data_home` | String | `./greptimedb_data/` | The working home directory. |
|
||||
| `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
|
||||
| `storage.cache_path` | String | Unset | Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance.<br/>A local file directory, defaults to `{data_home}`. An empty string means disabling. |
|
||||
| `storage.cache_capacity` | String | Unset | The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger. |
|
||||
@@ -181,7 +181,7 @@
|
||||
| `region_engine.metric` | -- | -- | Metric engine options. |
|
||||
| `region_engine.metric.experimental_sparse_primary_key_encoding` | Bool | `false` | Whether to enable the experimental sparse primary key encoding. |
|
||||
| `logging` | -- | -- | The logging options. |
|
||||
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
||||
| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
||||
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
||||
| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
|
||||
@@ -279,7 +279,7 @@
|
||||
| `datanode.client.connect_timeout` | String | `10s` | -- |
|
||||
| `datanode.client.tcp_nodelay` | Bool | `true` | -- |
|
||||
| `logging` | -- | -- | The logging options. |
|
||||
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
||||
| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
||||
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
||||
| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
|
||||
@@ -308,7 +308,7 @@
|
||||
|
||||
| Key | Type | Default | Descriptions |
|
||||
| --- | -----| ------- | ----------- |
|
||||
| `data_home` | String | `/tmp/metasrv/` | The working home directory. |
|
||||
| `data_home` | String | `./greptimedb_data/metasrv/` | The working home directory. |
|
||||
| `bind_addr` | String | `127.0.0.1:3002` | The bind address of metasrv. |
|
||||
| `server_addr` | String | `127.0.0.1:3002` | The communication server address for the frontend and datanode to connect to metasrv.<br/>If left empty or unset, the server will automatically use the IP address of the first network interface<br/>on the host, with the same port number as the one specified in `bind_addr`. |
|
||||
| `store_addrs` | Array | -- | Store server address default to etcd store.<br/>For postgres store, the format is:<br/>"password=password dbname=postgres user=postgres host=localhost port=5432"<br/>For etcd store, the format is:<br/>"127.0.0.1:2379" |
|
||||
@@ -352,7 +352,7 @@
|
||||
| `wal.backoff_base` | Integer | `2` | Exponential backoff rate, i.e. next backoff = base * current backoff. |
|
||||
| `wal.backoff_deadline` | String | `5mins` | Stop reconnecting if the total wait time reaches the deadline. If this config is missing, the reconnecting won't terminate. |
|
||||
| `logging` | -- | -- | The logging options. |
|
||||
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
||||
| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
||||
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
||||
| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
|
||||
@@ -442,7 +442,7 @@
|
||||
| `wal.dump_index_interval` | String | `60s` | The interval for dumping WAL indexes.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.overwrite_entry_start_id` | Bool | `false` | Ignore missing entries during read WAL.<br/>**It's only used when the provider is `kafka`**.<br/><br/>This option ensures that when Kafka messages are deleted, the system<br/>can still successfully replay memtable data without throwing an<br/>out-of-range error.<br/>However, enabling this option might lead to unexpected data loss,<br/>as the system will skip over missing entries instead of treating<br/>them as critical errors. |
|
||||
| `storage` | -- | -- | The data storage options. |
|
||||
| `storage.data_home` | String | `/tmp/greptimedb/` | The working home directory. |
|
||||
| `storage.data_home` | String | `./greptimedb_data/` | The working home directory. |
|
||||
| `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
|
||||
| `storage.cache_path` | String | Unset | Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance.<br/>A local file directory, defaults to `{data_home}`. An empty string means disabling. |
|
||||
| `storage.cache_capacity` | String | Unset | The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger. |
|
||||
@@ -522,7 +522,7 @@
|
||||
| `region_engine.metric` | -- | -- | Metric engine options. |
|
||||
| `region_engine.metric.experimental_sparse_primary_key_encoding` | Bool | `false` | Whether to enable the experimental sparse primary key encoding. |
|
||||
| `logging` | -- | -- | The logging options. |
|
||||
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
||||
| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
||||
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
||||
| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
|
||||
@@ -579,7 +579,7 @@
|
||||
| `heartbeat.interval` | String | `3s` | Interval for sending heartbeat messages to the metasrv. |
|
||||
| `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. |
|
||||
| `logging` | -- | -- | The logging options. |
|
||||
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
||||
| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
||||
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
||||
| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
|
||||
|
||||
@@ -119,7 +119,7 @@ provider = "raft_engine"
|
||||
## The directory to store the WAL files.
|
||||
## **It's only used when the provider is `raft_engine`**.
|
||||
## @toml2docs:none-default
|
||||
dir = "/tmp/greptimedb/wal"
|
||||
dir = "./greptimedb_data/wal"
|
||||
|
||||
## The size of the WAL segment file.
|
||||
## **It's only used when the provider is `raft_engine`**.
|
||||
@@ -265,7 +265,7 @@ overwrite_entry_start_id = false
|
||||
## The data storage options.
|
||||
[storage]
|
||||
## The working home directory.
|
||||
data_home = "/tmp/greptimedb/"
|
||||
data_home = "./greptimedb_data/"
|
||||
|
||||
## The storage type used to store the data.
|
||||
## - `File`: the data is stored in the local file system.
|
||||
@@ -618,7 +618,7 @@ experimental_sparse_primary_key_encoding = false
|
||||
## The logging options.
|
||||
[logging]
|
||||
## The directory to store the log files. If set to empty, logs will not be written to files.
|
||||
dir = "/tmp/greptimedb/logs"
|
||||
dir = "./greptimedb_data/logs"
|
||||
|
||||
## The log level. Can be `info`/`debug`/`warn`/`error`.
|
||||
## @toml2docs:none-default
|
||||
|
||||
@@ -76,7 +76,7 @@ retry_interval = "3s"
|
||||
## The logging options.
|
||||
[logging]
|
||||
## The directory to store the log files. If set to empty, logs will not be written to files.
|
||||
dir = "/tmp/greptimedb/logs"
|
||||
dir = "./greptimedb_data/logs"
|
||||
|
||||
## The log level. Can be `info`/`debug`/`warn`/`error`.
|
||||
## @toml2docs:none-default
|
||||
@@ -121,4 +121,3 @@ sample_ratio = 1.0
|
||||
## The tokio console address.
|
||||
## @toml2docs:none-default
|
||||
#+ tokio_console_addr = "127.0.0.1"
|
||||
|
||||
|
||||
@@ -189,7 +189,7 @@ tcp_nodelay = true
 ## The logging options.
 [logging]
 ## The directory to store the log files. If set to empty, logs will not be written to files.
-dir = "/tmp/greptimedb/logs"
+dir = "./greptimedb_data/logs"

 ## The log level. Can be `info`/`debug`/`warn`/`error`.
 ## @toml2docs:none-default
@@ -1,5 +1,5 @@
 ## The working home directory.
-data_home = "/tmp/metasrv/"
+data_home = "./greptimedb_data/metasrv/"

 ## The bind address of metasrv.
 bind_addr = "127.0.0.1:3002"
@@ -177,7 +177,7 @@ backoff_deadline = "5mins"
 ## The logging options.
 [logging]
 ## The directory to store the log files. If set to empty, logs will not be written to files.
-dir = "/tmp/greptimedb/logs"
+dir = "./greptimedb_data/logs"

 ## The log level. Can be `info`/`debug`/`warn`/`error`.
 ## @toml2docs:none-default
@@ -164,7 +164,7 @@ provider = "raft_engine"
|
||||
## The directory to store the WAL files.
|
||||
## **It's only used when the provider is `raft_engine`**.
|
||||
## @toml2docs:none-default
|
||||
dir = "/tmp/greptimedb/wal"
|
||||
dir = "./greptimedb_data/wal"
|
||||
|
||||
## The size of the WAL segment file.
|
||||
## **It's only used when the provider is `raft_engine`**.
|
||||
@@ -352,7 +352,7 @@ retry_delay = "500ms"
|
||||
## The data storage options.
|
||||
[storage]
|
||||
## The working home directory.
|
||||
data_home = "/tmp/greptimedb/"
|
||||
data_home = "./greptimedb_data/"
|
||||
|
||||
## The storage type used to store the data.
|
||||
## - `File`: the data is stored in the local file system.
|
||||
@@ -705,7 +705,7 @@ experimental_sparse_primary_key_encoding = false
|
||||
## The logging options.
|
||||
[logging]
|
||||
## The directory to store the log files. If set to empty, logs will not be written to files.
|
||||
dir = "/tmp/greptimedb/logs"
|
||||
dir = "./greptimedb_data/logs"
|
||||
|
||||
## The log level. Can be `info`/`debug`/`warn`/`error`.
|
||||
## @toml2docs:none-default
|
||||
|
||||
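All of the example configs above move their directories from `/tmp/...` to a relative `./greptimedb_data/...` tree, so data and logs now land under the process working directory by default. A minimal sketch of deserializing such a `[storage]` table with the `toml` and `serde` crates; the `Config` and `Storage` structs here are illustrative stand-ins, not GreptimeDB's actual option types:

```rust
use serde::Deserialize;

#[derive(Debug, Deserialize)]
struct Storage {
    data_home: String,
}

#[derive(Debug, Deserialize)]
struct Config {
    storage: Storage,
}

fn main() {
    // The new default working home from the example configs above.
    let toml_str = r#"
        [storage]
        data_home = "./greptimedb_data/"
    "#;
    let cfg: Config = toml::from_str(toml_str).expect("valid TOML");
    assert_eq!(cfg.storage.data_home, "./greptimedb_data/");
}
```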
@@ -25,7 +25,7 @@ services:
       - --initial-cluster-state=new
       - *etcd_initial_cluster_token
     volumes:
-      - /tmp/greptimedb-cluster-docker-compose/etcd0:/var/lib/etcd
+      - ./greptimedb-cluster-docker-compose/etcd0:/var/lib/etcd
     healthcheck:
       test: [ "CMD", "etcdctl", "--endpoints=http://etcd0:2379", "endpoint", "health" ]
       interval: 5s
@@ -68,12 +68,13 @@ services:
       - datanode
      - start
       - --node-id=0
+      - --data-home=/greptimedb_data
       - --rpc-bind-addr=0.0.0.0:3001
       - --rpc-server-addr=datanode0:3001
       - --metasrv-addrs=metasrv:3002
       - --http-addr=0.0.0.0:5000
     volumes:
-      - /tmp/greptimedb-cluster-docker-compose/datanode0:/tmp/greptimedb
+      - ./greptimedb-cluster-docker-compose/datanode0:/greptimedb_data
     healthcheck:
       test: [ "CMD", "curl", "-fv", "http://datanode0:5000/health" ]
       interval: 5s
@@ -132,6 +132,15 @@ pub fn options_from_skipping(skipping: &SkippingIndexOptions) -> Result<Option<C
     Ok((!options.options.is_empty()).then_some(options))
 }

+/// Tries to construct a `ColumnOptions` for inverted index.
+pub fn options_from_inverted() -> ColumnOptions {
+    let mut options = ColumnOptions::default();
+    options
+        .options
+        .insert(INVERTED_INDEX_GRPC_KEY.to_string(), "true".to_string());
+    options
+}
+
 /// Tries to construct a `FulltextAnalyzer` from the given analyzer.
 pub fn as_fulltext_option(analyzer: Analyzer) -> FulltextAnalyzer {
     match analyzer {
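A hypothetical unit test for the new `options_from_inverted` helper. It assumes this module's `ColumnOptions.options` is a string-to-string map and that the `INVERTED_INDEX_GRPC_KEY` constant is in scope, as the diff above implies; it is a sketch, not part of the change:

```rust
#[cfg(test)]
mod inverted_option_tests {
    use super::*;

    #[test]
    fn options_from_inverted_sets_grpc_key() {
        // The helper is expected to mark the column with the inverted-index
        // flag under INVERTED_INDEX_GRPC_KEY.
        let opts = options_from_inverted();
        assert_eq!(
            opts.options.get(INVERTED_INDEX_GRPC_KEY).map(String::as_str),
            Some("true")
        );
    }
}
```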
@@ -406,7 +406,7 @@ mod tests {
|
||||
sync_write = false
|
||||
|
||||
[storage]
|
||||
data_home = "/tmp/greptimedb/"
|
||||
data_home = "./greptimedb_data/"
|
||||
type = "File"
|
||||
|
||||
[[storage.providers]]
|
||||
@@ -420,7 +420,7 @@ mod tests {
|
||||
|
||||
[logging]
|
||||
level = "debug"
|
||||
dir = "/tmp/greptimedb/test/logs"
|
||||
dir = "./greptimedb_data/test/logs"
|
||||
"#;
|
||||
write!(file, "{}", toml_str).unwrap();
|
||||
|
||||
@@ -467,7 +467,7 @@ mod tests {
|
||||
assert_eq!(10000, ddl_timeout.as_millis());
|
||||
assert_eq!(3000, timeout.as_millis());
|
||||
assert!(tcp_nodelay);
|
||||
assert_eq!("/tmp/greptimedb/", options.storage.data_home);
|
||||
assert_eq!("./greptimedb_data/", options.storage.data_home);
|
||||
assert!(matches!(
|
||||
&options.storage.store,
|
||||
ObjectStoreConfig::File(FileConfig { .. })
|
||||
@@ -483,7 +483,10 @@ mod tests {
|
||||
));
|
||||
|
||||
assert_eq!("debug", options.logging.level.unwrap());
|
||||
assert_eq!("/tmp/greptimedb/test/logs".to_string(), options.logging.dir);
|
||||
assert_eq!(
|
||||
"./greptimedb_data/test/logs".to_string(),
|
||||
options.logging.dir
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -526,7 +529,7 @@ mod tests {
|
||||
|
||||
let options = cmd
|
||||
.load_options(&GlobalOptions {
|
||||
log_dir: Some("/tmp/greptimedb/test/logs".to_string()),
|
||||
log_dir: Some("./greptimedb_data/test/logs".to_string()),
|
||||
log_level: Some("debug".to_string()),
|
||||
|
||||
#[cfg(feature = "tokio-console")]
|
||||
@@ -536,7 +539,7 @@ mod tests {
|
||||
.component;
|
||||
|
||||
let logging_opt = options.logging;
|
||||
assert_eq!("/tmp/greptimedb/test/logs", logging_opt.dir);
|
||||
assert_eq!("./greptimedb_data/test/logs", logging_opt.dir);
|
||||
assert_eq!("debug", logging_opt.level.as_ref().unwrap());
|
||||
}
|
||||
|
||||
@@ -565,11 +568,11 @@ mod tests {
|
||||
|
||||
[storage]
|
||||
type = "File"
|
||||
data_home = "/tmp/greptimedb/"
|
||||
data_home = "./greptimedb_data/"
|
||||
|
||||
[logging]
|
||||
level = "debug"
|
||||
dir = "/tmp/greptimedb/test/logs"
|
||||
dir = "./greptimedb_data/test/logs"
|
||||
"#;
|
||||
write!(file, "{}", toml_str).unwrap();
|
||||
|
||||
|
||||
@@ -448,7 +448,7 @@ mod tests {
|
||||
|
||||
[logging]
|
||||
level = "debug"
|
||||
dir = "/tmp/greptimedb/test/logs"
|
||||
dir = "./greptimedb_data/test/logs"
|
||||
"#;
|
||||
write!(file, "{}", toml_str).unwrap();
|
||||
|
||||
@@ -466,7 +466,10 @@ mod tests {
|
||||
assert_eq!(ReadableSize::gb(2), fe_opts.http.body_limit);
|
||||
|
||||
assert_eq!("debug", fe_opts.logging.level.as_ref().unwrap());
|
||||
assert_eq!("/tmp/greptimedb/test/logs".to_string(), fe_opts.logging.dir);
|
||||
assert_eq!(
|
||||
"./greptimedb_data/test/logs".to_string(),
|
||||
fe_opts.logging.dir
|
||||
);
|
||||
assert!(!fe_opts.opentsdb.enable);
|
||||
}
|
||||
|
||||
@@ -505,7 +508,7 @@ mod tests {
|
||||
|
||||
let options = cmd
|
||||
.load_options(&GlobalOptions {
|
||||
log_dir: Some("/tmp/greptimedb/test/logs".to_string()),
|
||||
log_dir: Some("./greptimedb_data/test/logs".to_string()),
|
||||
log_level: Some("debug".to_string()),
|
||||
|
||||
#[cfg(feature = "tokio-console")]
|
||||
@@ -515,7 +518,7 @@ mod tests {
|
||||
.component;
|
||||
|
||||
let logging_opt = options.logging;
|
||||
assert_eq!("/tmp/greptimedb/test/logs", logging_opt.dir);
|
||||
assert_eq!("./greptimedb_data/test/logs", logging_opt.dir);
|
||||
assert_eq!("debug", logging_opt.level.as_ref().unwrap());
|
||||
}
|
||||
|
||||
|
||||
@@ -337,7 +337,7 @@ mod tests {
|
||||
|
||||
[logging]
|
||||
level = "debug"
|
||||
dir = "/tmp/greptimedb/test/logs"
|
||||
dir = "./greptimedb_data/test/logs"
|
||||
|
||||
[failure_detector]
|
||||
threshold = 8.0
|
||||
@@ -358,7 +358,10 @@ mod tests {
|
||||
assert_eq!(vec!["127.0.0.1:2379".to_string()], options.store_addrs);
|
||||
assert_eq!(SelectorType::LeaseBased, options.selector);
|
||||
assert_eq!("debug", options.logging.level.as_ref().unwrap());
|
||||
assert_eq!("/tmp/greptimedb/test/logs".to_string(), options.logging.dir);
|
||||
assert_eq!(
|
||||
"./greptimedb_data/test/logs".to_string(),
|
||||
options.logging.dir
|
||||
);
|
||||
assert_eq!(8.0, options.failure_detector.threshold);
|
||||
assert_eq!(
|
||||
100.0,
|
||||
@@ -396,7 +399,7 @@ mod tests {
|
||||
|
||||
let options = cmd
|
||||
.load_options(&GlobalOptions {
|
||||
log_dir: Some("/tmp/greptimedb/test/logs".to_string()),
|
||||
log_dir: Some("./greptimedb_data/test/logs".to_string()),
|
||||
log_level: Some("debug".to_string()),
|
||||
|
||||
#[cfg(feature = "tokio-console")]
|
||||
@@ -406,7 +409,7 @@ mod tests {
|
||||
.component;
|
||||
|
||||
let logging_opt = options.logging;
|
||||
assert_eq!("/tmp/greptimedb/test/logs", logging_opt.dir);
|
||||
assert_eq!("./greptimedb_data/test/logs", logging_opt.dir);
|
||||
assert_eq!("debug", logging_opt.level.as_ref().unwrap());
|
||||
}
|
||||
|
||||
@@ -424,7 +427,7 @@ mod tests {
|
||||
|
||||
[logging]
|
||||
level = "debug"
|
||||
dir = "/tmp/greptimedb/test/logs"
|
||||
dir = "./greptimedb_data/test/logs"
|
||||
"#;
|
||||
write!(file, "{}", toml_str).unwrap();
|
||||
|
||||
|
||||
@@ -852,7 +852,7 @@ mod tests {
|
||||
|
||||
[wal]
|
||||
provider = "raft_engine"
|
||||
dir = "/tmp/greptimedb/test/wal"
|
||||
dir = "./greptimedb_data/test/wal"
|
||||
file_size = "1GB"
|
||||
purge_threshold = "50GB"
|
||||
purge_interval = "10m"
|
||||
@@ -860,7 +860,7 @@ mod tests {
|
||||
sync_write = false
|
||||
|
||||
[storage]
|
||||
data_home = "/tmp/greptimedb/"
|
||||
data_home = "./greptimedb_data/"
|
||||
type = "File"
|
||||
|
||||
[[storage.providers]]
|
||||
@@ -892,7 +892,7 @@ mod tests {
|
||||
|
||||
[logging]
|
||||
level = "debug"
|
||||
dir = "/tmp/greptimedb/test/logs"
|
||||
dir = "./greptimedb_data/test/logs"
|
||||
"#;
|
||||
write!(file, "{}", toml_str).unwrap();
|
||||
let cmd = StartCommand {
|
||||
@@ -922,7 +922,10 @@ mod tests {
|
||||
let DatanodeWalConfig::RaftEngine(raft_engine_config) = dn_opts.wal else {
|
||||
unreachable!()
|
||||
};
|
||||
assert_eq!("/tmp/greptimedb/test/wal", raft_engine_config.dir.unwrap());
|
||||
assert_eq!(
|
||||
"./greptimedb_data/test/wal",
|
||||
raft_engine_config.dir.unwrap()
|
||||
);
|
||||
|
||||
assert!(matches!(
|
||||
&dn_opts.storage.store,
|
||||
@@ -946,7 +949,7 @@ mod tests {
|
||||
}
|
||||
|
||||
assert_eq!("debug", logging_opts.level.as_ref().unwrap());
|
||||
assert_eq!("/tmp/greptimedb/test/logs".to_string(), logging_opts.dir);
|
||||
assert_eq!("./greptimedb_data/test/logs".to_string(), logging_opts.dir);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -958,7 +961,7 @@ mod tests {
|
||||
|
||||
let opts = cmd
|
||||
.load_options(&GlobalOptions {
|
||||
log_dir: Some("/tmp/greptimedb/test/logs".to_string()),
|
||||
log_dir: Some("./greptimedb_data/test/logs".to_string()),
|
||||
log_level: Some("debug".to_string()),
|
||||
|
||||
#[cfg(feature = "tokio-console")]
|
||||
@@ -967,7 +970,7 @@ mod tests {
|
||||
.unwrap()
|
||||
.component;
|
||||
|
||||
assert_eq!("/tmp/greptimedb/test/logs", opts.logging.dir);
|
||||
assert_eq!("./greptimedb_data/test/logs", opts.logging.dir);
|
||||
assert_eq!("debug", opts.logging.level.unwrap());
|
||||
}
|
||||
|
||||
|
||||
@@ -56,13 +56,13 @@ fn test_load_datanode_example_config() {
|
||||
metadata_cache_tti: Duration::from_secs(300),
|
||||
}),
|
||||
wal: DatanodeWalConfig::RaftEngine(RaftEngineConfig {
|
||||
dir: Some("/tmp/greptimedb/wal".to_string()),
|
||||
dir: Some("./greptimedb_data/wal".to_string()),
|
||||
sync_period: Some(Duration::from_secs(10)),
|
||||
recovery_parallelism: 2,
|
||||
..Default::default()
|
||||
}),
|
||||
storage: StorageConfig {
|
||||
data_home: "/tmp/greptimedb/".to_string(),
|
||||
data_home: "./greptimedb_data/".to_string(),
|
||||
..Default::default()
|
||||
},
|
||||
region_engine: vec![
|
||||
@@ -159,10 +159,10 @@ fn test_load_metasrv_example_config() {
|
||||
let expected = GreptimeOptions::<MetasrvOptions> {
|
||||
component: MetasrvOptions {
|
||||
selector: SelectorType::default(),
|
||||
data_home: "/tmp/metasrv/".to_string(),
|
||||
data_home: "./greptimedb_data/metasrv/".to_string(),
|
||||
server_addr: "127.0.0.1:3002".to_string(),
|
||||
logging: LoggingOptions {
|
||||
dir: "/tmp/greptimedb/logs".to_string(),
|
||||
dir: "./greptimedb_data/logs".to_string(),
|
||||
level: Some("info".to_string()),
|
||||
otlp_endpoint: Some(DEFAULT_OTLP_ENDPOINT.to_string()),
|
||||
tracing_sample_ratio: Some(Default::default()),
|
||||
@@ -202,7 +202,7 @@ fn test_load_standalone_example_config() {
|
||||
component: StandaloneOptions {
|
||||
default_timezone: Some("UTC".to_string()),
|
||||
wal: DatanodeWalConfig::RaftEngine(RaftEngineConfig {
|
||||
dir: Some("/tmp/greptimedb/wal".to_string()),
|
||||
dir: Some("./greptimedb_data/wal".to_string()),
|
||||
sync_period: Some(Duration::from_secs(10)),
|
||||
recovery_parallelism: 2,
|
||||
..Default::default()
|
||||
@@ -219,7 +219,7 @@ fn test_load_standalone_example_config() {
|
||||
}),
|
||||
],
|
||||
storage: StorageConfig {
|
||||
data_home: "/tmp/greptimedb/".to_string(),
|
||||
data_home: "./greptimedb_data/".to_string(),
|
||||
..Default::default()
|
||||
},
|
||||
logging: LoggingOptions {
|
||||
|
||||
@@ -161,7 +161,7 @@ mod tests {
|
||||
|
||||
[wal]
|
||||
provider = "raft_engine"
|
||||
dir = "/tmp/greptimedb/wal"
|
||||
dir = "./greptimedb_data/wal"
|
||||
file_size = "1GB"
|
||||
purge_threshold = "50GB"
|
||||
purge_interval = "10m"
|
||||
@@ -170,7 +170,7 @@ mod tests {
|
||||
|
||||
[logging]
|
||||
level = "debug"
|
||||
dir = "/tmp/greptimedb/test/logs"
|
||||
dir = "./greptimedb_data/test/logs"
|
||||
"#;
|
||||
write!(file, "{}", toml_str).unwrap();
|
||||
|
||||
@@ -246,7 +246,7 @@ mod tests {
|
||||
let DatanodeWalConfig::RaftEngine(raft_engine_config) = opts.wal else {
|
||||
unreachable!()
|
||||
};
|
||||
assert_eq!(raft_engine_config.dir.unwrap(), "/tmp/greptimedb/wal");
|
||||
assert_eq!(raft_engine_config.dir.unwrap(), "./greptimedb_data/wal");
|
||||
|
||||
// Should be default values.
|
||||
assert_eq!(opts.node_id, None);
|
||||
|
||||
@@ -27,6 +27,7 @@ mod vector_div;
 mod vector_mul;
 mod vector_norm;
 mod vector_sub;
+mod vector_subvector;

 use std::sync::Arc;

@@ -56,6 +57,7 @@ impl VectorFunction {
         registry.register(Arc::new(vector_div::VectorDivFunction));
         registry.register(Arc::new(vector_norm::VectorNormFunction));
         registry.register(Arc::new(vector_dim::VectorDimFunction));
+        registry.register(Arc::new(vector_subvector::VectorSubvectorFunction));
         registry.register(Arc::new(elem_sum::ElemSumFunction));
         registry.register(Arc::new(elem_product::ElemProductFunction));
     }
src/common/function/src/scalars/vector/vector_subvector.rs (new file): 240 lines
@@ -0,0 +1,240 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::borrow::Cow;
|
||||
use std::fmt::Display;
|
||||
|
||||
use common_query::error::{InvalidFuncArgsSnafu, Result};
|
||||
use common_query::prelude::{Signature, TypeSignature};
|
||||
use datafusion_expr::Volatility;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::scalars::ScalarVectorBuilder;
|
||||
use datatypes::vectors::{BinaryVectorBuilder, MutableVector, VectorRef};
|
||||
use snafu::ensure;
|
||||
|
||||
use crate::function::{Function, FunctionContext};
|
||||
use crate::scalars::vector::impl_conv::{as_veclit, as_veclit_if_const, veclit_to_binlit};
|
||||
|
||||
const NAME: &str = "vec_subvector";
|
||||
|
||||
/// Returns a subvector from start(included) to end(excluded) index.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```sql
|
||||
/// SELECT vec_to_string(vec_subvector("[1, 2, 3, 4, 5]", 1, 3)) as result;
|
||||
///
|
||||
/// +---------+
|
||||
/// | result |
|
||||
/// +---------+
|
||||
/// | [2, 3] |
|
||||
/// +---------+
|
||||
///
|
||||
/// ```
|
||||
///
|
||||
|
||||
#[derive(Debug, Clone, Default)]
|
||||
pub struct VectorSubvectorFunction;
|
||||
|
||||
impl Function for VectorSubvectorFunction {
|
||||
fn name(&self) -> &str {
|
||||
NAME
|
||||
}
|
||||
|
||||
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
|
||||
Ok(ConcreteDataType::binary_datatype())
|
||||
}
|
||||
|
||||
fn signature(&self) -> Signature {
|
||||
Signature::one_of(
|
||||
vec![
|
||||
TypeSignature::Exact(vec![
|
||||
ConcreteDataType::string_datatype(),
|
||||
ConcreteDataType::int64_datatype(),
|
||||
ConcreteDataType::int64_datatype(),
|
||||
]),
|
||||
TypeSignature::Exact(vec![
|
||||
ConcreteDataType::binary_datatype(),
|
||||
ConcreteDataType::int64_datatype(),
|
||||
ConcreteDataType::int64_datatype(),
|
||||
]),
|
||||
],
|
||||
Volatility::Immutable,
|
||||
)
|
||||
}
|
||||
|
||||
fn eval(&self, _func_ctx: &FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
|
||||
ensure!(
|
||||
columns.len() == 3,
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"The length of the args is not correct, expect exactly three, have: {}",
|
||||
columns.len()
|
||||
)
|
||||
}
|
||||
);
|
||||
|
||||
let arg0 = &columns[0];
|
||||
let arg1 = &columns[1];
|
||||
let arg2 = &columns[2];
|
||||
|
||||
ensure!(
|
||||
arg0.len() == arg1.len() && arg1.len() == arg2.len(),
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"The lengths of the vector are not aligned, args 0: {}, args 1: {}, args 2: {}",
|
||||
arg0.len(),
|
||||
arg1.len(),
|
||||
arg2.len()
|
||||
)
|
||||
}
|
||||
);
|
||||
|
||||
let len = arg0.len();
|
||||
let mut result = BinaryVectorBuilder::with_capacity(len);
|
||||
if len == 0 {
|
||||
return Ok(result.to_vector());
|
||||
}
|
||||
|
||||
let arg0_const = as_veclit_if_const(arg0)?;
|
||||
|
||||
for i in 0..len {
|
||||
let arg0 = match arg0_const.as_ref() {
|
||||
Some(arg0) => Some(Cow::Borrowed(arg0.as_ref())),
|
||||
None => as_veclit(arg0.get_ref(i))?,
|
||||
};
|
||||
let arg1 = arg1.get(i).as_i64();
|
||||
let arg2 = arg2.get(i).as_i64();
|
||||
let (Some(arg0), Some(arg1), Some(arg2)) = (arg0, arg1, arg2) else {
|
||||
result.push_null();
|
||||
continue;
|
||||
};
|
||||
|
||||
ensure!(
|
||||
0 <= arg1 && arg1 <= arg2 && arg2 as usize <= arg0.len(),
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"Invalid start and end indices: start={}, end={}, vec_len={}",
|
||||
arg1,
|
||||
arg2,
|
||||
arg0.len()
|
||||
)
|
||||
}
|
||||
);
|
||||
|
||||
let subvector = &arg0[arg1 as usize..arg2 as usize];
|
||||
let binlit = veclit_to_binlit(subvector);
|
||||
result.push(Some(&binlit));
|
||||
}
|
||||
|
||||
Ok(result.to_vector())
|
||||
}
|
||||
}
|
||||
|
||||
impl Display for VectorSubvectorFunction {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "{}", NAME.to_ascii_uppercase())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_query::error::Error;
|
||||
use datatypes::vectors::{Int64Vector, StringVector};
|
||||
|
||||
use super::*;
|
||||
use crate::function::FunctionContext;
|
||||
#[test]
|
||||
fn test_subvector() {
|
||||
let func = VectorSubvectorFunction;
|
||||
|
||||
let input0 = Arc::new(StringVector::from(vec![
|
||||
Some("[1.0, 2.0, 3.0, 4.0, 5.0]".to_string()),
|
||||
Some("[6.0, 7.0, 8.0, 9.0, 10.0]".to_string()),
|
||||
None,
|
||||
Some("[11.0, 12.0, 13.0]".to_string()),
|
||||
]));
|
||||
let input1 = Arc::new(Int64Vector::from(vec![Some(1), Some(0), Some(0), Some(1)]));
|
||||
let input2 = Arc::new(Int64Vector::from(vec![Some(3), Some(5), Some(2), Some(3)]));
|
||||
|
||||
let result = func
|
||||
.eval(&FunctionContext::default(), &[input0, input1, input2])
|
||||
.unwrap();
|
||||
|
||||
let result = result.as_ref();
|
||||
assert_eq!(result.len(), 4);
|
||||
assert_eq!(
|
||||
result.get_ref(0).as_binary().unwrap(),
|
||||
Some(veclit_to_binlit(&[2.0, 3.0]).as_slice())
|
||||
);
|
||||
assert_eq!(
|
||||
result.get_ref(1).as_binary().unwrap(),
|
||||
Some(veclit_to_binlit(&[6.0, 7.0, 8.0, 9.0, 10.0]).as_slice())
|
||||
);
|
||||
assert!(result.get_ref(2).is_null());
|
||||
assert_eq!(
|
||||
result.get_ref(3).as_binary().unwrap(),
|
||||
Some(veclit_to_binlit(&[12.0, 13.0]).as_slice())
|
||||
);
|
||||
}
|
||||
#[test]
|
||||
fn test_subvector_error() {
|
||||
let func = VectorSubvectorFunction;
|
||||
|
||||
let input0 = Arc::new(StringVector::from(vec![
|
||||
Some("[1.0, 2.0, 3.0]".to_string()),
|
||||
Some("[4.0, 5.0, 6.0]".to_string()),
|
||||
]));
|
||||
let input1 = Arc::new(Int64Vector::from(vec![Some(1), Some(2)]));
|
||||
let input2 = Arc::new(Int64Vector::from(vec![Some(3)]));
|
||||
|
||||
let result = func.eval(&FunctionContext::default(), &[input0, input1, input2]);
|
||||
|
||||
match result {
|
||||
Err(Error::InvalidFuncArgs { err_msg, .. }) => {
|
||||
assert_eq!(
|
||||
err_msg,
|
||||
"The lengths of the vector are not aligned, args 0: 2, args 1: 2, args 2: 1"
|
||||
)
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_subvector_invalid_indices() {
|
||||
let func = VectorSubvectorFunction;
|
||||
|
||||
let input0 = Arc::new(StringVector::from(vec![
|
||||
Some("[1.0, 2.0, 3.0]".to_string()),
|
||||
Some("[4.0, 5.0, 6.0]".to_string()),
|
||||
]));
|
||||
let input1 = Arc::new(Int64Vector::from(vec![Some(1), Some(3)]));
|
||||
let input2 = Arc::new(Int64Vector::from(vec![Some(3), Some(4)]));
|
||||
|
||||
let result = func.eval(&FunctionContext::default(), &[input0, input1, input2]);
|
||||
|
||||
match result {
|
||||
Err(Error::InvalidFuncArgs { err_msg, .. }) => {
|
||||
assert_eq!(
|
||||
err_msg,
|
||||
"Invalid start and end indices: start=3, end=4, vec_len=3"
|
||||
)
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -26,6 +26,7 @@ use datafusion_common::cast::{as_boolean_array, as_null_array};
|
||||
use datafusion_common::{internal_err, DataFusionError, ScalarValue};
|
||||
use datatypes::arrow::array::{Array, BooleanArray, RecordBatch};
|
||||
use datatypes::arrow::compute::filter_record_batch;
|
||||
use datatypes::compute::or_kleene;
|
||||
use datatypes::vectors::VectorRef;
|
||||
use snafu::ResultExt;
|
||||
|
||||
@@ -47,6 +48,8 @@ pub struct SimpleFilterEvaluator {
|
||||
literal: Scalar<ArrayRef>,
|
||||
/// The operator.
|
||||
op: Operator,
|
||||
/// Only used when the operator is `Or`-chain.
|
||||
literal_list: Vec<Scalar<ArrayRef>>,
|
||||
}
|
||||
|
||||
impl SimpleFilterEvaluator {
|
||||
@@ -69,6 +72,7 @@ impl SimpleFilterEvaluator {
|
||||
column_name,
|
||||
literal: val.to_scalar().ok()?,
|
||||
op,
|
||||
literal_list: vec![],
|
||||
})
|
||||
}
|
||||
|
||||
@@ -83,6 +87,35 @@ impl SimpleFilterEvaluator {
|
||||
| Operator::LtEq
|
||||
| Operator::Gt
|
||||
| Operator::GtEq => {}
|
||||
Operator::Or => {
|
||||
let lhs = Self::try_new(&binary.left)?;
|
||||
let rhs = Self::try_new(&binary.right)?;
if lhs.column_name != rhs.column_name
|| !matches!(lhs.op, Operator::Eq | Operator::Or)
|| !matches!(rhs.op, Operator::Eq | Operator::Or)
{
return None;
}
let mut list = vec![];
let placeholder_literal = lhs.literal.clone();
// above check guarantees the op is either `Eq` or `Or`
if matches!(lhs.op, Operator::Or) {
list.extend(lhs.literal_list);
} else {
list.push(lhs.literal);
}
if matches!(rhs.op, Operator::Or) {
list.extend(rhs.literal_list);
} else {
list.push(rhs.literal);
}
return Some(Self {
column_name: lhs.column_name,
literal: placeholder_literal,
op: Operator::Or,
literal_list: list,
});
}
_ => return None,
}

@@ -103,6 +136,7 @@ impl SimpleFilterEvaluator {
column_name: lhs.name.clone(),
literal,
op,
literal_list: vec![],
})
}
_ => None,
@@ -118,19 +152,19 @@ impl SimpleFilterEvaluator {
let input = input
.to_scalar()
.with_context(|_| ToArrowScalarSnafu { v: input.clone() })?;
let result = self.evaluate_datum(&input)?;
let result = self.evaluate_datum(&input, 1)?;
Ok(result.value(0))
}

pub fn evaluate_array(&self, input: &ArrayRef) -> Result<BooleanBuffer> {
self.evaluate_datum(input)
self.evaluate_datum(input, input.len())
}

pub fn evaluate_vector(&self, input: &VectorRef) -> Result<BooleanBuffer> {
self.evaluate_datum(&input.to_arrow_array())
self.evaluate_datum(&input.to_arrow_array(), input.len())
}

fn evaluate_datum(&self, input: &impl Datum) -> Result<BooleanBuffer> {
fn evaluate_datum(&self, input: &impl Datum, input_len: usize) -> Result<BooleanBuffer> {
let result = match self.op {
Operator::Eq => cmp::eq(input, &self.literal),
Operator::NotEq => cmp::neq(input, &self.literal),
@@ -138,6 +172,15 @@ impl SimpleFilterEvaluator {
Operator::LtEq => cmp::lt_eq(input, &self.literal),
Operator::Gt => cmp::gt(input, &self.literal),
Operator::GtEq => cmp::gt_eq(input, &self.literal),
Operator::Or => {
// OR operator stands for OR-chained EQs (or INLIST in other words)
let mut result: BooleanArray = vec![false; input_len].into();
for literal in &self.literal_list {
let rhs = cmp::eq(input, literal).context(ArrowComputeSnafu)?;
result = or_kleene(&result, &rhs).context(ArrowComputeSnafu)?;
}
Ok(result)
}
_ => {
return UnsupportedOperationSnafu {
reason: format!("{:?}", self.op),
@@ -349,4 +392,49 @@ mod test {
let expected = datatypes::arrow::array::Int32Array::from(vec![5, 6]);
assert_eq!(first_column_values, &expected);
}

#[test]
fn test_complex_filter_expression() {
// Create an expression tree for: col = 'B' OR col = 'C' OR col = 'D'
let col_eq_b = col("col").eq(lit("B"));
let col_eq_c = col("col").eq(lit("C"));
let col_eq_d = col("col").eq(lit("D"));

// Build the OR chain
let col_or_expr = col_eq_b.or(col_eq_c).or(col_eq_d);

// Check that SimpleFilterEvaluator can handle OR chain
let or_evaluator = SimpleFilterEvaluator::try_new(&col_or_expr).unwrap();
assert_eq!(or_evaluator.column_name, "col");
assert_eq!(or_evaluator.op, Operator::Or);
assert_eq!(or_evaluator.literal_list.len(), 3);
assert_eq!(format!("{:?}", or_evaluator.literal_list), "[Scalar(StringArray\n[\n  \"B\",\n]), Scalar(StringArray\n[\n  \"C\",\n]), Scalar(StringArray\n[\n  \"D\",\n])]");

// Create a schema and batch for testing
let schema = Schema::new(vec![Field::new("col", DataType::Utf8, false)]);
let df_schema = DFSchema::try_from(schema.clone()).unwrap();
let props = ExecutionProps::new();
let physical_expr = create_physical_expr(&col_or_expr, &df_schema, &props).unwrap();

// Create test data
let col_data = Arc::new(datatypes::arrow::array::StringArray::from(vec![
"B", "C", "E", "B", "C", "D", "F",
]));
let batch = RecordBatch::try_new(Arc::new(schema), vec![col_data]).unwrap();
let expected = datatypes::arrow::array::StringArray::from(vec!["B", "C", "B", "C", "D"]);

// Filter the batch
let filtered_batch = batch_filter(&batch, &physical_expr).unwrap();

// Expected: rows with col in ("B", "C", "D")
// That would be rows 0, 1, 3, 4, 5
assert_eq!(filtered_batch.num_rows(), 5);

let col_filtered = filtered_batch
.column(0)
.as_any()
.downcast_ref::<datatypes::arrow::array::StringArray>()
.unwrap();
assert_eq!(col_filtered, &expected);
}
}
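Not part of the diff itself — a minimal, dependency-free sketch of the idea the hunks above implement: an OR chain of equality predicates on a single column (`col = 'B' OR col = 'C' OR col = 'D'`) is equivalent to an IN-list membership test over the row values. The function name `eval_in_list` is invented for illustration; the real evaluator works on Arrow arrays via `cmp::eq` and `or_kleene` rather than string slices.

```rust
// Evaluate "value IN (literals)" for every row of a single column.
fn eval_in_list(column: &[&str], literals: &[&str]) -> Vec<bool> {
    column
        .iter()
        .map(|value| literals.iter().any(|lit| value == lit))
        .collect()
}

fn main() {
    let column = ["B", "C", "E", "B", "C", "D", "F"];
    let mask = eval_in_list(&column, &["B", "C", "D"]);
    // Rows 0, 1, 3, 4 and 5 match, as in the test above.
    assert_eq!(mask, vec![true, true, false, true, true, true, false]);
}
```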
@@ -111,7 +111,7 @@ impl Eq for LoggingOptions {}
impl Default for LoggingOptions {
fn default() -> Self {
Self {
dir: "/tmp/greptimedb/logs".to_string(),
dir: "./greptimedb_data/logs".to_string(),
level: None,
log_format: LogFormat::Text,
enable_otlp_tracing: false,

@@ -36,7 +36,7 @@ use servers::Mode;
pub const DEFAULT_OBJECT_STORE_CACHE_SIZE: ReadableSize = ReadableSize::gb(5);

/// Default data home in file storage
const DEFAULT_DATA_HOME: &str = "/tmp/greptimedb";
const DEFAULT_DATA_HOME: &str = "./greptimedb_data";

/// Object storage config
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]

@@ -285,6 +285,20 @@ impl Value {
}
}

/// Cast Value to i64. Return None if value is not a valid int64 data type.
pub fn as_i64(&self) -> Option<i64> {
match self {
Value::Int8(v) => Some(*v as _),
Value::Int16(v) => Some(*v as _),
Value::Int32(v) => Some(*v as _),
Value::Int64(v) => Some(*v),
Value::UInt8(v) => Some(*v as _),
Value::UInt16(v) => Some(*v as _),
Value::UInt32(v) => Some(*v as _),
_ => None,
}
}

/// Cast Value to u64. Return None if value is not a valid uint64 data type.
pub fn as_u64(&self) -> Option<u64> {
match self {
@@ -295,7 +309,6 @@ impl Value {
_ => None,
}
}

/// Cast Value to f64. Return None if it's not castable;
pub fn as_f64_lossy(&self) -> Option<f64> {
match self {
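A tiny, self-contained illustration of why the `as_i64` cast above stops at `UInt32`: every integer type up to u32 widens into i64 without loss, while a u64 value may exceed `i64::MAX` and would need a checked conversion instead.

```rust
fn main() {
    assert_eq!(u32::MAX as i64, 4_294_967_295); // lossless widening
    assert!(i64::try_from(u64::MAX).is_err()); // u64 can overflow i64
}
```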
@@ -70,7 +70,7 @@ use crate::state::{become_follower, become_leader, StateRef};

pub const TABLE_ID_SEQ: &str = "table_id";
pub const FLOW_ID_SEQ: &str = "flow_id";
pub const METASRV_HOME: &str = "/tmp/metasrv";
pub const METASRV_HOME: &str = "./greptimedb_data/metasrv";

#[cfg(feature = "pg_kvbackend")]
pub const DEFAULT_META_TABLE_NAME: &str = "greptime_metakv";
17 src/mito2/src/cache/file_cache.rs vendored
@@ -53,6 +53,8 @@ pub(crate) struct FileCache {
///
/// File id is enough to identity a file uniquely.
memory_index: Cache<IndexKey, IndexValue>,
/// Capacity of the cache.
capacity: ReadableSize,
}

pub(crate) type FileCacheRef = Arc<FileCache>;
@@ -103,6 +105,7 @@ impl FileCache {
FileCache {
local_store,
memory_index,
capacity,
}
}

@@ -198,6 +201,20 @@ impl FileCache {
}
}

/// Returns the available space in the file cache.
pub(crate) fn available_space(&self) -> u64 {
if self.capacity.as_bytes() > self.memory_index.weighted_size() {
self.capacity.as_bytes() - self.memory_index.weighted_size()
} else {
0
}
}

/// Returns the capacity of the file cache.
pub(crate) fn capacity(&self) -> u64 {
self.capacity.as_bytes()
}

async fn recover_inner(&self) -> Result<()> {
let now = Instant::now();
let mut lister = self
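A one-line sketch of the accounting `available_space` above performs: free space is capacity minus the weighted size of cached entries, clamped at zero. The names here are illustrative, not the crate's API; `saturating_sub` expresses the same branch in a single call.

```rust
fn available_space(capacity_bytes: u64, used_bytes: u64) -> u64 {
    capacity_bytes.saturating_sub(used_bytes)
}

fn main() {
    assert_eq!(available_space(10, 4), 6);
    assert_eq!(available_space(10, 12), 0); // an over-filled cache reports zero, not underflow
}
```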
10 src/mito2/src/cache/write_cache.rs vendored
@@ -263,6 +263,16 @@ impl WriteCache {
Ok(())
}

/// Returns the available space in the write cache.
pub(crate) fn available_space(&self) -> u64 {
self.file_cache.available_space()
}

/// Returns the capacity of the write cache.
pub(crate) fn capacity(&self) -> u64 {
self.file_cache.capacity()
}

/// Uploads a Parquet file or a Puffin file to the remote object store.
async fn upload(
&self,

@@ -15,6 +15,7 @@
//! Handling open request.

use std::sync::Arc;
use std::time::Instant;

use common_telemetry::info;
use object_store::util::join_path;
@@ -24,14 +25,18 @@ use store_api::region_request::RegionOpenRequest;
use store_api::storage::RegionId;
use table::requests::STORAGE_KEY;

use crate::cache::file_cache::{FileType, IndexKey};
use crate::cache::write_cache::WriteCacheRef;
use crate::error::{
ObjectStoreNotFoundSnafu, OpenDalSnafu, OpenRegionSnafu, RegionNotFoundSnafu, Result,
};
use crate::metrics::WRITE_CACHE_INFLIGHT_DOWNLOAD;
use crate::region::opener::RegionOpener;
use crate::request::OptionOutputTx;
use crate::sst::location;
use crate::wal::entry_distributor::WalEntryReceiver;
use crate::worker::handle_drop::remove_region_dir_once;
use crate::worker::{RegionWorkerLoop, DROPPING_MARKER_FILE};
use crate::worker::{MitoRegionRef, RegionWorkerLoop, DROPPING_MARKER_FILE};

impl<S: LogStore> RegionWorkerLoop<S> {
async fn check_and_cleanup_region(
@@ -118,6 +123,7 @@ impl<S: LogStore> RegionWorkerLoop<S> {
let opening_regions = self.opening_regions.clone();
let region_count = self.region_count.clone();
let worker_id = self.id;
let write_cache = self.cache_manager.write_cache().cloned();
opening_regions.insert_sender(region_id, sender);
common_runtime::spawn_global(async move {
match opener.open(&config, &wal).await {
@@ -126,12 +132,17 @@ impl<S: LogStore> RegionWorkerLoop<S> {
region_count.inc();

// Insert the Region into the RegionMap.
regions.insert_region(Arc::new(region));
let region = Arc::new(region);
regions.insert_region(region.clone());

let senders = opening_regions.remove_sender(region_id);
for sender in senders {
sender.send(Ok(0));
}

if let Some(write_cache) = write_cache {
prefetch_latest_ssts(region, write_cache).await;
}
}
Err(err) => {
let senders = opening_regions.remove_sender(region_id);
@@ -144,3 +155,85 @@ impl<S: LogStore> RegionWorkerLoop<S> {
});
}
}

/// Download latest SSTs from the remote storage for the region.
async fn prefetch_latest_ssts(region: MitoRegionRef, write_cache: WriteCacheRef) {
let version = region.version();
// Sort ssts by time range in descending order.
let mut ssts: Vec<_> = version
.ssts
.levels()
.iter()
.flat_map(|level| level.files())
.collect();
ssts.sort_unstable_by(|left, right| right.time_range().1.cmp(&left.time_range().1));

let layer = region.access_layer.clone();
let region_id = region.region_id;
// Prefetch the latest SSTs.
let mut has_err = false;
let mut fetched = 0;
let mut downloaded_bytes = 0;
let start = Instant::now();
for sst in ssts {
if has_err || write_cache.available_space() <= write_cache.capacity() / 2 {
break;
}

WRITE_CACHE_INFLIGHT_DOWNLOAD.add(1);

let file_meta = sst.meta_ref();
let index_key = IndexKey::new(region_id, file_meta.file_id, FileType::Parquet);
let remote_path = location::sst_file_path(layer.region_dir(), file_meta.file_id);
let file_size = file_meta.file_size;
if let Err(err) = write_cache
.download(index_key, &remote_path, layer.object_store(), file_size)
.await
{
common_telemetry::error!(
err; "Failed to download parquet file, region_id: {}, index_key: {:?}, remote_path: {}", region_id, index_key, remote_path
);
has_err = true;
} else {
fetched += 1;
downloaded_bytes += file_size;
}

let is_index_exist = file_meta.exists_index();
if !has_err && is_index_exist {
let index_file_size = file_meta.index_file_size();
let index_file_index_key =
IndexKey::new(region_id, file_meta.file_id, FileType::Puffin);
let index_remote_path =
location::index_file_path(layer.region_dir(), file_meta.file_id);
// also download puffin file
if let Err(err) = write_cache
.download(
index_file_index_key,
&index_remote_path,
layer.object_store(),
index_file_size,
)
.await
{
common_telemetry::error!(
err; "Failed to download puffin file, region_id: {}, index_file_index_key: {:?}, index_remote_path: {}", region_id, index_file_index_key, index_remote_path
);
has_err = true;
} else {
fetched += 1;
downloaded_bytes += index_file_size;
}
}

WRITE_CACHE_INFLIGHT_DOWNLOAD.sub(1);
}

common_telemetry::info!(
"region {} prefetched {} files with total {} bytes, elapsed: {:?}",
region_id,
fetched,
downloaded_bytes,
start.elapsed(),
);
}
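A rough, dependency-free sketch of the prefetch budget used by `prefetch_latest_ssts` above: files are visited newest first and downloading stops once free space falls to half of the cache capacity. `FileInfo`, `plan_prefetch`, and the byte counts are invented for illustration and stand in for SST metadata and the write-cache accounting.

```rust
struct FileInfo {
    size: u64,
    max_timestamp: i64,
}

fn plan_prefetch(mut files: Vec<FileInfo>, capacity: u64, mut used: u64) -> Vec<FileInfo> {
    // Newest data first, matching the sort on `time_range().1` above.
    files.sort_unstable_by(|a, b| b.max_timestamp.cmp(&a.max_timestamp));
    let mut planned = Vec::new();
    for file in files {
        let available = capacity.saturating_sub(used);
        if available <= capacity / 2 {
            break; // keep at least half of the cache free for regular writes
        }
        used += file.size;
        planned.push(file);
    }
    planned
}

fn main() {
    let files = vec![
        FileInfo { size: 30, max_timestamp: 1 },
        FileInfo { size: 30, max_timestamp: 3 },
        FileInfo { size: 30, max_timestamp: 2 },
    ];
    // With capacity 100 and nothing cached yet, only the two newest files fit
    // before the half-capacity threshold is reached.
    let planned = plan_prefetch(files, 100, 0);
    assert_eq!(planned.len(), 2);
}
```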
@@ -24,6 +24,7 @@ pub mod join;
pub mod json_path;
pub mod letter;
pub mod regex;
pub mod simple_extract;
pub mod timestamp;
pub mod urlencoding;

@@ -51,6 +52,7 @@ use super::error::{
use super::field::{Field, Fields};
use super::PipelineMap;
use crate::etl::error::{Error, Result};
use crate::etl::processor::simple_extract::SimpleExtractProcessor;
use crate::etl_error::UnsupportedProcessorSnafu;

const FIELD_NAME: &str = "field";
@@ -63,6 +65,7 @@ const SEPARATOR_NAME: &str = "separator";
const TARGET_FIELDS_NAME: &str = "target_fields";
const JSON_PATH_NAME: &str = "json_path";
const JSON_PATH_RESULT_INDEX_NAME: &str = "result_index";
const SIMPLE_EXTRACT_KEY_NAME: &str = "key";

/// Processor trait defines the interface for all processors.
///
@@ -97,6 +100,7 @@ pub enum ProcessorKind {
Epoch(EpochProcessor),
Date(DateProcessor),
JsonPath(JsonPathProcessor),
SimpleJsonPath(SimpleExtractProcessor),
Decolorize(DecolorizeProcessor),
Digest(DigestProcessor),
}
@@ -174,6 +178,9 @@ fn parse_processor(doc: &yaml_rust::Yaml) -> Result<ProcessorKind> {
ProcessorKind::Decolorize(DecolorizeProcessor::try_from(value)?)
}
digest::PROCESSOR_DIGEST => ProcessorKind::Digest(DigestProcessor::try_from(value)?),
simple_extract::PROCESSOR_SIMPLE_EXTRACT => {
ProcessorKind::SimpleJsonPath(SimpleExtractProcessor::try_from(value)?)
}
_ => return UnsupportedProcessorSnafu { processor: str_key }.fail(),
};

148 src/pipeline/src/etl/processor/simple_extract.rs Normal file
@@ -0,0 +1,148 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use snafu::OptionExt as _;

use crate::etl::error::{Error, Result};
use crate::etl::field::Fields;
use crate::etl::processor::{
yaml_bool, yaml_new_field, yaml_new_fields, yaml_string, FIELDS_NAME, FIELD_NAME,
IGNORE_MISSING_NAME, SIMPLE_EXTRACT_KEY_NAME,
};
use crate::etl_error::{KeyMustBeStringSnafu, ProcessorMissingFieldSnafu};
use crate::{PipelineMap, Processor, Value};

pub(crate) const PROCESSOR_SIMPLE_EXTRACT: &str = "simple_extract";

#[derive(Debug, Default)]
pub struct SimpleExtractProcessor {
fields: Fields,
/// simple keys to extract nested JSON field
/// key `a.b` is saved as ['a', 'b'], each key represents a level of the JSON tree
key: Vec<String>,
ignore_missing: bool,
}

impl TryFrom<&yaml_rust::yaml::Hash> for SimpleExtractProcessor {
type Error = Error;

fn try_from(value: &yaml_rust::yaml::Hash) -> std::result::Result<Self, Self::Error> {
let mut fields = Fields::default();
let mut ignore_missing = false;
let mut keys = vec![];

for (k, v) in value.iter() {
let key = k
.as_str()
.with_context(|| KeyMustBeStringSnafu { k: k.clone() })?;
match key {
FIELD_NAME => {
fields = Fields::one(yaml_new_field(v, FIELD_NAME)?);
}
FIELDS_NAME => {
fields = yaml_new_fields(v, FIELDS_NAME)?;
}
IGNORE_MISSING_NAME => {
ignore_missing = yaml_bool(v, IGNORE_MISSING_NAME)?;
}
SIMPLE_EXTRACT_KEY_NAME => {
let key_str = yaml_string(v, SIMPLE_EXTRACT_KEY_NAME)?;
keys.extend(key_str.split(".").map(|s| s.to_string()));
}
_ => {}
}
}

let processor = SimpleExtractProcessor {
fields,
key: keys,
ignore_missing,
};

Ok(processor)
}
}

impl SimpleExtractProcessor {
fn process_field(&self, val: &Value) -> Result<Value> {
let mut current = val;
for key in self.key.iter() {
let Value::Map(map) = current else {
return Ok(Value::Null);
};
let Some(v) = map.get(key) else {
return Ok(Value::Null);
};
current = v;
}
Ok(current.clone())
}
}

impl Processor for SimpleExtractProcessor {
fn kind(&self) -> &str {
PROCESSOR_SIMPLE_EXTRACT
}

fn ignore_missing(&self) -> bool {
self.ignore_missing
}

fn exec_mut(&self, val: &mut PipelineMap) -> Result<()> {
for field in self.fields.iter() {
let index = field.input_field();
match val.get(index) {
Some(v) => {
let processed = self.process_field(v)?;
let output_index = field.target_or_input_field();
val.insert(output_index.to_string(), processed);
}
None => {
if !self.ignore_missing {
return ProcessorMissingFieldSnafu {
processor: self.kind(),
field: field.input_field(),
}
.fail();
}
}
}
}
Ok(())
}
}

#[cfg(test)]
mod test {

#[test]
fn test_json_path() {
use super::*;
use crate::{Map, Value};

let processor = SimpleExtractProcessor {
key: vec!["hello".to_string()],
..Default::default()
};

let result = processor
.process_field(&Value::Map(Map::one(
"hello",
Value::String("world".to_string()),
)))
.unwrap();

assert_eq!(result, Value::String("world".to_string()));
}
}
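A standalone sketch of the dotted-key lookup that `process_field` above performs, using only the standard library. The `Val` enum and `extract` function are illustrative stand-ins for the pipeline's `Value` type: the key `a.b` is split into `["a", "b"]` and each segment descends one level of a nested map, yielding `Null` when a segment is missing.

```rust
use std::collections::HashMap;

#[derive(Clone, Debug, PartialEq)]
enum Val {
    Null,
    Str(String),
    Map(HashMap<String, Val>),
}

fn extract(mut current: &Val, dotted_key: &str) -> Val {
    for segment in dotted_key.split('.') {
        let Val::Map(map) = current else { return Val::Null };
        let Some(next) = map.get(segment) else { return Val::Null };
        current = next;
    }
    current.clone()
}

fn main() {
    let commit = Val::Map(HashMap::from([(
        "commitAuthor".to_string(),
        Val::Str("test".to_string()),
    )]));
    let root = Val::Map(HashMap::from([("commit".to_string(), commit)]));
    assert_eq!(extract(&root, "commit.commitAuthor"), Val::Str("test".to_string()));
    assert_eq!(extract(&root, "commit.missing"), Val::Null);
}
```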
@@ -18,6 +18,7 @@ pub mod transformer;
use snafu::OptionExt;

use crate::etl::error::{Error, Result};
use crate::etl::processor::yaml_bool;
use crate::etl::transform::index::Index;
use crate::etl::value::Value;

@@ -25,6 +26,7 @@ const TRANSFORM_FIELD: &str = "field";
const TRANSFORM_FIELDS: &str = "fields";
const TRANSFORM_TYPE: &str = "type";
const TRANSFORM_INDEX: &str = "index";
const TRANSFORM_TAG: &str = "tag";
const TRANSFORM_DEFAULT: &str = "default";
const TRANSFORM_ON_FAILURE: &str = "on_failure";

@@ -144,6 +146,8 @@ pub struct Transform {

pub index: Option<Index>,

pub tag: bool,

pub on_failure: Option<OnFailure>,
}

@@ -154,6 +158,7 @@ impl Default for Transform {
type_: Value::Null,
default: None,
index: None,
tag: false,
on_failure: None,
}
}
@@ -185,6 +190,7 @@ impl TryFrom<&yaml_rust::yaml::Hash> for Transform {
let mut type_ = Value::Null;
let mut default = None;
let mut index = None;
let mut tag = false;
let mut on_failure = None;

for (k, v) in hash {
@@ -210,6 +216,10 @@ impl TryFrom<&yaml_rust::yaml::Hash> for Transform {
index = Some(index_str.try_into()?);
}

TRANSFORM_TAG => {
tag = yaml_bool(v, TRANSFORM_TAG)?;
}

TRANSFORM_DEFAULT => {
default = Some(Value::try_from(v)?);
}
@@ -247,6 +257,7 @@ impl TryFrom<&yaml_rust::yaml::Hash> for Transform {
default: final_default,
index,
on_failure,
tag,
};

Ok(builder)

@@ -19,14 +19,17 @@ const INDEX_TIMEINDEX: &str = "time";
const INDEX_TAG: &str = "tag";
const INDEX_FULLTEXT: &str = "fulltext";
const INDEX_SKIPPING: &str = "skipping";
const INDEX_INVERTED: &str = "inverted";

#[derive(Debug, PartialEq, Eq, Clone, Copy)]
#[allow(clippy::enum_variant_names)]
pub enum Index {
Time,
// deprecated, use Inverted instead
Tag,
Fulltext,
Skipping,
Inverted,
}

impl std::fmt::Display for Index {
@@ -36,6 +39,7 @@ impl std::fmt::Display for Index {
Index::Tag => INDEX_TAG,
Index::Fulltext => INDEX_FULLTEXT,
Index::Skipping => INDEX_SKIPPING,
Index::Inverted => INDEX_INVERTED,
};

write!(f, "{}", index)
@@ -59,6 +63,7 @@ impl TryFrom<&str> for Index {
INDEX_TAG => Ok(Index::Tag),
INDEX_FULLTEXT => Ok(Index::Fulltext),
INDEX_SKIPPING => Ok(Index::Skipping),
INDEX_INVERTED => Ok(Index::Inverted),
_ => UnsupportedIndexTypeSnafu { value }.fail(),
}
}

@@ -96,6 +96,7 @@ impl GreptimeTransformer {
default,
index: Some(Index::Time),
on_failure: Some(crate::etl::transform::OnFailure::Default),
tag: false,
};
transforms.push(transform);
}

@@ -13,7 +13,7 @@
// limitations under the License.

use api::v1::column_data_type_extension::TypeExt;
use api::v1::column_def::{options_from_fulltext, options_from_skipping};
use api::v1::column_def::{options_from_fulltext, options_from_inverted, options_from_skipping};
use api::v1::{ColumnDataTypeExtension, ColumnOptions, JsonTypeExtension};
use datatypes::schema::{FulltextOptions, SkippingIndexOptions};
use greptime_proto::v1::value::ValueData;
@@ -95,10 +95,16 @@ pub(crate) fn coerce_columns(transform: &Transform) -> Result<Vec<ColumnSchema>>
}

fn coerce_semantic_type(transform: &Transform) -> SemanticType {
if transform.tag {
return SemanticType::Tag;
}

match transform.index {
Some(Index::Tag) => SemanticType::Tag,
Some(Index::Time) => SemanticType::Timestamp,
Some(Index::Fulltext) | Some(Index::Skipping) | None => SemanticType::Field,
Some(Index::Fulltext) | Some(Index::Skipping) | Some(Index::Inverted) | None => {
SemanticType::Field
}
}
}

@@ -112,6 +118,7 @@ fn coerce_options(transform: &Transform) -> Result<Option<ColumnOptions>> {
Some(Index::Skipping) => {
options_from_skipping(&SkippingIndexOptions::default()).context(ColumnOptionsSnafu)
}
Some(Index::Inverted) => Ok(Some(options_from_inverted())),
_ => Ok(None),
}
}
@@ -478,6 +485,7 @@ mod tests {
default: None,
index: None,
on_failure: None,
tag: false,
};

// valid string
@@ -503,6 +511,7 @@ mod tests {
default: None,
index: None,
on_failure: Some(OnFailure::Ignore),
tag: false,
};

let val = Value::String("hello".to_string());
@@ -518,6 +527,7 @@ mod tests {
default: None,
index: None,
on_failure: Some(OnFailure::Default),
tag: false,
};

// with no explicit default value

69 src/pipeline/tests/simple_extract.rs Normal file
@@ -0,0 +1,69 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

mod common;

use api::v1::value::ValueData;
use api::v1::{ColumnDataType, ColumnSchema, SemanticType};
use lazy_static::lazy_static;

lazy_static! {
static ref EXPECTED_SCHEMA: Vec<ColumnSchema> = vec![
common::make_column_schema(
"commit_author".to_string(),
ColumnDataType::String,
SemanticType::Field,
),
common::make_column_schema(
"greptime_timestamp".to_string(),
ColumnDataType::TimestampNanosecond,
SemanticType::Timestamp,
),
];
}

#[test]
fn test_gsub() {
let input_value_str = r#"
[
{
"commit": {
"commitTime": "1573840000.000",
"commitAuthor": "test"
}
}
]
"#;

let pipeline_yaml = r#"
---
processors:
- simple_extract:
field: commit, commit_author
key: "commitAuthor"

transform:
- field: commit_author
type: string
"#;

let output = common::parse_and_exec(input_value_str, pipeline_yaml);

assert_eq!(output.schema, *EXPECTED_SCHEMA);

assert_eq!(
output.rows[0].values[0].value_data,
Some(ValueData::StringValue("test".to_string()))
);
}
@@ -12,12 +12,14 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::io::BufRead;
use std::str::FromStr;
use std::sync::Arc;
use std::time::Instant;

use api::v1::RowInsertRequests;
use async_trait::async_trait;
use axum::body::Bytes;
use axum::extract::{FromRequest, Multipart, Path, Query, Request, State};
use axum::http::header::CONTENT_TYPE;
use axum::http::{HeaderMap, StatusCode};
@@ -389,8 +391,8 @@ pub struct PipelineDryrunParams {
/// Check if the payload is valid json
/// Check if the payload contains pipeline or pipeline_name and data
/// Return Some if valid, None if invalid
fn check_pipeline_dryrun_params_valid(payload: &str) -> Option<PipelineDryrunParams> {
match serde_json::from_str::<PipelineDryrunParams>(payload) {
fn check_pipeline_dryrun_params_valid(payload: &Bytes) -> Option<PipelineDryrunParams> {
match serde_json::from_slice::<PipelineDryrunParams>(payload) {
// payload with pipeline or pipeline_name and data is array
Ok(params) if params.pipeline.is_some() || params.pipeline_name.is_some() => Some(params),
// because of the pipeline_name or pipeline is required
@@ -432,7 +434,7 @@ pub async fn pipeline_dryrun(
Query(query_params): Query<LogIngesterQueryParams>,
Extension(mut query_ctx): Extension<QueryContext>,
TypedHeader(content_type): TypedHeader<ContentType>,
payload: String,
payload: Bytes,
) -> Result<Response> {
let handler = log_state.log_handler;

@@ -514,7 +516,7 @@ pub async fn log_ingester(
Extension(mut query_ctx): Extension<QueryContext>,
TypedHeader(content_type): TypedHeader<ContentType>,
headers: HeaderMap,
payload: String,
payload: Bytes,
) -> Result<HttpResponse> {
// validate source and payload
let source = query_params.source.as_deref();
@@ -565,40 +567,45 @@ pub async fn log_ingester(

fn extract_pipeline_value_by_content_type(
content_type: ContentType,
payload: String,
payload: Bytes,
ignore_errors: bool,
) -> Result<Vec<Value>> {
Ok(match content_type {
ct if ct == *JSON_CONTENT_TYPE => transform_ndjson_array_factory(
Deserializer::from_str(&payload).into_iter(),
Deserializer::from_slice(&payload).into_iter(),
ignore_errors,
)?,
ct if ct == *NDJSON_CONTENT_TYPE => {
let mut result = Vec::with_capacity(1000);
for (index, line) in payload.lines().enumerate() {
match serde_json::from_str(line) {
Ok(v) => {
result.push(v);
}
Err(_) => {
if !ignore_errors {
warn!(
"invalid json item in array, index: {:?}, value: {:?}",
index, line
);
return InvalidParameterSnafu {
reason: format!("invalid item:{} in array", line),
}
.fail();
let line = match line {
Ok(line) if !line.is_empty() => line,
Ok(_) => continue, // Skip empty lines
Err(_) if ignore_errors => continue,
Err(e) => {
warn!(e; "invalid string at index: {}", index);
return InvalidParameterSnafu {
reason: format!("invalid line at index: {}", index),
}
.fail();
}
};

if let Ok(v) = serde_json::from_str(&line) {
result.push(v);
} else if !ignore_errors {
warn!("invalid JSON at index: {}, content: {:?}", index, line);
return InvalidParameterSnafu {
reason: format!("invalid JSON at index: {}", index),
}
.fail();
}
}
result
}
ct if ct == *TEXT_CONTENT_TYPE || ct == *TEXT_UTF8_CONTENT_TYPE => payload
.lines()
.filter(|line| !line.is_empty())
.filter_map(|line| line.ok().filter(|line| !line.is_empty()))
.map(|line| json!({"message": line}))
.collect(),
_ => UnsupportedContentTypeSnafu { content_type }.fail()?,
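A minimal sketch of the NDJSON branch above, assuming only `serde_json` as a dependency. `parse_ndjson` is an invented stand-in for `extract_pipeline_value_by_content_type`, with error handling reduced to plain strings: the payload stays as bytes, is read line by line, empty lines are skipped, and invalid lines are either skipped or reported depending on `ignore_errors`.

```rust
use std::io::BufRead;

use serde_json::{json, Value};

fn parse_ndjson(payload: &[u8], ignore_errors: bool) -> Result<Vec<Value>, String> {
    let mut result = Vec::new();
    for (index, line) in payload.lines().enumerate() {
        let line = match line {
            Ok(line) if !line.is_empty() => line,
            Ok(_) => continue, // skip empty lines
            Err(_) if ignore_errors => continue,
            Err(e) => return Err(format!("invalid string at index {index}: {e}")),
        };
        match serde_json::from_str::<Value>(&line) {
            Ok(v) => result.push(v),
            Err(_) if ignore_errors => continue,
            Err(e) => return Err(format!("invalid JSON at index {index}: {e}")),
        }
    }
    Ok(result)
}

fn main() {
    let payload: &[u8] = b"{\"a\": 1}\n\n{\"b\": 2\"}\n{\"c\": 1}\n";
    // With ignore_errors the malformed second object is dropped, as in the test above.
    let values = parse_ndjson(payload, true).unwrap();
    assert_eq!(values, vec![json!({"a": 1}), json!({"c": 1})]);
}
```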
@@ -677,7 +684,8 @@ pub(crate) async fn ingest_logs_inner(
pub trait LogValidator: Send + Sync {
/// validate payload by source before processing
/// Return a `Some` result to indicate validation failure.
async fn validate(&self, source: Option<&str>, payload: &str) -> Option<Result<HttpResponse>>;
async fn validate(&self, source: Option<&str>, payload: &Bytes)
-> Option<Result<HttpResponse>>;
}

pub type LogValidatorRef = Arc<dyn LogValidator + 'static>;
@@ -731,17 +739,17 @@ mod tests {
{"a": 1}
{"b": 2"}
{"c": 1}
"#;
"#
.as_bytes();
let payload = Bytes::from_static(payload);

let fail_rest =
extract_pipeline_value_by_content_type(ContentType::json(), payload.to_string(), true);
extract_pipeline_value_by_content_type(ContentType::json(), payload.clone(), true);
assert!(fail_rest.is_ok());
assert_eq!(fail_rest.unwrap(), vec![json!({"a": 1})]);

let fail_only_wrong = extract_pipeline_value_by_content_type(
NDJSON_CONTENT_TYPE.clone(),
payload.to_string(),
true,
);
let fail_only_wrong =
extract_pipeline_value_by_content_type(NDJSON_CONTENT_TYPE.clone(), payload, true);
assert!(fail_only_wrong.is_ok());
assert_eq!(
fail_only_wrong.unwrap(),

@@ -32,22 +32,22 @@ pub struct TraceSpan {
pub service_name: Option<String>,
pub trace_id: String,
pub span_id: String,
pub parent_span_id: String,
pub parent_span_id: Option<String>,

// the following are fields
pub resource_attributes: Attributes, // TODO(yuanbohan): Map in the future
pub resource_attributes: Attributes,
pub scope_name: String,
pub scope_version: String,
pub scope_attributes: Attributes, // TODO(yuanbohan): Map in the future
pub scope_attributes: Attributes,
pub trace_state: String,
pub span_name: String,
pub span_kind: String,
pub span_status_code: String,
pub span_status_message: String,
pub span_attributes: Attributes, // TODO(yuanbohan): Map in the future
pub span_events: SpanEvents, // TODO(yuanbohan): List in the future
pub span_links: SpanLinks, // TODO(yuanbohan): List in the future
pub start_in_nanosecond: u64, // this is also the Timestamp Index
pub span_attributes: Attributes,
pub span_events: SpanEvents, // TODO(yuanbohan): List in the future
pub span_links: SpanLinks, // TODO(yuanbohan): List in the future
pub start_in_nanosecond: u64, // this is also the Timestamp Index
pub end_in_nanosecond: u64,
}

@@ -203,7 +203,11 @@ pub fn parse_span(
service_name,
trace_id: bytes_to_hex_string(&span.trace_id),
span_id: bytes_to_hex_string(&span.span_id),
parent_span_id: bytes_to_hex_string(&span.parent_span_id),
parent_span_id: if span.parent_span_id.is_empty() {
None
} else {
Some(bytes_to_hex_string(&span.parent_span_id))
},

resource_attributes: Attributes::from(resource_attrs),
trace_state: span.trace_state,
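A small sketch of the convention the hunk above adopts: OTLP represents a missing parent as an empty byte slice, which now maps to `None` rather than an empty string. `parent_span_id` here is an illustrative helper, with an inline stand-in for `bytes_to_hex_string`.

```rust
fn parent_span_id(raw: &[u8]) -> Option<String> {
    if raw.is_empty() {
        None
    } else {
        // stand-in for `bytes_to_hex_string`
        Some(raw.iter().map(|b| format!("{b:02x}")).collect())
    }
}

fn main() {
    assert_eq!(parent_span_id(&[]), None);
    assert_eq!(parent_span_id(&[0xd2, 0x4f]), Some("d24f".to_string()));
}
```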
@@ -91,7 +91,10 @@ pub fn write_span_to_row(writer: &mut TableData, span: TraceSpan) -> Result<()>
let iter = vec![
(TRACE_ID_COLUMN, span.trace_id),
(SPAN_ID_COLUMN, span.span_id),
(PARENT_SPAN_ID_COLUMN, span.parent_span_id),
(
PARENT_SPAN_ID_COLUMN,
span.parent_span_id.unwrap_or_default(),
),
]
.into_iter()
.map(|(col, val)| (col.to_string(), val));

@@ -103,8 +103,18 @@ pub fn write_span_to_row(writer: &mut TableData, span: TraceSpan) -> Result<()>
row_writer::write_tags(writer, tags.into_iter(), &mut row)?;

// write fields
if let Some(parent_span_id) = span.parent_span_id {
row_writer::write_fields(
writer,
std::iter::once(make_string_column_data(
PARENT_SPAN_ID_COLUMN,
parent_span_id,
)),
&mut row,
)?;
}

let fields = vec![
make_string_column_data(PARENT_SPAN_ID_COLUMN, span.parent_span_id),
make_string_column_data(SPAN_KIND_COLUMN, span.span_kind),
make_string_column_data(SPAN_NAME_COLUMN, span.span_name),
make_string_column_data("span_status_code", span.span_status_code),

@@ -391,7 +391,7 @@ async fn test_config() {

[logging]
level = "debug"
dir = "/tmp/greptimedb/test/logs"
dir = "./greptimedb_data/test/logs"
"#;
let rs = http_handler::config(State(GreptimeOptionsConfigState {
greptime_config_options: toml_str.to_string(),
@@ -37,16 +37,16 @@ common-telemetry.workspace = true
common-test-util.workspace = true
common-time.workspace = true
common-wal.workspace = true
datanode = { workspace = true }
datanode.workspace = true
datatypes.workspace = true
dotenv.workspace = true
flate2 = "1.0"
flate2.workspace = true
flow.workspace = true
frontend = { workspace = true, features = ["testing"] }
futures.workspace = true
futures-util.workspace = true
hyper-util = { workspace = true, features = ["tokio"] }
log-query = { workspace = true }
log-query.workspace = true
loki-proto.workspace = true
meta-client.workspace = true
meta-srv = { workspace = true, features = ["mock"] }
@@ -96,5 +96,5 @@ prost.workspace = true
rand.workspace = true
session = { workspace = true, features = ["testing"] }
store-api.workspace = true
tokio-postgres = { workspace = true }
tokio-postgres.workspace = true
url = "2.3"
@@ -1265,15 +1265,18 @@ transform:
- id1
- id2
type: int32
index: inverted
- fields:
- logger
type: string
- field: type
type: string
index: skipping
tag: true
- field: log
type: string
index: fulltext
tag: true
- field: time
type: time
index: timestamp
@@ -1348,8 +1351,7 @@ transform:
assert_eq!(res.status(), StatusCode::OK);

// 3. check schema

let expected_schema = "[[\"logs1\",\"CREATE TABLE IF NOT EXISTS \\\"logs1\\\" (\\n \\\"id1\\\" INT NULL,\\n \\\"id2\\\" INT NULL,\\n \\\"logger\\\" STRING NULL,\\n \\\"type\\\" STRING NULL SKIPPING INDEX WITH(granularity = '10240', type = 'BLOOM'),\\n \\\"log\\\" STRING NULL FULLTEXT INDEX WITH(analyzer = 'English', case_sensitive = 'false'),\\n \\\"time\\\" TIMESTAMP(9) NOT NULL,\\n TIME INDEX (\\\"time\\\")\\n)\\n\\nENGINE=mito\\nWITH(\\n append_mode = 'true'\\n)\"]]";
let expected_schema = "[[\"logs1\",\"CREATE TABLE IF NOT EXISTS \\\"logs1\\\" (\\n \\\"id1\\\" INT NULL INVERTED INDEX,\\n \\\"id2\\\" INT NULL INVERTED INDEX,\\n \\\"logger\\\" STRING NULL,\\n \\\"type\\\" STRING NULL SKIPPING INDEX WITH(granularity = '10240', type = 'BLOOM'),\\n \\\"log\\\" STRING NULL FULLTEXT INDEX WITH(analyzer = 'English', case_sensitive = 'false'),\\n \\\"time\\\" TIMESTAMP(9) NOT NULL,\\n TIME INDEX (\\\"time\\\"),\\n PRIMARY KEY (\\\"type\\\", \\\"log\\\")\\n)\\n\\nENGINE=mito\\nWITH(\\n append_mode = 'true'\\n)\"]]";
validate_data(
"pipeline_schema",
&client,
@@ -2195,7 +2197,7 @@ pub async fn test_otlp_traces_v1(store_type: StorageType) {
assert_eq!(StatusCode::OK, res.status());

// select traces data
let expected = r#"[[1736480942444376000,1736480942444499000,123000,"c05d7a4ec8e1f231f02ed6e8da8655b4","9630f2916e2f7909","d24f921c75f68e23","SPAN_KIND_SERVER","okey-dokey-0","STATUS_CODE_UNSET","","","telemetrygen","","telemetrygen","1.2.3.4","telemetrygen-client",[],[]],[1736480942444376000,1736480942444499000,123000,"c05d7a4ec8e1f231f02ed6e8da8655b4","d24f921c75f68e23","","SPAN_KIND_CLIENT","lets-go","STATUS_CODE_UNSET","","","telemetrygen","","telemetrygen","1.2.3.4","telemetrygen-server",[],[]],[1736480942444589000,1736480942444712000,123000,"cc9e0991a2e63d274984bd44ee669203","8f847259b0f6e1ab","eba7be77e3558179","SPAN_KIND_SERVER","okey-dokey-0","STATUS_CODE_UNSET","","","telemetrygen","","telemetrygen","1.2.3.4","telemetrygen-client",[],[]],[1736480942444589000,1736480942444712000,123000,"cc9e0991a2e63d274984bd44ee669203","eba7be77e3558179","","SPAN_KIND_CLIENT","lets-go","STATUS_CODE_UNSET","","","telemetrygen","","telemetrygen","1.2.3.4","telemetrygen-server",[],[]]]"#;
let expected = r#"[[1736480942444376000,1736480942444499000,123000,"c05d7a4ec8e1f231f02ed6e8da8655b4","9630f2916e2f7909","d24f921c75f68e23","SPAN_KIND_SERVER","okey-dokey-0","STATUS_CODE_UNSET","","","telemetrygen","","telemetrygen","1.2.3.4","telemetrygen-client",[],[]],[1736480942444376000,1736480942444499000,123000,"c05d7a4ec8e1f231f02ed6e8da8655b4","d24f921c75f68e23",null,"SPAN_KIND_CLIENT","lets-go","STATUS_CODE_UNSET","","","telemetrygen","","telemetrygen","1.2.3.4","telemetrygen-server",[],[]],[1736480942444589000,1736480942444712000,123000,"cc9e0991a2e63d274984bd44ee669203","8f847259b0f6e1ab","eba7be77e3558179","SPAN_KIND_SERVER","okey-dokey-0","STATUS_CODE_UNSET","","","telemetrygen","","telemetrygen","1.2.3.4","telemetrygen-client",[],[]],[1736480942444589000,1736480942444712000,123000,"cc9e0991a2e63d274984bd44ee669203","eba7be77e3558179",null,"SPAN_KIND_CLIENT","lets-go","STATUS_CODE_UNSET","","","telemetrygen","","telemetrygen","1.2.3.4","telemetrygen-server",[],[]]]"#;
validate_data("otlp_traces", &client, "select * from mytable;", expected).await;

let expected_ddl = r#"[["mytable","CREATE TABLE IF NOT EXISTS \"mytable\" (\n \"timestamp\" TIMESTAMP(9) NOT NULL,\n \"timestamp_end\" TIMESTAMP(9) NULL,\n \"duration_nano\" BIGINT UNSIGNED NULL,\n \"trace_id\" STRING NULL SKIPPING INDEX WITH(granularity = '10240', type = 'BLOOM'),\n \"span_id\" STRING NULL,\n \"parent_span_id\" STRING NULL SKIPPING INDEX WITH(granularity = '10240', type = 'BLOOM'),\n \"span_kind\" STRING NULL,\n \"span_name\" STRING NULL SKIPPING INDEX WITH(granularity = '10240', type = 'BLOOM'),\n \"span_status_code\" STRING NULL,\n \"span_status_message\" STRING NULL,\n \"trace_state\" STRING NULL,\n \"scope_name\" STRING NULL,\n \"scope_version\" STRING NULL,\n \"service_name\" STRING NULL,\n \"span_attributes.net.peer.ip\" STRING NULL,\n \"span_attributes.peer.service\" STRING NULL,\n \"span_events\" JSON NULL,\n \"span_links\" JSON NULL,\n TIME INDEX (\"timestamp\"),\n PRIMARY KEY (\"trace_id\", \"span_id\")\n)\nPARTITION ON COLUMNS (\"trace_id\") (\n trace_id < '1',\n trace_id >= '1' AND trace_id < '2',\n trace_id >= '2' AND trace_id < '3',\n trace_id >= '3' AND trace_id < '4',\n trace_id >= '4' AND trace_id < '5',\n trace_id >= '5' AND trace_id < '6',\n trace_id >= '6' AND trace_id < '7',\n trace_id >= '7' AND trace_id < '8',\n trace_id >= '8' AND trace_id < '9',\n trace_id >= '9' AND trace_id < 'A',\n trace_id >= 'A' AND trace_id < 'B' OR trace_id >= 'a' AND trace_id < 'b',\n trace_id >= 'B' AND trace_id < 'C' OR trace_id >= 'b' AND trace_id < 'c',\n trace_id >= 'C' AND trace_id < 'D' OR trace_id >= 'c' AND trace_id < 'd',\n trace_id >= 'D' AND trace_id < 'E' OR trace_id >= 'd' AND trace_id < 'e',\n trace_id >= 'E' AND trace_id < 'F' OR trace_id >= 'e' AND trace_id < 'f',\n trace_id >= 'F' AND trace_id < 'a' OR trace_id >= 'f'\n)\nENGINE=mito\nWITH(\n append_mode = 'true'\n)"]]"#;
@@ -326,3 +326,53 @@ FROM (
| [7.0, 8.0, 9.0, 10.0] | 4 |
+-----------------------+------------+

SELECT vec_to_string(vec_subvector('[1.0,2.0,3.0,4.0,5.0]', 0, 3));

+-------------------------------------------------------------------------------+
| vec_to_string(vec_subvector(Utf8("[1.0,2.0,3.0,4.0,5.0]"),Int64(0),Int64(3))) |
+-------------------------------------------------------------------------------+
| [1,2,3] |
+-------------------------------------------------------------------------------+

SELECT vec_to_string(vec_subvector('[1.0,2.0,3.0,4.0,5.0]', 5, 5));

+-------------------------------------------------------------------------------+
| vec_to_string(vec_subvector(Utf8("[1.0,2.0,3.0,4.0,5.0]"),Int64(5),Int64(5))) |
+-------------------------------------------------------------------------------+
| [] |
+-------------------------------------------------------------------------------+

SELECT v, vec_to_string(vec_subvector(v, 3, 5))
FROM (
SELECT '[1.0, 2.0, 3.0, 4.0, 5.0]' AS v
UNION ALL
SELECT '[-1.0, -2.0, -3.0, -4.0, -5.0, -6.0]' AS v
UNION ALL
SELECT '[4.0, 5.0, 6.0, 10, -8, 100]' AS v
) ORDER BY v;

+--------------------------------------+---------------------------------------------------+
| v | vec_to_string(vec_subvector(v,Int64(3),Int64(5))) |
+--------------------------------------+---------------------------------------------------+
| [-1.0, -2.0, -3.0, -4.0, -5.0, -6.0] | [-4,-5] |
| [1.0, 2.0, 3.0, 4.0, 5.0] | [4,5] |
| [4.0, 5.0, 6.0, 10, -8, 100] | [10,-8] |
+--------------------------------------+---------------------------------------------------+

SELECT vec_to_string(vec_subvector(v, 0, 5))
FROM (
SELECT '[1.1, 2.2, 3.3, 4.4, 5.5]' AS v
UNION ALL
SELECT '[-1.1, -2.1, -3.1, -4.1, -5.1, -6.1]' AS v
UNION ALL
SELECT '[4.0, 5.0, 6.0, 10, -8, 100]' AS v
) ORDER BY v;

+---------------------------------------------------+
| vec_to_string(vec_subvector(v,Int64(0),Int64(5))) |
+---------------------------------------------------+
| [-1.1,-2.1,-3.1,-4.1,-5.1] |
| [1.1,2.2,3.3,4.4,5.5] |
| [4,5,6,10,-8] |
+---------------------------------------------------+

@@ -99,3 +99,26 @@ FROM (
UNION ALL
SELECT '[7.0, 8.0, 9.0, 10.0]' AS v
) Order By vec_dim(v) ASC;

SELECT vec_to_string(vec_subvector('[1.0,2.0,3.0,4.0,5.0]', 0, 3));

SELECT vec_to_string(vec_subvector('[1.0,2.0,3.0,4.0,5.0]', 5, 5));

SELECT v, vec_to_string(vec_subvector(v, 3, 5))
FROM (
SELECT '[1.0, 2.0, 3.0, 4.0, 5.0]' AS v
UNION ALL
SELECT '[-1.0, -2.0, -3.0, -4.0, -5.0, -6.0]' AS v
UNION ALL
SELECT '[4.0, 5.0, 6.0, 10, -8, 100]' AS v
) ORDER BY v;

SELECT vec_to_string(vec_subvector(v, 0, 5))
FROM (
SELECT '[1.1, 2.2, 3.3, 4.4, 5.5]' AS v
UNION ALL
SELECT '[-1.1, -2.1, -3.1, -4.1, -5.1, -6.1]' AS v
UNION ALL
SELECT '[4.0, 5.0, 6.0, 10, -8, 100]' AS v
) ORDER BY v;

@@ -204,3 +204,26 @@ DROP TABLE integers;

Affected Rows: 0

CREATE TABLE characters(c STRING, t TIMESTAMP TIME INDEX);

Affected Rows: 0

INSERT INTO characters VALUES ('a', 1), ('b', 2), ('c', 3), (NULL, 4), ('a', 5), ('b', 6), ('c', 7), (NULL, 8);

Affected Rows: 8

SELECT * FROM characters WHERE c IN ('a', 'c') ORDER BY t;

+---+-------------------------+
| c | t                       |
+---+-------------------------+
| a | 1970-01-01T00:00:00.001 |
| c | 1970-01-01T00:00:00.003 |
| a | 1970-01-01T00:00:00.005 |
| c | 1970-01-01T00:00:00.007 |
+---+-------------------------+

DROP TABLE characters;

Affected Rows: 0

@@ -57,3 +57,11 @@ SELECT * FROM (SELECT i1.i AS a, i2.i AS b, row_number() OVER (ORDER BY i1.i, i2
SELECT * FROM (SELECT 0=1 AS cond FROM integers i1, integers i2 GROUP BY 1) a1 WHERE cond ORDER BY 1;

DROP TABLE integers;

CREATE TABLE characters(c STRING, t TIMESTAMP TIME INDEX);

INSERT INTO characters VALUES ('a', 1), ('b', 2), ('c', 3), (NULL, 4), ('a', 5), ('b', 6), ('c', 7), (NULL, 8);

SELECT * FROM characters WHERE c IN ('a', 'c') ORDER BY t;

DROP TABLE characters;
@@ -15,8 +15,8 @@ common-error.workspace = true
common-query.workspace = true
common-recordbatch.workspace = true
common-time.workspace = true
datatypes = { workspace = true }
flate2 = "1.0"
datatypes.workspace = true
flate2.workspace = true
hex = "0.4"
local-ip-address = "0.6"
mysql = { version = "25.0.1", default-features = false, features = ["minimal", "rustls-tls"] }
@@ -31,5 +31,5 @@ tar = "0.4"
tempfile.workspace = true
tinytemplate = "1.2"
tokio.workspace = true
tokio-postgres = { workspace = true }
tokio-postgres.workspace = true
tokio-stream.workspace = true