Mirror of https://github.com/GreptimeTeam/greptimedb.git, synced 2025-12-23 06:30:05 +00:00.

**Compare commits:** `release/v0` ... `jkt` (39 commits)
| SHA1 |
|---|
| edd8cb6710 |
| 7ee61e5d28 |
| 1b30aca5a5 |
| 99b352cea1 |
| 0f521956bf |
| aee72ab363 |
| 3c943be189 |
| 5b78d76fc5 |
| eeba466717 |
| 2ff54486d3 |
| a166430650 |
| 007a2b3dfe |
| f35e957ddd |
| 68414bf593 |
| 5e836a0d1b |
| f5e0da2fc8 |
| fb96d26ebf |
| 0046d3f65b |
| d7b97fc877 |
| bfdaa28b25 |
| 6293bb1f5b |
| 8fa1ebcc3e |
| c18c3f5839 |
| 629e72d8c0 |
| e4065505ab |
| aafd164483 |
| 1386e903d6 |
| 12692a940c |
| 4d44cbb8b2 |
| f4911aa3bb |
| 5ac61f17bc |
| e0d34c6d95 |
| 8a98b9c433 |
| 1f5d36a203 |
| 6fc7168893 |
| 2799d67212 |
| d97a76c312 |
| 15caca244e |
| 8638075cdd |
**.github/scripts/create-version.sh** (vendored; 14 lines changed)

Error messages now go to stderr, and a missing `NEXT_RELEASE_VERSION` falls back to the version in `Cargo.toml` instead of aborting:

```diff
@@ -10,17 +10,17 @@ set -e
 function create_version() {
   # Read from envrionment variables.
   if [ -z "$GITHUB_EVENT_NAME" ]; then
-    echo "GITHUB_EVENT_NAME is empty"
+    echo "GITHUB_EVENT_NAME is empty" >&2
     exit 1
   fi
 
   if [ -z "$NEXT_RELEASE_VERSION" ]; then
-    echo "NEXT_RELEASE_VERSION is empty"
-    exit 1
+    echo "NEXT_RELEASE_VERSION is empty, use version from Cargo.toml" >&2
+    export NEXT_RELEASE_VERSION=$(grep '^version = ' Cargo.toml | cut -d '"' -f 2 | head -n 1)
   fi
 
   if [ -z "$NIGHTLY_RELEASE_PREFIX" ]; then
-    echo "NIGHTLY_RELEASE_PREFIX is empty"
+    echo "NIGHTLY_RELEASE_PREFIX is empty" >&2
     exit 1
   fi
@@ -35,7 +35,7 @@ function create_version() {
   # It will be like 'dev-2023080819-f0e7216c'.
   if [ "$NEXT_RELEASE_VERSION" = dev ]; then
     if [ -z "$COMMIT_SHA" ]; then
-      echo "COMMIT_SHA is empty in dev build"
+      echo "COMMIT_SHA is empty in dev build" >&2
       exit 1
     fi
     echo "dev-$(date "+%Y%m%d-%s")-$(echo "$COMMIT_SHA" | cut -c1-8)"
@@ -45,7 +45,7 @@ function create_version() {
   # Note: Only output 'version=xxx' to stdout when everything is ok, so that it can be used in GitHub Actions Outputs.
   if [ "$GITHUB_EVENT_NAME" = push ]; then
     if [ -z "$GITHUB_REF_NAME" ]; then
-      echo "GITHUB_REF_NAME is empty in push event"
+      echo "GITHUB_REF_NAME is empty in push event" >&2
       exit 1
     fi
     echo "$GITHUB_REF_NAME"
@@ -54,7 +54,7 @@ function create_version() {
   elif [ "$GITHUB_EVENT_NAME" = schedule ]; then
     echo "$NEXT_RELEASE_VERSION-$NIGHTLY_RELEASE_PREFIX-$(date "+%Y%m%d")"
   else
-    echo "Unsupported GITHUB_EVENT_NAME: $GITHUB_EVENT_NAME"
+    echo "Unsupported GITHUB_EVENT_NAME: $GITHUB_EVENT_NAME" >&2
     exit 1
   fi
 }
```
**.github/workflows/release.yml** (vendored; 3 lines changed)

The workflow no longer pins `NEXT_RELEASE_VERSION`, since the script above now derives it from `Cargo.toml`:

```diff
@@ -90,8 +90,6 @@ env:
 
   # The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nigthly-20230313;
   NIGHTLY_RELEASE_PREFIX: nightly
-  # Note: The NEXT_RELEASE_VERSION should be modified manually by every formal release.
-  NEXT_RELEASE_VERSION: v0.14.0
 
 jobs:
   allocate-runners:
@@ -135,7 +133,6 @@ jobs:
         env:
           GITHUB_EVENT_NAME: ${{ github.event_name }}
           GITHUB_REF_NAME: ${{ github.ref_name }}
-          NEXT_RELEASE_VERSION: ${{ env.NEXT_RELEASE_VERSION }}
           NIGHTLY_RELEASE_PREFIX: ${{ env.NIGHTLY_RELEASE_PREFIX }}
 
       - name: Allocate linux-amd64 runner
```
**Cargo.lock** (generated; 180 lines changed)

Every workspace crate is bumped from `0.14.0` to `0.15.0`: api, auth, cache, catalog, cli, client, cmd, common-base, common-catalog, common-config, common-datasource, common-decimal, common-error, common-frontend, common-function, common-greptimedb-telemetry, common-grpc, common-grpc-expr, common-macro, common-mem-prof, common-meta, common-options, common-plugins, common-pprof, common-procedure, common-procedure-test, common-query, common-recordbatch, common-runtime, common-session, common-telemetry, common-test-util, common-time, common-version, common-wal, datanode, datatypes, file-engine, flow, frontend, index, log-query, log-store, meta-client, meta-srv, metric-engine, mito2, object-store, operator, partition, pipeline, plugins, promql, puffin, query, servers, session, sql, sqlness-runner, store-api, substrait, table, tests-fuzz, and tests-integration. Dependency lists that pinned `substrait 0.14.0` (in cli, client, cmd, datanode, flow, frontend, operator, query, and tests-integration) now pin `substrait 0.15.0`, and references to `rustc-hash 2.0.0` in catalog, quinn, and quinn-proto now point at `rustc-hash 2.1.1`.

External crates are updated as follows:

```diff
 [[package]]
 name = "anyhow"
-version = "1.0.89"
+version = "1.0.98"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "86fdf8605db99b54d3cd748a44c6d04df638eb5dafb219b135d0149bd0db01f6"
+checksum = "e16d2d3311acee920a9eb8d33b8cbc1787ce4a264e85f964c2404b969bdcd487"
 
 [[package]]
 name = "data-encoding"
-version = "2.6.0"
+version = "2.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e8566979429cf69b49a5c740c60791108e86440e8be149bbea4fe54d2c32d6e2"
+checksum = "2a2330da5de22e8a3cb63252ce2abb30116bf5265e89c0e01bc17015ce30a476"
 
 [[package]]
 name = "log"
-version = "0.4.22"
+version = "0.4.27"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24"
+checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94"
 
 [[package]]
 name = "regex"
-version = "1.11.0"
+version = "1.11.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "38200e5ee88914975b69f657f0801b6f6dccafd44fd9326302a4aaeecfacb1d8"
+checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191"
 
 [[package]]
 name = "rustc-hash"
-version = "2.0.0"
+version = "2.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "583034fd73374156e66797ed8e5b0d5690409c9226b22d87cb7f19821c05d152"
+checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d"
 
 [[package]]
 name = "smallbitvec"
-version = "2.5.3"
+version = "2.6.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fcc3fc564a4b53fd1e8589628efafe57602d91bde78be18186b5f61e8faea470"
+checksum = "d31d263dd118560e1a492922182ab6ca6dc1d03a3bf54e7699993f31a4150e3f"
```

Two further entries whose `[[package]]` names sit above the visible hunks also change a dependency: the entry at `version = "0.7.3"` (checksum `af1844ef…`) moves from `proc-macro-crate 1.3.1` to `proc-macro-crate 3.2.0`, and the entry with checksum `4979f2…` moves from `windows-targets 0.48.5` to `windows-targets 0.52.6`.
**Cargo.toml** (workspace manifest)

The workspace version moves to `0.15.0`, a newly named clippy lint is allowed, and `prost` gains the `no-recursion-limit` feature:

```diff
@@ -68,15 +68,16 @@ members = [
 resolver = "2"
 
 [workspace.package]
-version = "0.14.0"
+version = "0.15.0"
 edition = "2021"
 license = "Apache-2.0"
 
 [workspace.lints]
 clippy.print_stdout = "warn"
 clippy.print_stderr = "warn"
 clippy.dbg_macro = "warn"
 clippy.implicit_clone = "warn"
 clippy.result_large_err = "allow"
 clippy.large_enum_variant = "allow"
+clippy.doc_overindented_list_items = "allow"
 rust.unknown_lints = "deny"
 rust.unexpected_cfgs = { level = "warn", check-cfg = ['cfg(tokio_unstable)'] }
@@ -162,7 +163,7 @@ paste = "1.0"
 pin-project = "1.0"
 prometheus = { version = "0.13.3", features = ["process"] }
 promql-parser = { version = "0.5.1", features = ["ser"] }
-prost = "0.13"
+prost = { version = "0.13", features = ["no-recursion-limit"] }
 raft-engine = { version = "0.4.1", default-features = false }
 rand = "0.9"
 ratelimit = "0.10"
```
**flake.lock** (generated; 18 lines changed)

```diff
@@ -8,11 +8,11 @@
       "rust-analyzer-src": "rust-analyzer-src"
     },
     "locked": {
-      "lastModified": 1737613896,
-      "narHash": "sha256-ldqXIglq74C7yKMFUzrS9xMT/EVs26vZpOD68Sh7OcU=",
+      "lastModified": 1742452566,
+      "narHash": "sha256-sVuLDQ2UIWfXUBbctzrZrXM2X05YjX08K7XHMztt36E=",
       "owner": "nix-community",
       "repo": "fenix",
-      "rev": "303a062fdd8e89f233db05868468975d17855d80",
+      "rev": "7d9ba794daf5e8cc7ee728859bc688d8e26d5f06",
       "type": "github"
     },
     "original": {
@@ -41,11 +41,11 @@
   },
   "nixpkgs": {
     "locked": {
-      "lastModified": 1737569578,
-      "narHash": "sha256-6qY0pk2QmUtBT9Mywdvif0i/CLVgpCjMUn6g9vB+f3M=",
+      "lastModified": 1743576891,
+      "narHash": "sha256-vXiKURtntURybE6FMNFAVpRPr8+e8KoLPrYs9TGuAKc=",
       "owner": "NixOS",
       "repo": "nixpkgs",
-      "rev": "47addd76727f42d351590c905d9d1905ca895b82",
+      "rev": "44a69ed688786e98a101f02b712c313f1ade37ab",
       "type": "github"
     },
     "original": {
@@ -65,11 +65,11 @@
   "rust-analyzer-src": {
     "flake": false,
     "locked": {
-      "lastModified": 1737581772,
-      "narHash": "sha256-t1P2Pe3FAX9TlJsCZbmJ3wn+C4qr6aSMypAOu8WNsN0=",
+      "lastModified": 1742296961,
+      "narHash": "sha256-gCpvEQOrugHWLimD1wTFOJHagnSEP6VYBDspq96Idu0=",
       "owner": "rust-lang",
       "repo": "rust-analyzer",
-      "rev": "582af7ee9c8d84f5d534272fc7de9f292bd849be",
+      "rev": "15d87419f1a123d8f888d608129c3ce3ff8f13d4",
       "type": "github"
     },
     "original": {
```
**flake.nix**

The pinned toolchain hash is updated for the new channel:

```diff
@@ -21,7 +21,7 @@
       lib = nixpkgs.lib;
       rustToolchain = fenix.packages.${system}.fromToolchainName {
         name = (lib.importTOML ./rust-toolchain.toml).toolchain.channel;
-        sha256 = "sha256-f/CVA1EC61EWbh0SjaRNhLL0Ypx2ObupbzigZp8NmL4=";
+        sha256 = "sha256-i0Sh/ZFFsHlZ3oFZFc24qdk6Cd8Do8OPU4HJQsrKOeM=";
       };
     in
     {
```
**rust-toolchain.toml**

```diff
@@ -1,2 +1,2 @@
 [toolchain]
-channel = "nightly-2024-12-25"
+channel = "nightly-2025-04-15"
```
A test drops an assertion that two specific keys collide under the map's hasher:

```diff
@@ -84,12 +84,6 @@ mod tests {
         let key1 = "3178510";
         let key2 = "4215648";
 
-        // have collision
-        assert_eq!(
-            oid_map.hasher.hash_one(key1) as u32,
-            oid_map.hasher.hash_one(key2) as u32
-        );
-
         // insert them into oid_map
         let oid1 = oid_map.get_oid(key1);
         let oid2 = oid_map.get_oid(key2);
```
In `MatchesTermFunction`, `repeat().take()` becomes `repeat_n`, and the term finder now slices bytes instead of the string:

```diff
@@ -12,8 +12,9 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+use std::fmt;
+use std::iter::repeat_n;
 use std::sync::Arc;
-use std::{fmt, iter};
 
 use common_query::error::{InvalidFuncArgsSnafu, Result};
 use common_query::prelude::Volatility;
@@ -126,9 +127,10 @@ impl Function for MatchesTermFunction {
         let term = term_column.get_ref(0).as_string().unwrap();
         match term {
             None => {
-                return Ok(Arc::new(BooleanVector::from_iter(
-                    iter::repeat(None).take(text_column.len()),
-                )));
+                return Ok(Arc::new(BooleanVector::from_iter(repeat_n(
+                    None,
+                    text_column.len(),
+                ))));
             }
             Some(term) => Some(MatchesTermFinder::new(term)),
         }
@@ -217,7 +219,7 @@ impl MatchesTermFinder {
         }
 
         let mut pos = 0;
-        while let Some(found_pos) = self.finder.find(text[pos..].as_bytes()) {
+        while let Some(found_pos) = self.finder.find(&text.as_bytes()[pos..]) {
             let actual_pos = pos + found_pos;
 
             let prev_ok = self.starts_with_non_alnum
```
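The byte-slice change is not cosmetic: `text[pos..]` panics when `pos` does not fall on a UTF-8 character boundary, while indexing into `text.as_bytes()` is boundary-agnostic. A minimal standalone illustration (not GreptimeDB code):

```rust
fn main() {
    let text = "héllo"; // 'é' is a two-byte character occupying bytes 1..3
    let pos = 2;        // falls inside the 'é' encoding

    // Byte-wise slicing is valid at any in-bounds index:
    let tail: &[u8] = &text.as_bytes()[pos..];
    assert_eq!(tail.len(), text.len() - pos);

    // String slicing checks UTF-8 boundaries and would panic here:
    // let _ = &text[pos..]; // panic: byte index 2 is not a char boundary
}
```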
`RateFunction` now reports the name `rate` instead of `prom_rate`, and its test follows:

```diff
@@ -37,7 +37,7 @@ impl fmt::Display for RateFunction {
 
 impl Function for RateFunction {
     fn name(&self) -> &str {
-        "prom_rate"
+        "rate"
     }
 
     fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
@@ -82,7 +82,7 @@ mod tests {
     #[test]
     fn test_rate_function() {
         let rate = RateFunction;
-        assert_eq!("prom_rate", rate.name());
+        assert_eq!("rate", rate.name());
         assert_eq!(
             ConcreteDataType::float64_datatype(),
             rate.return_type(&[]).unwrap()
```
One crate root trims feature gates that are stable on the new toolchain:

```diff
@@ -15,8 +15,6 @@
 #![feature(assert_matches)]
 #![feature(btree_extract_if)]
 #![feature(let_chains)]
-#![feature(extract_if)]
-#![feature(hash_extract_if)]
 
 pub mod cache;
 pub mod cache_invalidator;
```
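For reference, the now-stable `Vec::extract_if` was stabilized with a range argument in front of the predicate. A standalone sketch:

```rust
fn main() {
    let mut v = vec![1, 2, 3, 4, 5, 6];
    // Drain the even elements in place over the full range `..`;
    // odd elements stay in `v` in their original order.
    let evens: Vec<i32> = v.extract_if(.., |x| *x % 2 == 0).collect();
    assert_eq!(evens, [2, 4, 6]);
    assert_eq!(v, [1, 3, 5]);
}
```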
`TableRoute` drops identity conversions when resolving leader and follower peers:

```diff
@@ -176,15 +176,12 @@ impl TableRoute {
             })?
             .into();
 
-        let leader_peer = peers
-            .get(region_route.leader_peer_index as usize)
-            .cloned()
-            .map(Into::into);
+        let leader_peer = peers.get(region_route.leader_peer_index as usize).cloned();
 
         let follower_peers = region_route
             .follower_peer_indexes
             .into_iter()
-            .filter_map(|x| peers.get(x as usize).cloned().map(Into::into))
+            .filter_map(|x| peers.get(x as usize).cloned())
             .collect::<Vec<_>>();
 
         region_routes.push(RegionRoute {
```
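The dropped `map(Into::into)` calls were identity conversions: every type `T` implements `Into<T>`, so when the source and target types already match, the call does nothing. A standalone illustration:

```rust
fn main() {
    let peer: Option<String> = Some("peer-0".to_string());
    // `Into::into` from String to String is the identity conversion.
    let a: Option<String> = peer.clone().map(Into::into);
    let b: Option<String> = peer;
    assert_eq!(a, b);
}
```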
The DataFusion accumulator adaptor drops a redundant `map_err` (the `?` operator already converts through `From`), along with the now-unused `Error` import:

```diff
@@ -24,7 +24,7 @@ use datatypes::prelude::*;
 use datatypes::vectors::{Helper as VectorHelper, VectorRef};
 use snafu::ResultExt;
 
-use crate::error::{self, Error, FromScalarValueSnafu, IntoVectorSnafu, Result};
+use crate::error::{self, FromScalarValueSnafu, IntoVectorSnafu, Result};
 use crate::prelude::*;
 
 pub type AggregateFunctionCreatorRef = Arc<dyn AggregateFunctionCreator>;
@@ -166,8 +166,7 @@ impl DfAccumulator for DfAccumulatorAdaptor {
         let output_type = self.creator.output_type()?;
         let scalar_value = value
             .try_to_scalar_value(&output_type)
-            .context(error::ToScalarValueSnafu)
-            .map_err(Error::from)?;
+            .context(error::ToScalarValueSnafu)?;
         Ok(scalar_value)
     }
```
`create_current_timestamp_vector` switches from `repeat().take()` to `repeat_n`:

```diff
@@ -253,9 +253,10 @@ fn create_current_timestamp_vector(
     data_type: &ConcreteDataType,
     num_rows: usize,
 ) -> Result<VectorRef> {
-    let current_timestamp_vector = TimestampMillisecondVector::from_values(
-        std::iter::repeat(util::current_time_millis()).take(num_rows),
-    );
+    let current_timestamp_vector = TimestampMillisecondVector::from_values(std::iter::repeat_n(
+        util::current_time_millis(),
+        num_rows,
+    ));
     if data_type.is_timestamp() {
         current_timestamp_vector.cast(data_type)
     } else {
```
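`std::iter::repeat_n(value, n)` (stable since Rust 1.82) is the direct replacement for `repeat(value).take(n)`: it yields the same items, implements `ExactSizeIterator`, and hands out the owned value on the final iteration instead of cloning one extra time. For example:

```rust
use std::iter::repeat_n;

fn main() {
    // Both produce [7, 7, 7]; repeat_n is shorter and carries an exact size.
    let old: Vec<i32> = std::iter::repeat(7).take(3).collect();
    let new: Vec<i32> = repeat_n(7, 3).collect();
    assert_eq!(old, new);
    assert_eq!(repeat_n(7, 3).len(), 3);
}
```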
`ConstantVector` serialization gets the same treatment:

```diff
@@ -198,8 +198,7 @@ impl fmt::Debug for ConstantVector {
 
 impl Serializable for ConstantVector {
     fn serialize_to_json(&self) -> Result<Vec<serde_json::Value>> {
-        std::iter::repeat(self.get(0))
-            .take(self.len())
+        std::iter::repeat_n(self.get(0), self.len())
             .map(serde_json::Value::try_from)
             .collect::<serde_json::Result<_>>()
             .context(SerializeSnafu)
```
So does `replicate_decimal128` (the safety comment still names `Repeat`/`Take`; `RepeatN` implements `TrustedLen` as well):

```diff
@@ -412,7 +412,7 @@ pub(crate) fn replicate_decimal128(
                 // Safety: std::iter::Repeat and std::iter::Take implement TrustedLen.
                 builder
                     .mutable_array
-                    .append_trusted_len_iter(std::iter::repeat(data).take(repeat_times));
+                    .append_trusted_len_iter(std::iter::repeat_n(data, repeat_times));
             }
         }
         None => {
```
`DictionaryVector` widens its dictionary keys from `Int32` to `Int64` throughout:

```diff
@@ -16,8 +16,8 @@ use std::any::Any;
 use std::sync::Arc;
 
 use arrow::array::Array;
-use arrow::datatypes::Int32Type;
-use arrow_array::{ArrayRef, DictionaryArray, Int32Array};
+use arrow::datatypes::Int64Type;
+use arrow_array::{ArrayRef, DictionaryArray, Int64Array};
 use serde_json::Value as JsonValue;
 use snafu::ResultExt;
 
@@ -32,7 +32,7 @@ use crate::vectors::{self, Helper, Validity, Vector, VectorRef};
 /// Vector of dictionaries, basically backed by Arrow's `DictionaryArray`.
 #[derive(Debug, PartialEq)]
 pub struct DictionaryVector {
-    array: DictionaryArray<Int32Type>,
+    array: DictionaryArray<Int64Type>,
     /// The datatype of the items in the dictionary.
     item_type: ConcreteDataType,
     /// The vector of items in the dictionary.
@@ -41,7 +41,7 @@ pub struct DictionaryVector {
 
 impl DictionaryVector {
     /// Create a new instance of `DictionaryVector` from a dictionary array and item type
-    pub fn new(array: DictionaryArray<Int32Type>, item_type: ConcreteDataType) -> Result<Self> {
+    pub fn new(array: DictionaryArray<Int64Type>, item_type: ConcreteDataType) -> Result<Self> {
         let item_vector = Helper::try_into_vector(array.values())?;
 
         Ok(Self {
@@ -52,12 +52,12 @@ impl DictionaryVector {
     }
 
     /// Returns the underlying Arrow dictionary array
-    pub fn array(&self) -> &DictionaryArray<Int32Type> {
+    pub fn array(&self) -> &DictionaryArray<Int64Type> {
         &self.array
     }
 
     /// Returns the keys array of this dictionary
-    pub fn keys(&self) -> &arrow_array::PrimitiveArray<Int32Type> {
+    pub fn keys(&self) -> &arrow_array::PrimitiveArray<Int64Type> {
         self.array.keys()
     }
 
@@ -74,7 +74,7 @@ impl DictionaryVector {
 impl Vector for DictionaryVector {
     fn data_type(&self) -> ConcreteDataType {
         ConcreteDataType::Dictionary(DictionaryType::new(
-            ConcreteDataType::int32_datatype(),
+            ConcreteDataType::int64_datatype(),
             self.item_type.clone(),
         ))
     }
@@ -163,10 +163,10 @@ impl Serializable for DictionaryVector {
     }
 }
 
-impl TryFrom<DictionaryArray<Int32Type>> for DictionaryVector {
+impl TryFrom<DictionaryArray<Int64Type>> for DictionaryVector {
     type Error = crate::error::Error;
 
-    fn try_from(array: DictionaryArray<Int32Type>) -> Result<Self> {
+    fn try_from(array: DictionaryArray<Int64Type>) -> Result<Self> {
         let item_type = ConcreteDataType::from_arrow_type(array.values().data_type());
         let item_vector = Helper::try_into_vector(array.values())?;
 
@@ -243,7 +243,7 @@ impl VectorOp for DictionaryVector {
             previous_offset = offset;
         }
 
-        let new_keys = Int32Array::from(replicated_keys);
+        let new_keys = Int64Array::from(replicated_keys);
         let new_array = DictionaryArray::try_new(new_keys, self.values().clone())
             .expect("Failed to create replicated dictionary array");
 
@@ -261,7 +261,7 @@ impl VectorOp for DictionaryVector {
         let filtered_key_array = filtered_key_vector.to_arrow_array();
         let filtered_key_array = filtered_key_array
             .as_any()
-            .downcast_ref::<Int32Array>()
+            .downcast_ref::<Int64Array>()
             .unwrap();
 
         let new_array = DictionaryArray::try_new(filtered_key_array.clone(), self.values().clone())
@@ -291,7 +291,7 @@ impl VectorOp for DictionaryVector {
         let key_vector = Helper::try_into_vector(&key_array)?;
         let new_key_vector = key_vector.take(indices)?;
         let new_key_array = new_key_vector.to_arrow_array();
-        let new_key_array = new_key_array.as_any().downcast_ref::<Int32Array>().unwrap();
+        let new_key_array = new_key_array.as_any().downcast_ref::<Int64Array>().unwrap();
 
         let new_array = DictionaryArray::try_new(new_key_array.clone(), self.values().clone())
             .expect("Failed to create filtered dictionary array");
@@ -318,7 +318,7 @@ mod tests {
         // Keys: [0, 1, 2, null, 1, 3]
         // Resulting in: ["a", "b", "c", null, "b", "d"]
         let values = StringArray::from(vec!["a", "b", "c", "d"]);
-        let keys = Int32Array::from(vec![Some(0), Some(1), Some(2), None, Some(1), Some(3)]);
+        let keys = Int64Array::from(vec![Some(0), Some(1), Some(2), None, Some(1), Some(3)]);
         let dict_array = DictionaryArray::new(keys, Arc::new(values));
         DictionaryVector::try_from(dict_array).unwrap()
     }
@@ -404,7 +404,7 @@ mod tests {
         assert_eq!(
             casted.data_type(),
             ConcreteDataType::Dictionary(DictionaryType::new(
-                ConcreteDataType::int32_datatype(),
+                ConcreteDataType::int64_datatype(),
                 ConcreteDataType::string_datatype(),
             ))
        );
```
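Constructing the 64-bit-keyed array mirrors the 32-bit version; only the key type changes, at four extra bytes per key in exchange for a much larger addressable dictionary. A standalone arrow-array sketch:

```rust
use std::sync::Arc;

use arrow_array::types::Int64Type;
use arrow_array::{Array, DictionaryArray, Int64Array, StringArray};

fn main() {
    // Keys index into the values array; a null key encodes a null entry.
    let values = StringArray::from(vec!["a", "b"]);
    let keys = Int64Array::from(vec![Some(0), Some(1), None, Some(0)]);
    let dict: DictionaryArray<Int64Type> = DictionaryArray::new(keys, Arc::new(values));
    assert_eq!(dict.len(), 4); // logical length follows the keys
}
```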
The vector `Helper` follows the key-type change:

```diff
@@ -20,7 +20,7 @@ use std::sync::Arc;
 use arrow::array::{Array, ArrayRef, StringArray};
 use arrow::compute;
 use arrow::compute::kernels::comparison;
-use arrow::datatypes::{DataType as ArrowDataType, Int32Type, TimeUnit};
+use arrow::datatypes::{DataType as ArrowDataType, Int64Type, TimeUnit};
 use arrow_array::DictionaryArray;
 use arrow_schema::IntervalUnit;
 use datafusion_common::ScalarValue;
@@ -348,11 +348,11 @@ impl Helper {
             ArrowDataType::Decimal128(_, _) => {
                 Arc::new(Decimal128Vector::try_from_arrow_array(array)?)
             }
-            ArrowDataType::Dictionary(key, value) if matches!(&**key, ArrowDataType::Int32) => {
+            ArrowDataType::Dictionary(key, value) if matches!(&**key, ArrowDataType::Int64) => {
                 let array = array
                     .as_ref()
                     .as_any()
-                    .downcast_ref::<DictionaryArray<Int32Type>>()
+                    .downcast_ref::<DictionaryArray<Int64Type>>()
                     .unwrap(); // Safety: the type is guarded by match arm condition
                 Arc::new(DictionaryVector::new(
                     array.clone(),
```
`NullVector` serialization also moves to `repeat_n`:

```diff
@@ -120,9 +120,7 @@ impl fmt::Debug for NullVector {
 
 impl Serializable for NullVector {
     fn serialize_to_json(&self) -> Result<Vec<serde_json::Value>> {
-        Ok(std::iter::repeat(serde_json::Value::Null)
-            .take(self.len())
-            .collect())
+        Ok(std::iter::repeat_n(serde_json::Value::Null, self.len()).collect())
     }
 }
```
As does `replicate_primitive`:

```diff
@@ -388,7 +388,7 @@ pub(crate) fn replicate_primitive<T: LogicalPrimitiveType>(
                 // Safety: std::iter::Repeat and std::iter::Take implement TrustedLen.
                 builder
                     .mutable_array
-                    .append_trusted_len_iter(std::iter::repeat(data).take(repeat_times));
+                    .append_trusted_len_iter(std::iter::repeat_n(data, repeat_times));
             }
         }
         None => {
```
And a test helper:

```diff
@@ -481,7 +481,7 @@ mod tests {
 
         let mock_values = dic_values
             .iter()
-            .flat_map(|(value, size)| iter::repeat(value.clone()).take(*size))
+            .flat_map(|(value, size)| std::iter::repeat_n(value.clone(), *size))
             .collect::<Vec<_>>();
 
         let sorted_result = sorted_result(&mock_values, segment_row_count);
```
Another crate root drops a stabilized feature gate:

```diff
@@ -14,7 +14,6 @@
 #![feature(result_flattening)]
 #![feature(assert_matches)]
-#![feature(extract_if)]
 #![feature(hash_set_entry)]
 
 pub mod bootstrap;
```
A mock connector switches to the shorter `std::io::Error::other`:

```diff
@@ -141,10 +141,7 @@ pub async fn mock(
                 if let Some(client) = client {
                     Ok(TokioIo::new(client))
                 } else {
-                    Err(std::io::Error::new(
-                        std::io::ErrorKind::Other,
-                        "Client already taken",
-                    ))
+                    Err(std::io::Error::other("Client already taken"))
                 }
             }
         }),
```
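`std::io::Error::other(e)` has been a stable shorthand for `Error::new(ErrorKind::Other, e)` since Rust 1.74. A standalone sketch of the same pattern (the `take_client` helper here is hypothetical):

```rust
use std::io;

// Take a one-shot resource out of a slot, failing if it was already taken.
fn take_client(slot: &mut Option<String>) -> io::Result<String> {
    slot.take()
        .ok_or_else(|| io::Error::other("Client already taken"))
}

fn main() {
    let mut slot = Some("client-0".to_string());
    assert!(take_client(&mut slot).is_ok());
    assert!(take_client(&mut slot).is_err()); // second take fails
}
```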
`LeaderCachedKvBackend` drops a redundant per-element conversion when re-putting remotely fetched keys into the cache:

```diff
@@ -278,7 +278,7 @@ impl KvBackend for LeaderCachedKvBackend {
 
         let remote_res = self.store.batch_get(remote_req).await?;
         let put_req = BatchPutRequest {
-            kvs: remote_res.kvs.clone().into_iter().map(Into::into).collect(),
+            kvs: remote_res.kvs.clone().into_iter().collect(),
             ..Default::default()
         };
         let _ = self.cache.batch_put(put_req).await?;
```
mito2's error type renames `FilterRecordBatch` to the more general `RecordBatch` and adds variants for the new series scan:

```diff
@@ -710,8 +710,8 @@ pub enum Error {
         error: std::io::Error,
     },
 
-    #[snafu(display("Failed to filter record batch"))]
-    FilterRecordBatch {
+    #[snafu(display("Record batch error"))]
+    RecordBatch {
         source: common_recordbatch::error::Error,
         #[snafu(implicit)]
         location: Location,
@@ -1032,6 +1032,20 @@ pub enum Error {
         #[snafu(implicit)]
         location: Location,
     },
+
+    #[snafu(display("Failed to scan series"))]
+    ScanSeries {
+        #[snafu(implicit)]
+        location: Location,
+        source: Arc<Error>,
+    },
+
+    #[snafu(display("Partition {} scan multiple times", partition))]
+    ScanMultiTimes {
+        partition: usize,
+        #[snafu(implicit)]
+        location: Location,
+    },
 }
 
 pub type Result<T, E = Error> = std::result::Result<T, E>;
@@ -1154,7 +1168,7 @@ impl ErrorExt for Error {
 
             External { source, .. } => source.status_code(),
 
-            FilterRecordBatch { source, .. } => source.status_code(),
+            RecordBatch { source, .. } => source.status_code(),
 
             Download { .. } | Upload { .. } => StatusCode::StorageUnavailable,
             ChecksumMismatch { .. } => StatusCode::Unexpected,
@@ -1183,7 +1197,12 @@ impl ErrorExt for Error {
             ManualCompactionOverride {} => StatusCode::Cancelled,
 
             IncompatibleWalProviderChange { .. } => StatusCode::InvalidArguments,
 
+            ConvertDataType { .. } => StatusCode::Internal,
+
+            ScanSeries { source, .. } => source.status_code(),
+
+            ScanMultiTimes { .. } => StatusCode::InvalidArguments,
         }
     }
```
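A minimal standalone sketch of how such snafu variants are raised; the enum and helper below are simplified stand-ins, not the mito2 definitions:

```rust
use snafu::{ensure, Snafu};

#[derive(Debug, Snafu)]
enum Error {
    #[snafu(display("Partition {} scan multiple times", partition))]
    ScanMultiTimes { partition: usize },
}

// `ensure!` returns early through the derive-generated context selector
// (`ScanMultiTimesSnafu`) when the condition is false.
fn take_receiver(taken: &mut [bool], partition: usize) -> Result<(), Error> {
    ensure!(!taken[partition], ScanMultiTimesSnafu { partition });
    taken[partition] = true;
    Ok(())
}

fn main() {
    let mut taken = vec![false; 2];
    assert!(take_receiver(&mut taken, 0).is_ok());
    assert!(take_receiver(&mut taken, 0).is_err()); // second scan is rejected
}
```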
mito2's read module registers the new submodule:

```diff
@@ -24,6 +24,7 @@ pub(crate) mod range;
 pub(crate) mod scan_region;
 pub(crate) mod scan_util;
 pub(crate) mod seq_scan;
+pub(crate) mod series_scan;
 pub(crate) mod unordered_scan;
 
 use std::collections::{HashMap, HashSet};
```
A memtable test switches to `repeat_n`:

```diff
@@ -363,9 +363,9 @@ mod tests {
         builder
             .push_field_array(
                 *column_id,
-                Arc::new(Int64Array::from_iter_values(
-                    std::iter::repeat(*field).take(num_rows),
-                )),
+                Arc::new(Int64Array::from_iter_values(std::iter::repeat_n(
+                    *field, num_rows,
+                ))),
             )
             .unwrap();
     }
```
`PruneTimeIterator` follows the error-variant rename:

```diff
@@ -21,7 +21,7 @@ use datatypes::arrow::array::BooleanArray;
 use datatypes::arrow::buffer::BooleanBuffer;
 use snafu::ResultExt;
 
-use crate::error::{FilterRecordBatchSnafu, Result};
+use crate::error::{RecordBatchSnafu, Result};
 use crate::memtable::BoxedBatchIterator;
 use crate::read::last_row::RowGroupLastRowCachedReader;
 use crate::read::{Batch, BatchReader};
@@ -201,7 +201,7 @@ impl PruneTimeIterator {
         for filter in filters.iter() {
             let result = filter
                 .evaluate_vector(batch.timestamps())
-                .context(FilterRecordBatchSnafu)?;
+                .context(RecordBatchSnafu)?;
             mask = mask.bitand(&result);
         }
```
`ScanRegion` learns to dispatch to the new `SeriesScan` when the request asks for per-series distribution:

```diff
@@ -46,6 +46,7 @@ use crate::read::compat::{self, CompatBatch};
 use crate::read::projection::ProjectionMapper;
 use crate::read::range::{FileRangeBuilder, MemRangeBuilder, RangeMeta, RowGroupIndex};
 use crate::read::seq_scan::SeqScan;
+use crate::read::series_scan::SeriesScan;
 use crate::read::unordered_scan::UnorderedScan;
 use crate::read::{Batch, Source};
 use crate::region::options::MergeMode;
@@ -66,6 +67,8 @@ pub(crate) enum Scanner {
     Seq(SeqScan),
     /// Unordered scan.
     Unordered(UnorderedScan),
+    /// Per-series scan.
+    Series(SeriesScan),
 }
 
 impl Scanner {
@@ -75,6 +78,7 @@ impl Scanner {
         match self {
             Scanner::Seq(seq_scan) => seq_scan.build_stream(),
             Scanner::Unordered(unordered_scan) => unordered_scan.build_stream().await,
+            Scanner::Series(series_scan) => series_scan.build_stream().await,
         }
     }
 }
@@ -86,6 +90,7 @@ impl Scanner {
         match self {
             Scanner::Seq(seq_scan) => seq_scan.input().num_files(),
             Scanner::Unordered(unordered_scan) => unordered_scan.input().num_files(),
+            Scanner::Series(series_scan) => series_scan.input().num_files(),
         }
     }
@@ -94,6 +99,7 @@ impl Scanner {
         match self {
             Scanner::Seq(seq_scan) => seq_scan.input().num_memtables(),
             Scanner::Unordered(unordered_scan) => unordered_scan.input().num_memtables(),
+            Scanner::Series(series_scan) => series_scan.input().num_memtables(),
         }
     }
@@ -102,6 +108,7 @@ impl Scanner {
         match self {
             Scanner::Seq(seq_scan) => seq_scan.input().file_ids(),
             Scanner::Unordered(unordered_scan) => unordered_scan.input().file_ids(),
+            Scanner::Series(series_scan) => series_scan.input().file_ids(),
         }
     }
@@ -113,6 +120,7 @@ impl Scanner {
         match self {
             Scanner::Seq(seq_scan) => seq_scan.prepare(request).unwrap(),
             Scanner::Unordered(unordered_scan) => unordered_scan.prepare(request).unwrap(),
+            Scanner::Series(series_scan) => series_scan.prepare(request).unwrap(),
         }
     }
 }
@@ -248,7 +256,9 @@ impl ScanRegion {
 
     /// Returns a [Scanner] to scan the region.
     pub(crate) fn scanner(self) -> Result<Scanner> {
-        if self.use_unordered_scan() {
+        if self.use_series_scan() {
+            self.series_scan().map(Scanner::Series)
+        } else if self.use_unordered_scan() {
             // If table is append only and there is no series row selector, we use unordered scan in query.
             // We still use seq scan in compaction.
             self.unordered_scan().map(Scanner::Unordered)
@@ -260,7 +270,9 @@ impl ScanRegion {
     /// Returns a [RegionScanner] to scan the region.
     #[tracing::instrument(level = tracing::Level::DEBUG, skip_all)]
     pub(crate) fn region_scanner(self) -> Result<RegionScannerRef> {
-        if self.use_unordered_scan() {
+        if self.use_series_scan() {
+            self.series_scan().map(|scanner| Box::new(scanner) as _)
+        } else if self.use_unordered_scan() {
             self.unordered_scan().map(|scanner| Box::new(scanner) as _)
         } else {
             self.seq_scan().map(|scanner| Box::new(scanner) as _)
@@ -279,6 +291,12 @@ impl ScanRegion {
         Ok(UnorderedScan::new(input))
     }
 
+    /// Scans by series.
+    pub(crate) fn series_scan(self) -> Result<SeriesScan> {
+        let input = self.scan_input(true)?;
+        Ok(SeriesScan::new(input))
+    }
+
     #[cfg(test)]
     pub(crate) fn scan_without_filter_deleted(self) -> Result<SeqScan> {
         let input = self.scan_input(false)?;
@@ -299,6 +317,11 @@ impl ScanRegion {
             || self.request.distribution == Some(TimeSeriesDistribution::TimeWindowed))
     }
 
+    /// Returns true if the region can use series scan for current request.
+    fn use_series_scan(&self) -> bool {
+        self.request.distribution == Some(TimeSeriesDistribution::PerSeries)
+    }
+
     /// Creates a scan input.
     fn scan_input(mut self, filter_deleted: bool) -> Result<ScanInput> {
         let time_range = self.build_time_range_predicate();
```
Scan metrics gain a counter for `SeriesScan` send timeouts:

```diff
@@ -92,6 +92,8 @@ struct ScanMetricsSet {
 
     /// Elapsed time before the first poll operation.
     first_poll: Duration,
+    /// Number of send timeout in SeriesScan.
+    num_series_send_timeout: usize,
 }
 
 impl fmt::Debug for ScanMetricsSet {
@@ -122,6 +124,7 @@ impl fmt::Debug for ScanMetricsSet {
             num_sst_batches,
             num_sst_rows,
             first_poll,
+            num_series_send_timeout,
         } = self;
 
         write!(
@@ -150,7 +153,8 @@ impl fmt::Debug for ScanMetricsSet {
             num_sst_record_batches={num_sst_record_batches}, \
             num_sst_batches={num_sst_batches}, \
             num_sst_rows={num_sst_rows}, \
-            first_poll={first_poll:?}}}"
+            first_poll={first_poll:?}, \
+            num_series_send_timeout={num_series_send_timeout}}}"
         )
     }
 }
@@ -439,6 +443,12 @@ impl PartitionMetrics {
     pub(crate) fn on_finish(&self) {
         self.0.on_finish();
     }
+
+    /// Sets the `num_series_send_timeout`.
+    pub(crate) fn set_num_series_send_timeout(&self, num_timeout: usize) {
+        let mut metrics = self.0.metrics.lock().unwrap();
+        metrics.num_series_send_timeout = num_timeout;
+    }
 }
 
 impl fmt::Debug for PartitionMetrics {
```
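The setter follows the existing pattern of a shared, mutex-guarded metrics struct. A simplified standalone sketch of that pattern (the names below are stand-ins, not the mito2 types):

```rust
use std::sync::{Arc, Mutex};

#[derive(Default)]
struct Metrics {
    num_series_send_timeout: usize,
}

// Cloning shares the same underlying metrics across tasks.
#[derive(Clone, Default)]
struct PartitionMetrics(Arc<Mutex<Metrics>>);

impl PartitionMetrics {
    fn set_num_series_send_timeout(&self, n: usize) {
        self.0.lock().unwrap().num_series_send_timeout = n;
    }
}

fn main() {
    let metrics = PartitionMetrics::default();
    let handle = metrics.clone();
    handle.set_num_series_send_timeout(3);
    assert_eq!(metrics.0.lock().unwrap().num_series_send_timeout, 3);
}
```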
`SeqScan` no longer handles per-series distribution itself: the `PerSeries` branch and the whole `scan_partition_by_series` method move out (superseded by `SeriesScan`), `build_reader_from_sources` and `build_sources` become `pub(crate)` so `SeriesScan` can reuse them, and scanning an empty partition now returns an empty stream up front:

```diff
@@ -30,7 +30,7 @@ use datatypes::schema::SchemaRef;
 use snafu::ResultExt;
 use store_api::metadata::RegionMetadataRef;
 use store_api::region_engine::{PartitionRange, PrepareRequest, RegionScanner, ScannerProperties};
-use store_api::storage::{TimeSeriesDistribution, TimeSeriesRowSelector};
+use store_api::storage::TimeSeriesRowSelector;
 use tokio::sync::Semaphore;
 
 use crate::error::{PartitionOutOfRangeSnafu, Result};
@@ -149,7 +149,7 @@ impl SeqScan {
     /// Builds a reader to read sources. If `semaphore` is provided, reads sources in parallel
     /// if possible.
     #[tracing::instrument(level = tracing::Level::DEBUG, skip_all)]
-    async fn build_reader_from_sources(
+    pub(crate) async fn build_reader_from_sources(
         stream_ctx: &StreamContext,
         mut sources: Vec<Source>,
         semaphore: Option<Arc<Semaphore>>,
@@ -206,9 +206,13 @@ impl SeqScan {
                 .build(),
             ));
         }
 
-        if self.stream_ctx.input.distribution == Some(TimeSeriesDistribution::PerSeries) {
-            return self.scan_partition_by_series(metrics_set, partition);
+        if self.properties.partitions[partition].is_empty() {
+            return Ok(Box::pin(RecordBatchStreamWrapper::new(
+                self.stream_ctx.input.mapper.output_schema(),
+                common_recordbatch::EmptyRecordBatchStream::new(
+                    self.stream_ctx.input.mapper.output_schema(),
+                ),
+            )));
         }
 
         let stream_ctx = self.stream_ctx.clone();
@@ -237,14 +241,14 @@ impl SeqScan {
                     &mut sources,
                 );
 
-                let mut metrics = ScannerMetrics::default();
-                let mut fetch_start = Instant::now();
                 let mut reader =
                     Self::build_reader_from_sources(&stream_ctx, sources, semaphore.clone())
                         .await
                         .map_err(BoxedError::new)
                         .context(ExternalSnafu)?;
                 let cache = &stream_ctx.input.cache_strategy;
+                let mut metrics = ScannerMetrics::default();
+                let mut fetch_start = Instant::now();
                 #[cfg(debug_assertions)]
                 let mut checker = crate::read::BatchChecker::default()
                     .with_start(Some(part_range.start))
@@ -307,97 +311,6 @@ impl SeqScan {
         Ok(stream)
     }
 
-    /// Scans all ranges in the given partition and merge by time series.
-    /// Otherwise the returned stream might not contains any data.
-    fn scan_partition_by_series(
-        &self,
-        metrics_set: &ExecutionPlanMetricsSet,
-        partition: usize,
-    ) -> Result<SendableRecordBatchStream, BoxedError> {
-        let stream_ctx = self.stream_ctx.clone();
-        let semaphore = self.new_semaphore();
-        let partition_ranges = self.properties.partitions[partition].clone();
-        let distinguish_range = self.properties.distinguish_partition_range;
-        let part_metrics = self.new_partition_metrics(metrics_set, partition);
-        debug_assert!(!self.compaction);
-
-        let stream = try_stream! {
-            part_metrics.on_first_poll();
-
-            let range_builder_list = Arc::new(RangeBuilderList::new(
-                stream_ctx.input.num_memtables(),
-                stream_ctx.input.num_files(),
-            ));
-            // Scans all parts.
-            let mut sources = Vec::with_capacity(partition_ranges.len());
-            for part_range in partition_ranges {
-                build_sources(
-                    &stream_ctx,
-                    &part_range,
-                    false,
-                    &part_metrics,
-                    range_builder_list.clone(),
-                    &mut sources,
-                );
-            }
-
-            // Builds a reader that merge sources from all parts.
-            let mut reader =
-                Self::build_reader_from_sources(&stream_ctx, sources, semaphore.clone())
-                    .await
-                    .map_err(BoxedError::new)
-                    .context(ExternalSnafu)?;
-            let cache = &stream_ctx.input.cache_strategy;
-            let mut metrics = ScannerMetrics::default();
-            let mut fetch_start = Instant::now();
-
-            while let Some(batch) = reader
-                .next_batch()
-                .await
-                .map_err(BoxedError::new)
-                .context(ExternalSnafu)?
-            {
-                metrics.scan_cost += fetch_start.elapsed();
-                metrics.num_batches += 1;
-                metrics.num_rows += batch.num_rows();
-
-                debug_assert!(!batch.is_empty());
-                if batch.is_empty() {
-                    continue;
-                }
-
-                let convert_start = Instant::now();
-                let record_batch = stream_ctx.input.mapper.convert(&batch, cache)?;
-                metrics.convert_cost += convert_start.elapsed();
-                let yield_start = Instant::now();
-                yield record_batch;
-                metrics.yield_cost += yield_start.elapsed();
-
-                fetch_start = Instant::now();
-            }
-
-            // Yields an empty part to indicate this range is terminated.
-            // The query engine can use this to optimize some queries.
-            if distinguish_range {
-                let yield_start = Instant::now();
-                yield stream_ctx.input.mapper.empty_record_batch();
-                metrics.yield_cost += yield_start.elapsed();
-            }
-
-            metrics.scan_cost += fetch_start.elapsed();
-            part_metrics.merge_metrics(&metrics);
-
-            part_metrics.on_finish();
-        };
-
-        let stream = Box::pin(RecordBatchStreamWrapper::new(
-            self.stream_ctx.input.mapper.output_schema(),
-            Box::pin(stream),
-        ));
-
-        Ok(stream)
-    }
-
     fn new_semaphore(&self) -> Option<Arc<Semaphore>> {
         if self.properties.target_partitions() > self.properties.num_partitions() {
             // We can use additional tasks to read the data if we have more target partitions than actual partitions.
@@ -498,7 +411,7 @@ impl fmt::Debug for SeqScan {
 }
 
 /// Builds sources for the partition range and push them to the `sources` vector.
-fn build_sources(
+pub(crate) fn build_sources(
     stream_ctx: &Arc<StreamContext>,
     part_range: &PartitionRange,
     compaction: bool,
@@ -509,8 +422,8 @@ fn build_sources(
     // Gets range meta.
     let range_meta = &stream_ctx.ranges[part_range.identifier];
     #[cfg(debug_assertions)]
-    if compaction || stream_ctx.input.distribution == Some(TimeSeriesDistribution::PerSeries) {
-        // Compaction or per series distribution expects input sources are not been split.
+    if compaction {
+        // Compaction expects input sources are not been split.
         debug_assert_eq!(range_meta.indices.len(), range_meta.row_group_indices.len());
         for (i, row_group_idx) in range_meta.row_group_indices.iter().enumerate() {
             // It should scan all row groups.
```
547
src/mito2/src/read/series_scan.rs
Normal file
547
src/mito2/src/read/series_scan.rs
Normal file
@@ -0,0 +1,547 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Per-series scan implementation.

use std::fmt;
use std::sync::{Arc, Mutex};
use std::time::{Duration, Instant};

use async_stream::try_stream;
use common_error::ext::BoxedError;
use common_recordbatch::error::ExternalSnafu;
use common_recordbatch::util::ChainedRecordBatchStream;
use common_recordbatch::{RecordBatch, RecordBatchStreamWrapper, SendableRecordBatchStream};
use datafusion::physical_plan::metrics::ExecutionPlanMetricsSet;
use datafusion::physical_plan::{DisplayAs, DisplayFormatType};
use datatypes::compute::concat_batches;
use datatypes::schema::SchemaRef;
use smallvec::{smallvec, SmallVec};
use snafu::{ensure, OptionExt, ResultExt};
use store_api::metadata::RegionMetadataRef;
use store_api::region_engine::{PartitionRange, PrepareRequest, RegionScanner, ScannerProperties};
use tokio::sync::mpsc::error::{SendTimeoutError, TrySendError};
use tokio::sync::mpsc::{self, Receiver, Sender};
use tokio::sync::Semaphore;

use crate::error::{
    ComputeArrowSnafu, Error, InvalidSenderSnafu, PartitionOutOfRangeSnafu, Result,
    ScanMultiTimesSnafu, ScanSeriesSnafu,
};
use crate::read::range::RangeBuilderList;
use crate::read::scan_region::{ScanInput, StreamContext};
use crate::read::scan_util::{PartitionMetrics, PartitionMetricsList};
use crate::read::seq_scan::{build_sources, SeqScan};
use crate::read::{Batch, ScannerMetrics};

/// Timeout to send a batch to a sender.
const SEND_TIMEOUT: Duration = Duration::from_millis(10);

/// List of receivers.
type ReceiverList = Vec<Option<Receiver<Result<SeriesBatch>>>>;

/// Scans a region and returns sorted rows of a series in the same partition.
///
/// The output is always ordered by `(primary key, time index)` inside every
/// partition.
/// Always returns the same series (primary key) to the same partition.
pub struct SeriesScan {
    /// Properties of the scanner.
    properties: ScannerProperties,
    /// Context of streams.
    stream_ctx: Arc<StreamContext>,
    /// Receivers of each partition.
    receivers: Mutex<ReceiverList>,
    /// Metrics for each partition.
    /// The scanner only sets this for queries and keeps it empty during compaction.
    metrics_list: Arc<PartitionMetricsList>,
}
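The ordering guarantee in the doc comment above comes from how data is handed out rather than from hashing: the distributor below sends each series as a whole, so one receiver sees every batch of a given primary key. A standalone sketch of that routing invariant (hypothetical `Router` type, not part of the patch):

struct Router {
    next: usize,
    partitions: usize,
}

impl Router {
    /// Routes one complete series per call. Round-robin is enough to keep
    /// "same series, same partition" because a series is never split.
    fn route(&mut self) -> usize {
        let idx = self.next;
        self.next = (self.next + 1) % self.partitions;
        idx
    }
}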
impl SeriesScan {
    /// Creates a new [SeriesScan].
    pub(crate) fn new(input: ScanInput) -> Self {
        let mut properties = ScannerProperties::default()
            .with_append_mode(input.append_mode)
            .with_total_rows(input.total_rows());
        let stream_ctx = Arc::new(StreamContext::seq_scan_ctx(input, false));
        properties.partitions = vec![stream_ctx.partition_ranges()];

        Self {
            properties,
            stream_ctx,
            receivers: Mutex::new(Vec::new()),
            metrics_list: Arc::new(PartitionMetricsList::default()),
        }
    }

    fn scan_partition_impl(
        &self,
        metrics_set: &ExecutionPlanMetricsSet,
        partition: usize,
    ) -> Result<SendableRecordBatchStream, BoxedError> {
        if partition >= self.properties.num_partitions() {
            return Err(BoxedError::new(
                PartitionOutOfRangeSnafu {
                    given: partition,
                    all: self.properties.num_partitions(),
                }
                .build(),
            ));
        }

        self.maybe_start_distributor(metrics_set, &self.metrics_list);

        let part_metrics =
            new_partition_metrics(&self.stream_ctx, metrics_set, partition, &self.metrics_list);
        let mut receiver = self.take_receiver(partition).map_err(BoxedError::new)?;
        let stream_ctx = self.stream_ctx.clone();

        let stream = try_stream! {
            part_metrics.on_first_poll();

            let cache = &stream_ctx.input.cache_strategy;
            let mut df_record_batches = Vec::new();
            let mut fetch_start = Instant::now();
            while let Some(result) = receiver.recv().await {
                let mut metrics = ScannerMetrics::default();
                let series = result.map_err(BoxedError::new).context(ExternalSnafu)?;
                metrics.scan_cost += fetch_start.elapsed();
                fetch_start = Instant::now();

                let convert_start = Instant::now();
                df_record_batches.reserve(series.batches.len());
                for batch in series.batches {
                    metrics.num_batches += 1;
                    metrics.num_rows += batch.num_rows();

                    let record_batch = stream_ctx.input.mapper.convert(&batch, cache)?;
                    df_record_batches.push(record_batch.into_df_record_batch());
                }

                let output_schema = stream_ctx.input.mapper.output_schema();
                let df_record_batch =
                    concat_batches(output_schema.arrow_schema(), &df_record_batches)
                        .context(ComputeArrowSnafu)
                        .map_err(BoxedError::new)
                        .context(ExternalSnafu)?;
                df_record_batches.clear();
                let record_batch =
                    RecordBatch::try_from_df_record_batch(output_schema, df_record_batch)?;
                metrics.convert_cost += convert_start.elapsed();

                let yield_start = Instant::now();
                yield record_batch;
                metrics.yield_cost += yield_start.elapsed();

                part_metrics.merge_metrics(&metrics);
            }
        };

        let stream = Box::pin(RecordBatchStreamWrapper::new(
            self.stream_ctx.input.mapper.output_schema(),
            Box::pin(stream),
        ));

        Ok(stream)
    }

    /// Takes the receiver for the partition.
    fn take_receiver(&self, partition: usize) -> Result<Receiver<Result<SeriesBatch>>> {
        let mut rx_list = self.receivers.lock().unwrap();
        rx_list[partition]
            .take()
            .context(ScanMultiTimesSnafu { partition })
    }

    /// Starts the distributor if the receiver list is empty.
    fn maybe_start_distributor(
        &self,
        metrics_set: &ExecutionPlanMetricsSet,
        metrics_list: &Arc<PartitionMetricsList>,
    ) {
        let mut rx_list = self.receivers.lock().unwrap();
        if !rx_list.is_empty() {
            return;
        }

        let (senders, receivers) = new_channel_list(self.properties.num_partitions());
        let mut distributor = SeriesDistributor {
            stream_ctx: self.stream_ctx.clone(),
            semaphore: Some(Arc::new(Semaphore::new(self.properties.num_partitions()))),
            partitions: self.properties.partitions.clone(),
            senders,
            metrics_set: metrics_set.clone(),
            metrics_list: metrics_list.clone(),
        };
        common_runtime::spawn_global(async move {
            distributor.execute().await;
        });

        *rx_list = receivers;
    }

    /// Scans the region and returns a stream.
    pub(crate) async fn build_stream(&self) -> Result<SendableRecordBatchStream, BoxedError> {
        let part_num = self.properties.num_partitions();
        let metrics_set = ExecutionPlanMetricsSet::default();
        let streams = (0..part_num)
            .map(|i| self.scan_partition(&metrics_set, i))
            .collect::<Result<Vec<_>, BoxedError>>()?;
        let chained_stream = ChainedRecordBatchStream::new(streams).map_err(BoxedError::new)?;
        Ok(Box::pin(chained_stream))
    }
}
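`concat_batches` in `scan_partition_impl` above merges all converted batches of one series into a single record batch before yielding, so downstream operators never see per-batch fragments. The same call shape against arrow directly, as a minimal sketch (assuming `datatypes::compute::concat_batches` mirrors `arrow::compute::concat_batches`):

use std::sync::Arc;

use arrow::array::Int64Array;
use arrow::compute::concat_batches;
use arrow::datatypes::{DataType, Field, Schema};
use arrow::record_batch::RecordBatch;

fn main() -> Result<(), arrow::error::ArrowError> {
    let schema = Arc::new(Schema::new(vec![Field::new("v", DataType::Int64, false)]));
    let b1 = RecordBatch::try_new(schema.clone(), vec![Arc::new(Int64Array::from(vec![1, 2]))])?;
    let b2 = RecordBatch::try_new(schema.clone(), vec![Arc::new(Int64Array::from(vec![3]))])?;
    // One output batch per series keeps downstream operators from seeing tiny batches.
    let merged = concat_batches(&schema, &[b1, b2])?;
    assert_eq!(merged.num_rows(), 3);
    Ok(())
}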
fn new_channel_list(num_partitions: usize) -> (SenderList, ReceiverList) {
    let (senders, receivers): (Vec<_>, Vec<_>) = (0..num_partitions)
        .map(|_| {
            let (sender, receiver) = mpsc::channel(1);
            (Some(sender), Some(receiver))
        })
        .unzip();
    (SenderList::new(senders), receivers)
}
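`mpsc::channel(1)` above gives every partition a single-slot buffer, so the distributor can run at most one series ahead of each consumer. A minimal runnable sketch of that backpressure (tokio assumed):

#[tokio::main]
async fn main() {
    let (tx, mut rx) = tokio::sync::mpsc::channel::<u32>(1);
    tokio::spawn(async move {
        for i in 0..3 {
            // With capacity 1, this send blocks until the receiver drains
            // the previous value, bounding the producer's lead.
            tx.send(i).await.unwrap();
        }
    });
    while let Some(v) = rx.recv().await {
        println!("got {v}");
    }
}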
impl RegionScanner for SeriesScan {
    fn properties(&self) -> &ScannerProperties {
        &self.properties
    }

    fn schema(&self) -> SchemaRef {
        self.stream_ctx.input.mapper.output_schema()
    }

    fn metadata(&self) -> RegionMetadataRef {
        self.stream_ctx.input.mapper.metadata().clone()
    }

    fn scan_partition(
        &self,
        metrics_set: &ExecutionPlanMetricsSet,
        partition: usize,
    ) -> Result<SendableRecordBatchStream, BoxedError> {
        self.scan_partition_impl(metrics_set, partition)
    }

    fn prepare(&mut self, request: PrepareRequest) -> Result<(), BoxedError> {
        self.properties.prepare(request);
        Ok(())
    }

    fn has_predicate(&self) -> bool {
        let predicate = self.stream_ctx.input.predicate();
        predicate.map(|p| !p.exprs().is_empty()).unwrap_or(false)
    }

    fn set_logical_region(&mut self, logical_region: bool) {
        self.properties.set_logical_region(logical_region);
    }
}

impl DisplayAs for SeriesScan {
    fn fmt_as(&self, t: DisplayFormatType, f: &mut fmt::Formatter) -> fmt::Result {
        write!(
            f,
            "SeriesScan: region={}, ",
            self.stream_ctx.input.mapper.metadata().region_id
        )?;
        match t {
            DisplayFormatType::Default => self.stream_ctx.format_for_explain(false, f),
            DisplayFormatType::Verbose => {
                self.stream_ctx.format_for_explain(true, f)?;
                self.metrics_list.format_verbose_metrics(f)
            }
        }
    }
}

impl fmt::Debug for SeriesScan {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("SeriesScan")
            .field("num_ranges", &self.stream_ctx.ranges.len())
            .finish()
    }
}

#[cfg(test)]
impl SeriesScan {
    /// Returns the input.
    pub(crate) fn input(&self) -> &ScanInput {
        &self.stream_ctx.input
    }
}

/// The distributor scans series and distributes them to different partitions.
struct SeriesDistributor {
    /// Context for the scan stream.
    stream_ctx: Arc<StreamContext>,
    /// Optional semaphore for limiting the number of concurrent scans.
    semaphore: Option<Arc<Semaphore>>,
    /// Partition ranges to scan.
    partitions: Vec<Vec<PartitionRange>>,
    /// Senders of all partitions.
    senders: SenderList,
    /// Metrics set to report.
    /// The distributor reports its metrics as an additional partition.
    /// This may double the scan cost in the [SeriesScan] metrics. We can
    /// read the per-partition metrics in verbose mode to see the metrics of
    /// the distributor alone.
    metrics_set: ExecutionPlanMetricsSet,
    metrics_list: Arc<PartitionMetricsList>,
}

impl SeriesDistributor {
    /// Executes the distributor.
    async fn execute(&mut self) {
        if let Err(e) = self.scan_partitions().await {
            self.senders.send_error(e).await;
        }
    }

    /// Scans all parts.
    async fn scan_partitions(&mut self) -> Result<()> {
        let part_metrics = new_partition_metrics(
            &self.stream_ctx,
            &self.metrics_set,
            self.partitions.len(),
            &self.metrics_list,
        );
        part_metrics.on_first_poll();

        let range_builder_list = Arc::new(RangeBuilderList::new(
            self.stream_ctx.input.num_memtables(),
            self.stream_ctx.input.num_files(),
        ));
        // Scans all parts.
        let mut sources = Vec::with_capacity(self.partitions.len());
        for partition in &self.partitions {
            sources.reserve(partition.len());
            for part_range in partition {
                build_sources(
                    &self.stream_ctx,
                    part_range,
                    false,
                    &part_metrics,
                    range_builder_list.clone(),
                    &mut sources,
                );
            }
        }

        // Builds a reader that merges sources from all parts.
        let mut reader =
            SeqScan::build_reader_from_sources(&self.stream_ctx, sources, self.semaphore.clone())
                .await?;
        let mut metrics = ScannerMetrics::default();
        let mut fetch_start = Instant::now();

        let mut current_series = SeriesBatch::default();
        while let Some(batch) = reader.next_batch().await? {
            metrics.scan_cost += fetch_start.elapsed();
            fetch_start = Instant::now();
            metrics.num_batches += 1;
            metrics.num_rows += batch.num_rows();

            debug_assert!(!batch.is_empty());
            if batch.is_empty() {
                continue;
            }

            let Some(last_key) = current_series.current_key() else {
                current_series.push(batch);
                continue;
            };

            if last_key == batch.primary_key() {
                current_series.push(batch);
                continue;
            }

            // We found a new series: send the current one.
            let to_send = std::mem::replace(&mut current_series, SeriesBatch::single(batch));
            let yield_start = Instant::now();
            self.senders.send_batch(to_send).await?;
            metrics.yield_cost += yield_start.elapsed();
        }

        if !current_series.is_empty() {
            let yield_start = Instant::now();
            self.senders.send_batch(current_series).await?;
            metrics.yield_cost += yield_start.elapsed();
        }

        metrics.scan_cost += fetch_start.elapsed();
        part_metrics.merge_metrics(&metrics);
        part_metrics.set_num_series_send_timeout(self.senders.num_timeout);

        part_metrics.on_finish();

        Ok(())
    }
}
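The loop in `scan_partitions` depends on the merged reader emitting batches sorted by primary key, so collecting a series is a single pass over consecutive equal keys. The same invariant on plain strings, as a sketch:

/// Groups consecutive equal keys, mirroring how batches of one series are
/// accumulated into a SeriesBatch before being handed to a partition.
fn group_consecutive<'a>(keys: &[&'a str]) -> Vec<Vec<&'a str>> {
    let mut groups: Vec<Vec<&'a str>> = Vec::new();
    for &key in keys {
        match groups.last_mut() {
            Some(group) if group.last() == Some(&key) => group.push(key),
            _ => groups.push(vec![key]),
        }
    }
    groups
}

fn main() {
    let grouped = group_consecutive(&["a", "a", "b", "c", "c"]);
    assert_eq!(grouped, vec![vec!["a", "a"], vec!["b"], vec!["c", "c"]]);
}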
/// Batches of the same series.
#[derive(Default)]
struct SeriesBatch {
    batches: SmallVec<[Batch; 4]>,
}

impl SeriesBatch {
    /// Creates a new [SeriesBatch] from a single [Batch].
    fn single(batch: Batch) -> Self {
        Self {
            batches: smallvec![batch],
        }
    }

    fn current_key(&self) -> Option<&[u8]> {
        self.batches.first().map(|batch| batch.primary_key())
    }

    fn push(&mut self, batch: Batch) {
        self.batches.push(batch);
    }

    /// Returns true if there is no batch.
    fn is_empty(&self) -> bool {
        self.batches.is_empty()
    }
}
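`SmallVec<[Batch; 4]>` stores up to four batches inline and only allocates when a series is longer, which suits the common short-series case. Demonstrated with `u32` (smallvec crate assumed):

use smallvec::{smallvec, SmallVec};

fn main() {
    let mut v: SmallVec<[u32; 4]> = smallvec![1, 2, 3, 4];
    assert!(!v.spilled()); // still stored inline
    v.push(5);
    assert!(v.spilled()); // the fifth element forces a heap allocation
}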
/// List of senders.
struct SenderList {
    senders: Vec<Option<Sender<Result<SeriesBatch>>>>,
    /// Number of None senders.
    num_nones: usize,
    /// Index of the next partition to send to.
    sender_idx: usize,
    /// Number of send timeouts.
    num_timeout: usize,
}

impl SenderList {
    fn new(senders: Vec<Option<Sender<Result<SeriesBatch>>>>) -> Self {
        let num_nones = senders.iter().filter(|sender| sender.is_none()).count();
        Self {
            senders,
            num_nones,
            sender_idx: 0,
            num_timeout: 0,
        }
    }

    /// Finds a partition and tries to send the batch to it.
    /// Returns None if the batch is sent successfully.
    fn try_send_batch(&mut self, mut batch: SeriesBatch) -> Result<Option<SeriesBatch>> {
        for _ in 0..self.senders.len() {
            ensure!(self.num_nones < self.senders.len(), InvalidSenderSnafu);

            let sender_idx = self.fetch_add_sender_idx();
            let Some(sender) = &self.senders[sender_idx] else {
                continue;
            };

            match sender.try_send(Ok(batch)) {
                Ok(()) => return Ok(None),
                Err(TrySendError::Full(res)) => {
                    // Safety: we send Ok.
                    batch = res.unwrap();
                }
                Err(TrySendError::Closed(res)) => {
                    self.senders[sender_idx] = None;
                    self.num_nones += 1;
                    // Safety: we send Ok.
                    batch = res.unwrap();
                }
            }
        }

        Ok(Some(batch))
    }

    /// Finds a partition and sends the batch to it.
    async fn send_batch(&mut self, mut batch: SeriesBatch) -> Result<()> {
        // Tries to send the batch without blocking first.
        match self.try_send_batch(batch)? {
            Some(b) => {
                // Unable to send the batch to any partition.
                batch = b;
            }
            None => {
                return Ok(());
            }
        }

        loop {
            ensure!(self.num_nones < self.senders.len(), InvalidSenderSnafu);

            let sender_idx = self.fetch_add_sender_idx();
            let Some(sender) = &self.senders[sender_idx] else {
                continue;
            };
            // Adds a timeout to avoid blocking indefinitely and sends
            // the batch in a round-robin fashion when some partitions
            // don't poll their inputs. This may happen if we have a
            // node like sort merging. But it is rare when we are using SeriesScan.
            match sender.send_timeout(Ok(batch), SEND_TIMEOUT).await {
                Ok(()) => break,
                Err(SendTimeoutError::Timeout(res)) => {
                    self.num_timeout += 1;
                    // Safety: we send Ok.
                    batch = res.unwrap();
                }
                Err(SendTimeoutError::Closed(res)) => {
                    self.senders[sender_idx] = None;
                    self.num_nones += 1;
                    // Safety: we send Ok.
                    batch = res.unwrap();
                }
            }
        }

        Ok(())
    }

    async fn send_error(&self, error: Error) {
        let error = Arc::new(error);
        for sender in self.senders.iter().flatten() {
            let result = Err(error.clone()).context(ScanSeriesSnafu);
            let _ = sender.send(result).await;
        }
    }

    fn fetch_add_sender_idx(&mut self) -> usize {
        let sender_idx = self.sender_idx;
        self.sender_idx = (self.sender_idx + 1) % self.senders.len();
        sender_idx
    }
}
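Reduced to its essentials, `send_batch` is a round-robin loop that never drops a value: a timeout hands the batch back for the next sender, and a closed channel retires that sender for good. A standalone sketch with hypothetical names (tokio assumed):

use std::time::Duration;

use tokio::sync::mpsc::error::SendTimeoutError;
use tokio::sync::mpsc::Sender;

async fn send_round_robin<T>(
    senders: &mut [Option<Sender<T>>],
    next: &mut usize,
    mut value: T,
) -> Result<(), &'static str> {
    loop {
        if senders.iter().all(Option::is_none) {
            return Err("all receivers dropped");
        }
        let idx = *next;
        *next = (*next + 1) % senders.len();
        let Some(sender) = &senders[idx] else { continue };
        match sender.send_timeout(value, Duration::from_millis(10)).await {
            Ok(()) => return Ok(()),
            // Receiver is slow: take the value back and try the next sender.
            Err(SendTimeoutError::Timeout(v)) => value = v,
            // Receiver is gone: retire this sender and keep the value.
            Err(SendTimeoutError::Closed(v)) => {
                senders[idx] = None;
                value = v;
            }
        }
    }
}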
fn new_partition_metrics(
    stream_ctx: &StreamContext,
    metrics_set: &ExecutionPlanMetricsSet,
    partition: usize,
    metrics_list: &PartitionMetricsList,
) -> PartitionMetrics {
    let metrics = PartitionMetrics::new(
        stream_ctx.input.mapper.metadata().region_id,
        partition,
        "SeriesScan",
        stream_ctx.query_start,
        metrics_set,
    );

    metrics_list.set(partition, metrics.clone());
    metrics
}
@@ -346,7 +346,6 @@ impl BloomFilterIndexer {

 #[cfg(test)]
 pub(crate) mod tests {
-    use std::iter;

     use api::v1::SemanticType;
     use datatypes::data_type::ConcreteDataType;
@@ -461,15 +460,15 @@ pub(crate) mod tests {

         Batch::new(
             primary_key,
-            Arc::new(UInt64Vector::from_iter_values(
-                iter::repeat(0).take(num_rows),
-            )),
-            Arc::new(UInt64Vector::from_iter_values(
-                iter::repeat(0).take(num_rows),
-            )),
-            Arc::new(UInt8Vector::from_iter_values(
-                iter::repeat(1).take(num_rows),
-            )),
+            Arc::new(UInt64Vector::from_iter_values(std::iter::repeat_n(
+                0, num_rows,
+            ))),
+            Arc::new(UInt64Vector::from_iter_values(std::iter::repeat_n(
+                0, num_rows,
+            ))),
+            Arc::new(UInt8Vector::from_iter_values(std::iter::repeat_n(
+                1, num_rows,
+            ))),
             vec![u64_field],
         )
         .unwrap()
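The test change above is mechanical: `std::iter::repeat_n(x, n)` (stable since Rust 1.82) is the bounded form of `std::iter::repeat(x).take(n)` and removes the need for the `use std::iter;` import. The two produce identical sequences:

fn main() {
    let a: Vec<u64> = std::iter::repeat(0u64).take(3).collect();
    let b: Vec<u64> = std::iter::repeat_n(0u64, 3).collect();
    assert_eq!(a, b);
}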
@@ -489,12 +489,12 @@ mod tests {
             Arc::new(UInt64Vector::from_iter_values(
                 (0..num_rows).map(|n| n as u64),
             )),
-            Arc::new(UInt64Vector::from_iter_values(
-                std::iter::repeat(0).take(num_rows),
-            )),
-            Arc::new(UInt8Vector::from_iter_values(
-                std::iter::repeat(1).take(num_rows),
-            )),
+            Arc::new(UInt64Vector::from_iter_values(std::iter::repeat_n(
+                0, num_rows,
+            ))),
+            Arc::new(UInt8Vector::from_iter_values(std::iter::repeat_n(
+                1, num_rows,
+            ))),
             vec![
                 BatchColumn {
                     column_id: 1,

@@ -326,7 +326,6 @@ impl InvertedIndexer {
 #[cfg(test)]
 mod tests {
     use std::collections::BTreeSet;
-    use std::iter;

     use api::v1::SemanticType;
     use datafusion_expr::{binary_expr, col, lit, Expr as DfExpr, Operator};
@@ -424,15 +423,15 @@ mod tests {

         Batch::new(
             primary_key,
-            Arc::new(UInt64Vector::from_iter_values(
-                iter::repeat(0).take(num_rows),
-            )),
-            Arc::new(UInt64Vector::from_iter_values(
-                iter::repeat(0).take(num_rows),
-            )),
-            Arc::new(UInt8Vector::from_iter_values(
-                iter::repeat(1).take(num_rows),
-            )),
+            Arc::new(UInt64Vector::from_iter_values(std::iter::repeat_n(
+                0, num_rows,
+            ))),
+            Arc::new(UInt64Vector::from_iter_values(std::iter::repeat_n(
+                0, num_rows,
+            ))),
+            Arc::new(UInt8Vector::from_iter_values(std::iter::repeat_n(
+                1, num_rows,
+            ))),
             vec![u64_field],
         )
         .unwrap()

@@ -27,7 +27,7 @@ use snafu::{OptionExt, ResultExt};
 use store_api::storage::TimeSeriesRowSelector;

 use crate::error::{
-    DecodeStatsSnafu, FieldTypeMismatchSnafu, FilterRecordBatchSnafu, Result, StatsNotPresentSnafu,
+    DecodeStatsSnafu, FieldTypeMismatchSnafu, RecordBatchSnafu, Result, StatsNotPresentSnafu,
 };
 use crate::read::compat::CompatBatch;
 use crate::read::last_row::RowGroupLastRowCachedReader;
@@ -294,7 +294,7 @@ impl RangeBase {
             };
             if filter
                 .evaluate_scalar(&pk_value)
-                .context(FilterRecordBatchSnafu)?
+                .context(RecordBatchSnafu)?
             {
                 continue;
             } else {
@@ -311,11 +311,11 @@ impl RangeBase {
                     let field_col = &input.fields()[field_index].data;
                     filter
                         .evaluate_vector(field_col)
-                        .context(FilterRecordBatchSnafu)?
+                        .context(RecordBatchSnafu)?
                 }
                 SemanticType::Timestamp => filter
                     .evaluate_vector(input.timestamps())
-                    .context(FilterRecordBatchSnafu)?,
+                    .context(RecordBatchSnafu)?,
             };

             mask = mask.bitand(&result);

@@ -755,7 +755,7 @@ mod tests {
         ));
         let mut keys = vec![];
         for (index, num_rows) in pk_row_nums.iter().map(|v| v.1).enumerate() {
-            keys.extend(std::iter::repeat(index as u32).take(num_rows));
+            keys.extend(std::iter::repeat_n(index as u32, num_rows));
         }
         let keys = UInt32Array::from(keys);
         Arc::new(DictionaryArray::new(keys, values))
@@ -85,11 +85,9 @@ impl ImpureDefaultFiller {
             .schema
             .iter()
             .filter_map(|schema| {
-                if self.impure_columns.contains_key(&schema.column_name) {
-                    Some(&schema.column_name)
-                } else {
-                    None
-                }
+                self.impure_columns
+                    .contains_key(&schema.column_name)
+                    .then_some(&schema.column_name)
             })
             .collect();
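The refactor above collapses an if/else returning `Option` into `bool::then_some`, which is the idiomatic shape inside `filter_map`:

fn main() {
    let keep = true;
    assert_eq!(keep.then_some("column"), Some("column"));
    assert_eq!(false.then_some("column"), None);
}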
@@ -325,7 +325,7 @@ impl std::str::FromStr for Pattern {

 impl Pattern {
     fn check(&self) -> Result<()> {
-        if self.len() == 0 {
+        if self.is_empty() {
             return DissectEmptyPatternSnafu.fail();
         }

@@ -91,9 +91,9 @@ impl UserDefinedLogicalNodeCore for InstantManipulate {
         _exprs: Vec<Expr>,
         inputs: Vec<LogicalPlan>,
     ) -> DataFusionResult<Self> {
-        if inputs.is_empty() {
+        if inputs.len() != 1 {
             return Err(DataFusionError::Internal(
-                "InstantManipulate should have at least one input".to_string(),
+                "InstantManipulate should have exact one input".to_string(),
             ));
         }

@@ -354,6 +354,9 @@ impl Stream for InstantManipulateStream {
     fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
         let poll = match ready!(self.input.poll_next_unpin(cx)) {
             Some(Ok(batch)) => {
+                if batch.num_rows() == 0 {
+                    return Poll::Pending;
+                }
                 let timer = std::time::Instant::now();
                 self.num_series.add(1);
                 let result = Ok(batch).and_then(|batch| self.manipulate(batch));

@@ -42,7 +42,7 @@ use greptime_proto::substrait_extension as pb;
 use prost::Message;
 use snafu::ResultExt;

-use crate::error::{DataFusionPlanningSnafu, DeserializeSnafu, Result};
+use crate::error::{DeserializeSnafu, Result};
 use crate::extension_plan::{Millisecond, METRIC_NUM_SERIES};
 use crate::metrics::PROMQL_SERIES_COUNT;
 use crate::range_array::RangeArray;
@@ -194,20 +194,26 @@ impl RangeManipulate {

     pub fn deserialize(bytes: &[u8]) -> Result<Self> {
         let pb_range_manipulate = pb::RangeManipulate::decode(bytes).context(DeserializeSnafu)?;
+        let empty_schema = Arc::new(DFSchema::empty());
         let placeholder_plan = LogicalPlan::EmptyRelation(EmptyRelation {
             produce_one_row: false,
-            schema: Arc::new(DFSchema::empty()),
+            schema: empty_schema.clone(),
         });
-        Self::new(
-            pb_range_manipulate.start,
-            pb_range_manipulate.end,
-            pb_range_manipulate.interval,
-            pb_range_manipulate.range,
-            pb_range_manipulate.time_index,
-            pb_range_manipulate.tag_columns,
-            placeholder_plan,
-        )
-        .context(DataFusionPlanningSnafu)
+        // Unlike `Self::new()`, this method doesn't check the input schema as it will fail
+        // because the input schema is empty.
+        // But this is Ok since datafusion guarantees to call `with_exprs_and_inputs` for the
+        // deserialized plan.
+        Ok(Self {
+            start: pb_range_manipulate.start,
+            end: pb_range_manipulate.end,
+            interval: pb_range_manipulate.interval,
+            range: pb_range_manipulate.range,
+            time_index: pb_range_manipulate.time_index,
+            field_columns: pb_range_manipulate.tag_columns,
+            input: placeholder_plan,
+            output_schema: empty_schema,
+        })
     }
 }

@@ -270,14 +276,19 @@ impl UserDefinedLogicalNodeCore for RangeManipulate {
     fn with_exprs_and_inputs(
         &self,
         _exprs: Vec<Expr>,
-        inputs: Vec<LogicalPlan>,
+        mut inputs: Vec<LogicalPlan>,
     ) -> DataFusionResult<Self> {
-        if inputs.is_empty() {
+        if inputs.len() != 1 {
             return Err(DataFusionError::Internal(
-                "RangeManipulate should have at least one input".to_string(),
+                "RangeManipulate should have at exact one input".to_string(),
             ));
         }

+        let input: LogicalPlan = inputs.pop().unwrap();
+        let input_schema = input.schema();
+        let output_schema =
+            Self::calculate_output_schema(input_schema, &self.time_index, &self.field_columns)?;
+
         Ok(Self {
             start: self.start,
             end: self.end,
@@ -285,8 +296,8 @@ impl UserDefinedLogicalNodeCore for RangeManipulate {
             range: self.range,
             time_index: self.time_index.clone(),
             field_columns: self.field_columns.clone(),
-            input: inputs.into_iter().next().unwrap(),
-            output_schema: self.output_schema.clone(),
+            input,
+            output_schema,
         })
     }
 }

@@ -106,6 +106,10 @@ impl SeriesDivide {
         })
     }

+    pub fn tags(&self) -> &[String] {
+        &self.tag_columns
+    }
+
     pub fn serialize(&self) -> Vec<u8> {
         pb::SeriesDivide {
             tag_columns: self.tag_columns.clone(),
@@ -315,7 +319,9 @@ impl Stream for SeriesDivideStream {
             let next_batch = ready!(self.as_mut().fetch_next_batch(cx)).transpose()?;
             let timer = std::time::Instant::now();
             if let Some(next_batch) = next_batch {
-                self.buffer.push(next_batch);
+                if next_batch.num_rows() != 0 {
+                    self.buffer.push(next_batch);
+                }
                 continue;
             } else {
                 // input stream is ended

@@ -40,7 +40,7 @@ pub use holt_winters::HoltWinters;
 pub use idelta::IDelta;
 pub use predict_linear::PredictLinear;
 pub use quantile::QuantileOverTime;
-pub use quantile_aggr::quantile_udaf;
+pub use quantile_aggr::{quantile_udaf, QUANTILE_NAME};
 pub use resets::Resets;
 pub use round::Round;

@@ -228,7 +228,7 @@ impl<const IS_COUNTER: bool, const IS_RATE: bool> ExtrapolatedRate<IS_COUNTER, I

 // delta
 impl ExtrapolatedRate<false, false> {
-    pub fn name() -> &'static str {
+    pub const fn name() -> &'static str {
         "prom_delta"
     }

@@ -239,7 +239,7 @@ impl ExtrapolatedRate<false, false> {

 // rate
 impl ExtrapolatedRate<true, true> {
-    pub fn name() -> &'static str {
+    pub const fn name() -> &'static str {
         "prom_rate"
     }

@@ -250,7 +250,7 @@ impl ExtrapolatedRate<true, true> {

 // increase
 impl ExtrapolatedRate<true, false> {
-    pub fn name() -> &'static str {
+    pub const fn name() -> &'static str {
         "prom_increase"
     }

@@ -27,7 +27,7 @@ use datatypes::arrow::datatypes::{DataType, Field, Float64Type};

 use crate::functions::quantile::quantile_impl;

-const QUANTILE_NAME: &str = "quantile";
+pub const QUANTILE_NAME: &str = "quantile";

 const VALUES_FIELD_NAME: &str = "values";
 const DEFAULT_LIST_FIELD_NAME: &str = "item";

@@ -55,12 +55,16 @@ impl Categorizer {
             LogicalPlan::Filter(filter) => Self::check_expr(&filter.predicate),
             LogicalPlan::Window(_) => Commutativity::Unimplemented,
             LogicalPlan::Aggregate(aggr) => {
-                if Self::check_partition(&aggr.group_expr, &partition_cols) {
-                    return Commutativity::Commutative;
+                if !Self::check_partition(&aggr.group_expr, &partition_cols) {
+                    return Commutativity::NonCommutative;
                 }

-                Commutativity::Unimplemented
+                // check all children exprs and use the strictest level
+                for expr in &aggr.aggr_expr {
+                    let commutativity = Self::check_expr(expr);
+                    if !matches!(commutativity, Commutativity::Commutative) {
+                        return commutativity;
+                    }
+                }
+                Commutativity::Commutative
             }
             LogicalPlan::Sort(_) => {
                 if partition_cols.is_empty() {
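The new `Aggregate` arm no longer reports `Unimplemented` unconditionally; it folds the commutativity of every aggregate expression and returns the strictest level it encounters. That folding rule in isolation, as a sketch:

#[derive(Clone, Copy, Debug, PartialEq)]
enum Commutativity {
    Commutative,
    NonCommutative,
    Unimplemented,
}

/// Returns Commutative only if every child is; otherwise the first
/// stricter level wins, matching the loop in the Aggregate arm above.
fn strictest(levels: &[Commutativity]) -> Commutativity {
    for &level in levels {
        if level != Commutativity::Commutative {
            return level;
        }
    }
    Commutativity::Commutative
}

fn main() {
    use Commutativity::*;
    assert_eq!(strictest(&[Commutative, Commutative]), Commutative);
    assert_eq!(strictest(&[Commutative, Unimplemented]), Unimplemented);
}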
@@ -94,7 +98,7 @@ impl Categorizer {
                 }
             }
             LogicalPlan::Extension(extension) => {
-                Self::check_extension_plan(extension.node.as_ref() as _)
+                Self::check_extension_plan(extension.node.as_ref() as _, &partition_cols)
             }
             LogicalPlan::Distinct(_) => {
                 if partition_cols.is_empty() {
@@ -116,13 +120,30 @@ impl Categorizer {
         }
     }

-    pub fn check_extension_plan(plan: &dyn UserDefinedLogicalNode) -> Commutativity {
+    pub fn check_extension_plan(
+        plan: &dyn UserDefinedLogicalNode,
+        partition_cols: &[String],
+    ) -> Commutativity {
         match plan.name() {
-            name if name == EmptyMetric::name()
+            name if name == SeriesDivide::name() => {
+                let series_divide = plan.as_any().downcast_ref::<SeriesDivide>().unwrap();
+                let tags = series_divide.tags().iter().collect::<HashSet<_>>();
+                for partition_col in partition_cols {
+                    if !tags.contains(partition_col) {
+                        return Commutativity::NonCommutative;
+                    }
+                }
+                Commutativity::Commutative
+            }
+            name if name == SeriesNormalize::name()
                 || name == InstantManipulate::name()
-                || name == SeriesNormalize::name()
-                || name == RangeManipulate::name()
-                || name == SeriesDivide::name()
+                || name == RangeManipulate::name() =>
+            {
+                // They should always follow SeriesDivide.
+                // Either all commutative or all non-commutative (which will be blocked by SeriesDivide).
+                Commutativity::Commutative
+            }
+            name if name == EmptyMetric::name()
                 || name == MergeScanLogicalPlan::name()
                 || name == MergeSortLogicalPlan::name() =>
             {
|
||||
| Expr::Negative(_)
|
||||
| Expr::Between(_)
|
||||
| Expr::Exists(_)
|
||||
| Expr::InList(_)
|
||||
| Expr::ScalarFunction(_) => Commutativity::Commutative,
|
||||
| Expr::InList(_) => Commutativity::Commutative,
|
||||
Expr::ScalarFunction(_udf) => Commutativity::Commutative,
|
||||
Expr::AggregateFunction(_udaf) => Commutativity::Commutative,
|
||||
|
||||
Expr::Like(_)
|
||||
| Expr::SimilarTo(_)
|
||||
@@ -158,7 +180,6 @@ impl Categorizer {
|
||||
| Expr::Case(_)
|
||||
| Expr::Cast(_)
|
||||
| Expr::TryCast(_)
|
||||
| Expr::AggregateFunction(_)
|
||||
| Expr::WindowFunction(_)
|
||||
| Expr::InSubquery(_)
|
||||
| Expr::ScalarSubquery(_)
|
||||
|
||||
@@ -14,7 +14,6 @@
|
||||
|
||||
#![feature(let_chains)]
|
||||
#![feature(int_roundings)]
|
||||
#![feature(trait_upcasting)]
|
||||
#![feature(try_blocks)]
|
||||
#![feature(stmt_expr_attributes)]
|
||||
#![feature(iterator_try_collect)]
|
||||
|
||||
@@ -62,21 +62,28 @@ impl ParallelizeScan {
|
||||
} else if let Some(region_scan_exec) =
|
||||
plan.as_any().downcast_ref::<RegionScanExec>()
|
||||
{
|
||||
let expected_partition_num = config.execution.target_partitions;
|
||||
if region_scan_exec.is_partition_set() {
|
||||
return Ok(Transformed::no(plan));
|
||||
}
|
||||
|
||||
// don't parallelize if we want per series distribution
|
||||
if matches!(
|
||||
region_scan_exec.distribution(),
|
||||
Some(TimeSeriesDistribution::PerSeries)
|
||||
) {
|
||||
return Ok(Transformed::no(plan));
|
||||
let partition_range = region_scan_exec.get_partition_ranges();
|
||||
// HACK: Allocate expected_partition_num empty partitions to indicate
|
||||
// the expected partition number.
|
||||
let mut new_partitions = vec![vec![]; expected_partition_num];
|
||||
new_partitions[0] = partition_range;
|
||||
let new_plan = region_scan_exec
|
||||
.with_new_partitions(new_partitions, expected_partition_num)
|
||||
.map_err(|e| DataFusionError::External(e.into_inner()))?;
|
||||
return Ok(Transformed::yes(Arc::new(new_plan)));
|
||||
}
|
||||
|
||||
let ranges = region_scan_exec.get_partition_ranges();
|
||||
let total_range_num = ranges.len();
|
||||
let expected_partition_num = config.execution.target_partitions;
|
||||
|
||||
// assign ranges to each partition
|
||||
let mut partition_ranges =
|
||||
@@ -131,26 +138,18 @@ impl ParallelizeScan {
|
||||
) -> Vec<Vec<PartitionRange>> {
|
||||
if ranges.is_empty() {
|
||||
// Returns a single partition with no range.
|
||||
return vec![vec![]];
|
||||
return vec![vec![]; expected_partition_num];
|
||||
}
|
||||
|
||||
if ranges.len() == 1 {
|
||||
return vec![ranges];
|
||||
let mut vec = vec![vec![]; expected_partition_num];
|
||||
vec[0] = ranges;
|
||||
return vec;
|
||||
}
|
||||
|
||||
// Sort ranges by number of rows in descending order.
|
||||
ranges.sort_by(|a, b| b.num_rows.cmp(&a.num_rows));
|
||||
// Get the max row number of the ranges. Note that the number of rows may be 0 if statistics are not available.
|
||||
let max_rows = ranges[0].num_rows;
|
||||
let total_rows = ranges.iter().map(|range| range.num_rows).sum::<usize>();
|
||||
// Computes the partition num by the max row number. This eliminates the unbalance of the partitions.
|
||||
let balanced_partition_num = if max_rows > 0 {
|
||||
total_rows.div_ceil(max_rows)
|
||||
} else {
|
||||
ranges.len()
|
||||
};
|
||||
let actual_partition_num = expected_partition_num.min(balanced_partition_num).max(1);
|
||||
let mut partition_ranges = vec![vec![]; actual_partition_num];
|
||||
let mut partition_ranges = vec![vec![]; expected_partition_num];
|
||||
|
||||
#[derive(Eq, PartialEq)]
|
||||
struct HeapNode {
|
||||
@@ -172,7 +171,7 @@ impl ParallelizeScan {
|
||||
}
|
||||
|
||||
let mut part_heap =
|
||||
BinaryHeap::from_iter((0..actual_partition_num).map(|partition_idx| HeapNode {
|
||||
BinaryHeap::from_iter((0..expected_partition_num).map(|partition_idx| HeapNode {
|
||||
num_rows: 0,
|
||||
partition_idx,
|
||||
}));
|
||||
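`assign_partition_range` is a greedy longest-processing-time assignment: sort ranges by row count descending, then always append the next range to the currently lightest partition via a min-heap. The same idea with std types and plain row counts (`HeapNode`'s hand-written ordering replaced by `cmp::Reverse`):

use std::cmp::Reverse;
use std::collections::BinaryHeap;

fn assign(mut rows: Vec<usize>, partitions: usize) -> Vec<Vec<usize>> {
    rows.sort_unstable_by(|a, b| b.cmp(a)); // heaviest first
    let mut out = vec![Vec::new(); partitions];
    // Min-heap over (assigned_rows, partition_idx) via Reverse.
    let mut heap: BinaryHeap<Reverse<(usize, usize)>> =
        (0..partitions).map(|i| Reverse((0, i))).collect();
    for r in rows {
        let Reverse((total, idx)) = heap.pop().expect("at least one partition");
        out[idx].push(r);
        heap.push(Reverse((total + r, idx)));
    }
    out
}

fn main() {
    // 250 + 100 balances against 200 + 150.
    assert_eq!(assign(vec![250, 200, 150, 100], 2), vec![vec![250, 100], vec![200, 150]]);
}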
@@ -263,7 +262,7 @@ mod test {
         ];
         assert_eq!(result, expected);

-        // assign 4 ranges to 5 partitions. Only 4 partitions are returned.
+        // assign 4 ranges to 5 partitions.
         let expected_partition_num = 5;
         let result = ParallelizeScan::assign_partition_range(ranges, expected_partition_num);
         let expected = vec![
@@ -273,32 +272,31 @@ mod test {
                 num_rows: 250,
                 identifier: 4,
             }],
+            vec![PartitionRange {
+                start: Timestamp::new(0, TimeUnit::Second),
+                end: Timestamp::new(10, TimeUnit::Second),
+                num_rows: 100,
+                identifier: 1,
+            }],
             vec![PartitionRange {
                 start: Timestamp::new(10, TimeUnit::Second),
                 end: Timestamp::new(20, TimeUnit::Second),
                 num_rows: 200,
                 identifier: 2,
             }],
-            vec![
-                PartitionRange {
-                    start: Timestamp::new(20, TimeUnit::Second),
-                    end: Timestamp::new(30, TimeUnit::Second),
-                    num_rows: 150,
-                    identifier: 3,
-                },
-                PartitionRange {
-                    start: Timestamp::new(0, TimeUnit::Second),
-                    end: Timestamp::new(10, TimeUnit::Second),
-                    num_rows: 100,
-                    identifier: 1,
-                },
-            ],
+            vec![],
+            vec![PartitionRange {
+                start: Timestamp::new(20, TimeUnit::Second),
+                end: Timestamp::new(30, TimeUnit::Second),
+                num_rows: 150,
+                identifier: 3,
+            }],
         ];
         assert_eq!(result, expected);

-        // assign 0 ranges to 5 partitions. Only 1 partition is returned.
+        // assign 0 ranges to 5 partitions. Should return 5 empty ranges.
         let result = ParallelizeScan::assign_partition_range(vec![], 5);
-        assert_eq!(result.len(), 1);
+        assert_eq!(result.len(), 5);
     }

     #[test]
@@ -348,7 +348,7 @@ impl PartSortStream {
         &self,
         sort_column: &ArrayRef,
     ) -> datafusion_common::Result<Option<usize>> {
-        if sort_column.len() == 0 {
+        if sort_column.is_empty() {
             return Ok(Some(0));
         }

@@ -29,6 +29,11 @@ use datafusion::execution::{FunctionRegistry, SessionStateBuilder};
 use datafusion::logical_expr::LogicalPlan;
 use datafusion_expr::UserDefinedLogicalNode;
 use greptime_proto::substrait_extension::MergeScan as PbMergeScan;
+use promql::functions::{
+    AbsentOverTime, AvgOverTime, Changes, CountOverTime, Delta, Deriv, IDelta, Increase,
+    LastOverTime, MaxOverTime, MinOverTime, PresentOverTime, Rate, Resets, StddevOverTime,
+    StdvarOverTime, SumOverTime,
+};
 use prost::Message;
 use session::context::QueryContextRef;
 use snafu::ResultExt;
@@ -132,6 +137,26 @@ impl SubstraitPlanDecoder for DefaultPlanDecoder {
             let _ = session_state.register_udaf(Arc::new(HllState::state_udf_impl()));
             let _ = session_state.register_udaf(Arc::new(HllState::merge_udf_impl()));
             let _ = session_state.register_udaf(Arc::new(GeoPathAccumulator::udf_impl()));

+            let _ = session_state.register_udf(Arc::new(IDelta::<false>::scalar_udf()));
+            let _ = session_state.register_udf(Arc::new(IDelta::<true>::scalar_udf()));
+            let _ = session_state.register_udf(Arc::new(Rate::scalar_udf()));
+            let _ = session_state.register_udf(Arc::new(Increase::scalar_udf()));
+            let _ = session_state.register_udf(Arc::new(Delta::scalar_udf()));
+            let _ = session_state.register_udf(Arc::new(Resets::scalar_udf()));
+            let _ = session_state.register_udf(Arc::new(Changes::scalar_udf()));
+            let _ = session_state.register_udf(Arc::new(Deriv::scalar_udf()));
+            let _ = session_state.register_udf(Arc::new(AvgOverTime::scalar_udf()));
+            let _ = session_state.register_udf(Arc::new(MinOverTime::scalar_udf()));
+            let _ = session_state.register_udf(Arc::new(MaxOverTime::scalar_udf()));
+            let _ = session_state.register_udf(Arc::new(SumOverTime::scalar_udf()));
+            let _ = session_state.register_udf(Arc::new(CountOverTime::scalar_udf()));
+            let _ = session_state.register_udf(Arc::new(LastOverTime::scalar_udf()));
+            let _ = session_state.register_udf(Arc::new(AbsentOverTime::scalar_udf()));
+            let _ = session_state.register_udf(Arc::new(PresentOverTime::scalar_udf()));
+            let _ = session_state.register_udf(Arc::new(StddevOverTime::scalar_udf()));
+            let _ = session_state.register_udf(Arc::new(StdvarOverTime::scalar_udf()));
+            // TODO(ruihang): add quantile_over_time, predict_linear, holt_winters, round
         }
         let logical_plan = DFLogicalSubstraitConvertor
             .decode(message, session_state)

@@ -31,6 +31,7 @@ use datafusion::error::Result as DfResult;
 use datafusion::execution::context::{QueryPlanner, SessionConfig, SessionContext, SessionState};
 use datafusion::execution::runtime_env::RuntimeEnv;
 use datafusion::execution::SessionStateBuilder;
+use datafusion::physical_optimizer::enforce_sorting::EnforceSorting;
 use datafusion::physical_optimizer::optimizer::PhysicalOptimizer;
 use datafusion::physical_optimizer::sanity_checker::SanityCheckPlan;
 use datafusion::physical_optimizer::PhysicalOptimizerRule;
@@ -142,6 +143,9 @@ impl QueryEngineState {
         physical_optimizer
             .rules
             .insert(1, Arc::new(PassDistribution));
+        physical_optimizer
+            .rules
+            .insert(2, Arc::new(EnforceSorting {}));
         // Add rule for windowed sort
         physical_optimizer
             .rules

@@ -117,7 +117,7 @@ where
     fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
         match self.inner.poll_ready(cx) {
             Poll::Pending => Poll::Pending,
-            Poll::Ready(r) => Poll::Ready(r.map_err(Into::into)),
+            Poll::Ready(r) => Poll::Ready(r),
         }
     }
@@ -17,7 +17,6 @@
 #![feature(exclusive_wrapper)]
 #![feature(let_chains)]
 #![feature(if_let_guard)]
-#![feature(trait_upcasting)]

 use datafusion_expr::LogicalPlan;
 use datatypes::schema::Schema;

@@ -55,7 +55,7 @@ pub fn transform_statements(stmts: &mut Vec<Statement>) -> Result<()> {
         }
     }

-    visit_expressions_mut(stmts, |expr| {
+    let _ = visit_expressions_mut(stmts, |expr| {
         for rule in RULES.iter() {
             rule.visit_expr(expr)?;
         }

@@ -290,7 +290,7 @@ impl RegionMetadata {
     pub fn project(&self, projection: &[ColumnId]) -> Result<RegionMetadata> {
         // check time index
         ensure!(
-            projection.iter().any(|id| *id == self.time_index),
+            projection.contains(&self.time_index),
             TimeIndexNotFoundSnafu
         );

@@ -14,6 +14,7 @@

 #![feature(assert_matches)]
 #![feature(try_blocks)]
+#![feature(let_chains)]

 pub mod dist_table;
 pub mod error;

@@ -95,7 +95,7 @@ impl TableProvider for DfTableProviderAdapter {
         filters: &[Expr],
         limit: Option<usize>,
     ) -> DfResult<Arc<dyn ExecutionPlan>> {
-        let filters: Vec<Expr> = filters.iter().map(Clone::clone).map(Into::into).collect();
+        let filters: Vec<Expr> = filters.iter().map(Clone::clone).collect();
         let request = {
             let mut request = self.scan_req.lock().unwrap();
             request.filters = filters;

@@ -82,11 +82,17 @@ impl RegionScanExec {
         if scanner.properties().is_logical_region() {
             pk_names.sort_unstable();
         }
-        let mut pk_columns: Vec<PhysicalSortExpr> = pk_names
-            .into_iter()
+        let pk_columns = pk_names
+            .iter()
+            .filter_map(
+                |col| Some(Arc::new(Column::new_with_schema(col, &arrow_schema).ok()?) as _),
+            )
+            .collect::<Vec<_>>();
+        let mut pk_sort_columns: Vec<PhysicalSortExpr> = pk_names
+            .iter()
             .filter_map(|col| {
                 Some(PhysicalSortExpr::new(
-                    Arc::new(Column::new_with_schema(&col, &arrow_schema).ok()?) as _,
+                    Arc::new(Column::new_with_schema(col, &arrow_schema).ok()?) as _,
                     SortOptions {
                         descending: false,
                         nulls_first: true,
@@ -113,28 +119,37 @@ impl RegionScanExec {
         let eq_props = match request.distribution {
             Some(TimeSeriesDistribution::PerSeries) => {
                 if let Some(ts) = ts_col {
-                    pk_columns.push(ts);
+                    pk_sort_columns.push(ts);
                 }
                 EquivalenceProperties::new_with_orderings(
                     arrow_schema.clone(),
-                    &[LexOrdering::new(pk_columns)],
+                    &[LexOrdering::new(pk_sort_columns)],
                 )
             }
             Some(TimeSeriesDistribution::TimeWindowed) => {
                 if let Some(ts_col) = ts_col {
-                    pk_columns.insert(0, ts_col);
+                    pk_sort_columns.insert(0, ts_col);
                 }
                 EquivalenceProperties::new_with_orderings(
                     arrow_schema.clone(),
-                    &[LexOrdering::new(pk_columns)],
+                    &[LexOrdering::new(pk_sort_columns)],
                 )
             }
             None => EquivalenceProperties::new(arrow_schema.clone()),
         };

+        let partitioning = match request.distribution {
+            Some(TimeSeriesDistribution::PerSeries) => {
+                Partitioning::Hash(pk_columns.clone(), num_output_partition)
+            }
+            Some(TimeSeriesDistribution::TimeWindowed) | None => {
+                Partitioning::UnknownPartitioning(num_output_partition)
+            }
+        };
+
         let properties = PlanProperties::new(
             eq_props,
-            Partitioning::UnknownPartitioning(num_output_partition),
+            partitioning,
             EmissionType::Incremental,
             Boundedness::Bounded,
         );
@@ -188,9 +203,14 @@ impl RegionScanExec {
             warn!("Setting partition ranges more than once for RegionScanExec");
         }

-        let num_partitions = partitions.len();
         let mut properties = self.properties.clone();
-        properties.partitioning = Partitioning::UnknownPartitioning(num_partitions);
+        let new_partitioning = match properties.partitioning {
+            Partitioning::Hash(ref columns, _) => {
+                Partitioning::Hash(columns.clone(), target_partitions)
+            }
+            _ => Partitioning::UnknownPartitioning(target_partitions),
+        };
+        properties.partitioning = new_partitioning;

         {
             let mut scanner = self.scanner.lock().unwrap();

@@ -85,11 +85,7 @@ pub struct UnstableTestVariables {
 pub fn load_unstable_test_env_variables() -> UnstableTestVariables {
     let _ = dotenv::dotenv();
     let binary_path = env::var(GT_FUZZ_BINARY_PATH).expect("GT_FUZZ_BINARY_PATH not found");
-    let root_dir = if let Ok(root) = env::var(GT_FUZZ_INSTANCE_ROOT_DIR) {
-        Some(root)
-    } else {
-        None
-    };
+    let root_dir = env::var(GT_FUZZ_INSTANCE_ROOT_DIR).ok();

     UnstableTestVariables {
         binary_path,

@@ -157,7 +157,7 @@ async fn execute_unstable_create_table(
         }
         Err(err) => {
             // FIXME(weny): support to retry it later.
-            if matches!(err, sqlx::Error::PoolTimedOut { .. }) {
+            if matches!(err, sqlx::Error::PoolTimedOut) {
                 warn!("ignore pool timeout, sql: {sql}");
                 continue;
             }

@@ -489,10 +489,7 @@ async fn create_datanode_client(datanode: &Datanode) -> (String, Client) {
                 if let Some(client) = client {
                     Ok(TokioIo::new(client))
                 } else {
-                    Err(std::io::Error::new(
-                        std::io::ErrorKind::Other,
-                        "Client already taken",
-                    ))
+                    Err(std::io::Error::other("Client already taken"))
                 }
             }
         }),
@@ -130,8 +130,7 @@ tql eval (3000, 3000, '1s') label_replace(histogram_quantile(0.8, histogram_buck
|
||||
-- quantile with rate is covered in other cases
|
||||
tql eval (3000, 3000, '1s') histogram_quantile(0.2, rate(histogram_bucket[5m]));
|
||||
|
||||
++
|
||||
++
|
||||
Error: 3001(EngineExecuteQuery), Unsupported arrow data type, type: Dictionary(Int64, Float64)
|
||||
|
||||
drop table histogram_bucket;
|
||||
|
||||
|
||||
@@ -17,11 +17,14 @@ tql analyze (1, 3, '1s') t1{ a = "a" };
|
||||
+-+-+-+
|
||||
| stage | node | plan_|
|
||||
+-+-+-+
|
||||
| 0_| 0_|_PromInstantManipulateExec: range=[1000..3000], lookback=[300000], interval=[1000], time index=[b] REDACTED
|
||||
|_|_|_PromSeriesDivideExec: tags=["a"] REDACTED
|
||||
|_|_|_MergeScanExec: REDACTED
|
||||
| 0_| 0_|_MergeScanExec: REDACTED
|
||||
|_|_|_|
|
||||
| 1_| 0_|_SeqScan: region=REDACTED, partition_count=1 (1 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED
|
||||
| 1_| 0_|_PromInstantManipulateExec: range=[1000..3000], lookback=[300000], interval=[1000], time index=[b] REDACTED
|
||||
|_|_|_PromSeriesDivideExec: tags=["a"] REDACTED
|
||||
|_|_|_SortExec: expr=[a@0 ASC], preserve_partitioning=[true] REDACTED
|
||||
|_|_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED
|
||||
|_|_|_RepartitionExec: partitioning=Hash([a@0], 32), input_partitions=1 REDACTED
|
||||
|_|_|_SeqScan: region=REDACTED, partition_count=1 (1 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED
|
||||
|_|_|_|
|
||||
|_|_| Total rows: 3_|
|
||||
+-+-+-+
|
||||
@@ -37,11 +40,14 @@ tql analyze (1, 3, '1s') t1{ a =~ ".*" };
|
||||
+-+-+-+
|
||||
| stage | node | plan_|
|
||||
+-+-+-+
|
||||
| 0_| 0_|_PromInstantManipulateExec: range=[1000..3000], lookback=[300000], interval=[1000], time index=[b] REDACTED
|
||||
|_|_|_PromSeriesDivideExec: tags=["a"] REDACTED
|
||||
|_|_|_MergeScanExec: REDACTED
|
||||
| 0_| 0_|_MergeScanExec: REDACTED
|
||||
|_|_|_|
|
||||
| 1_| 0_|_SeqScan: region=REDACTED, partition_count=1 (1 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED
|
||||
| 1_| 0_|_PromInstantManipulateExec: range=[1000..3000], lookback=[300000], interval=[1000], time index=[b] REDACTED
|
||||
|_|_|_PromSeriesDivideExec: tags=["a"] REDACTED
|
||||
|_|_|_SortExec: expr=[a@0 ASC], preserve_partitioning=[true] REDACTED
|
||||
|_|_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED
|
||||
|_|_|_RepartitionExec: partitioning=Hash([a@0], 32), input_partitions=1 REDACTED
|
||||
|_|_|_SeqScan: region=REDACTED, partition_count=1 (1 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED
|
||||
|_|_|_|
|
||||
|_|_| Total rows: 6_|
|
||||
+-+-+-+
|
||||
@@ -57,11 +63,14 @@ tql analyze (1, 3, '1s') t1{ a =~ "a.*" };
|
||||
+-+-+-+
|
||||
| stage | node | plan_|
|
||||
+-+-+-+
|
||||
| 0_| 0_|_PromInstantManipulateExec: range=[1000..3000], lookback=[300000], interval=[1000], time index=[b] REDACTED
|
||||
|_|_|_PromSeriesDivideExec: tags=["a"] REDACTED
|
||||
|_|_|_MergeScanExec: REDACTED
|
||||
| 0_| 0_|_MergeScanExec: REDACTED
|
||||
|_|_|_|
|
||||
| 1_| 0_|_SeqScan: region=REDACTED, partition_count=1 (1 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED
|
||||
| 1_| 0_|_PromInstantManipulateExec: range=[1000..3000], lookback=[300000], interval=[1000], time index=[b] REDACTED
|
||||
|_|_|_PromSeriesDivideExec: tags=["a"] REDACTED
|
||||
|_|_|_SortExec: expr=[a@0 ASC], preserve_partitioning=[true] REDACTED
|
||||
|_|_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED
|
||||
|_|_|_RepartitionExec: partitioning=Hash([a@0], 32), input_partitions=1 REDACTED
|
||||
|_|_|_SeqScan: region=REDACTED, partition_count=1 (1 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED
|
||||
|_|_|_|
|
||||
|_|_| Total rows: 3_|
|
||||
+-+-+-+
|
||||
|
||||
@@ -3,7 +3,7 @@ create database pg_catalog;
|
||||
|
||||
Error: 1004(InvalidArguments), Schema pg_catalog already exists
|
||||
|
||||
-- session_user because session_user is based on the current user so is not null is for test
|
||||
-- session_user because session_user is based on the current user so is not null is for test
|
||||
-- SQLNESS PROTOCOL POSTGRES
|
||||
SELECT session_user is not null;
|
||||
|
||||
@@ -107,12 +107,13 @@ select * from pg_catalog.pg_type order by oid;
|
||||
+-----+-----------+--------+
|
||||
|
||||
-- SQLNESS PROTOCOL POSTGRES
|
||||
-- SQLNESS REPLACE (\d+\s*) OID
|
||||
select * from pg_catalog.pg_database where datname = 'public';
|
||||
|
||||
+------------+---------+
|
||||
| oid | datname |
|
||||
+------------+---------+
|
||||
| 3927743705 | public |
|
||||
| OID| public |
|
||||
+------------+---------+
|
||||
|
||||
-- \d
|
||||
@@ -159,15 +160,16 @@ ORDER BY 1,2;
|
||||
|
||||
-- make sure oid of namespace keep stable
|
||||
-- SQLNESS PROTOCOL POSTGRES
|
||||
SELECT * FROM pg_namespace ORDER BY oid;
|
||||
-- SQLNESS REPLACE (\d+\s*) OID
|
||||
SELECT * FROM pg_namespace ORDER BY nspname;
|
||||
|
||||
+------------+--------------------+
|
||||
| oid | nspname |
|
||||
+------------+--------------------+
|
||||
| 667359454 | pg_catalog |
|
||||
| 3174397350 | information_schema |
|
||||
| 3338153620 | greptime_private |
|
||||
| 3927743705 | public |
|
||||
| OID| greptime_private |
|
||||
| OID| information_schema |
|
||||
| OID| pg_catalog |
|
||||
| OID| public |
|
||||
+------------+--------------------+
|
||||
|
||||
-- SQLNESS PROTOCOL POSTGRES
|
||||
@@ -260,6 +262,7 @@ where relnamespace in (
|
||||
+---------+
|
||||
|
||||
-- SQLNESS PROTOCOL POSTGRES
|
||||
-- SQLNESS REPLACE (\d+\s*) OID
|
||||
select relnamespace, relname, relkind
|
||||
from pg_catalog.pg_class
|
||||
where relnamespace in (
|
||||
@@ -274,7 +277,7 @@ order by relnamespace, relname;
|
||||
+--------------+---------+---------+
|
||||
| relnamespace | relname | relkind |
|
||||
+--------------+---------+---------+
|
||||
| 434869349 | foo | r |
|
||||
| OID| foo | r |
|
||||
+--------------+---------+---------+
|
||||
|
||||
-- SQLNESS PROTOCOL POSTGRES
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
-- should not able to create pg_catalog
|
||||
create database pg_catalog;
|
||||
|
||||
-- session_user because session_user is based on the current user so is not null is for test
|
||||
-- session_user because session_user is based on the current user so is not null is for test
|
||||
-- SQLNESS PROTOCOL POSTGRES
|
||||
SELECT session_user is not null;
|
||||
|
||||
@@ -34,6 +34,7 @@ select * from pg_catalog.pg_database;
|
||||
select * from pg_catalog.pg_type order by oid;
|
||||
|
||||
-- SQLNESS PROTOCOL POSTGRES
|
||||
-- SQLNESS REPLACE (\d+\s*) OID
|
||||
select * from pg_catalog.pg_database where datname = 'public';
|
||||
|
||||
-- \d
|
||||
@@ -68,7 +69,8 @@ ORDER BY 1,2;
|
||||
|
||||
-- make sure oid of namespace keep stable
|
||||
-- SQLNESS PROTOCOL POSTGRES
|
||||
SELECT * FROM pg_namespace ORDER BY oid;
|
||||
-- SQLNESS REPLACE (\d+\s*) OID
|
||||
SELECT * FROM pg_namespace ORDER BY nspname;
|
||||
|
||||
-- SQLNESS PROTOCOL POSTGRES
|
||||
create database my_db;
|
||||
@@ -128,6 +130,7 @@ where relnamespace in (
|
||||
);
|
||||
|
||||
-- SQLNESS PROTOCOL POSTGRES
|
||||
-- SQLNESS REPLACE (\d+\s*) OID
|
||||
select relnamespace, relname, relkind
|
||||
from pg_catalog.pg_class
|
||||
where relnamespace in (
|
||||
|
||||
@@ -19,11 +19,14 @@ TQL ANALYZE (0, 10, '5s') test;
|
||||
+-+-+-+
|
||||
| stage | node | plan_|
|
||||
+-+-+-+
|
||||
| 0_| 0_|_PromInstantManipulateExec: range=[0..10000], lookback=[300000], interval=[5000], time index=[j] REDACTED
|
||||
|_|_|_PromSeriesDivideExec: tags=["k"] REDACTED
|
||||
|_|_|_MergeScanExec: REDACTED
|
||||
| 0_| 0_|_MergeScanExec: REDACTED
|
||||
|_|_|_|
|
||||
| 1_| 0_|_SeqScan: region=REDACTED, partition_count=1 (1 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED
|
||||
| 1_| 0_|_PromInstantManipulateExec: range=[0..10000], lookback=[300000], interval=[5000], time index=[j] REDACTED
|
||||
|_|_|_PromSeriesDivideExec: tags=["k"] REDACTED
|
||||
|_|_|_SortExec: expr=[k@2 ASC], preserve_partitioning=[true] REDACTED
|
||||
|_|_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED
|
||||
|_|_|_RepartitionExec: partitioning=Hash([k@2], 32), input_partitions=1 REDACTED
|
||||
|_|_|_SeqScan: region=REDACTED, partition_count=1 (1 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED
|
||||
|_|_|_|
|
||||
|_|_| Total rows: 4_|
|
||||
+-+-+-+
|
||||
@@ -41,11 +44,14 @@ TQL ANALYZE (0, 10, '1s', '2s') test;
|
||||
+-+-+-+
|
||||
| stage | node | plan_|
|
||||
+-+-+-+
|
||||
| 0_| 0_|_PromInstantManipulateExec: range=[0..10000], lookback=[2000], interval=[1000], time index=[j] REDACTED
|
||||
|_|_|_PromSeriesDivideExec: tags=["k"] REDACTED
|
||||
|_|_|_MergeScanExec: REDACTED
|
||||
| 0_| 0_|_MergeScanExec: REDACTED
|
||||
|_|_|_|
|
||||
| 1_| 0_|_SeqScan: region=REDACTED, partition_count=1 (1 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED
|
||||
| 1_| 0_|_PromInstantManipulateExec: range=[0..10000], lookback=[2000], interval=[1000], time index=[j] REDACTED
|
||||
|_|_|_PromSeriesDivideExec: tags=["k"] REDACTED
|
||||
|_|_|_SortExec: expr=[k@2 ASC], preserve_partitioning=[true] REDACTED
|
||||
|_|_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED
|
||||
|_|_|_RepartitionExec: partitioning=Hash([k@2], 32), input_partitions=1 REDACTED
|
||||
|_|_|_SeqScan: region=REDACTED, partition_count=1 (1 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED
|
||||
|_|_|_|
|
||||
|_|_| Total rows: 4_|
|
||||
+-+-+-+
|
||||
@@ -62,11 +68,14 @@ TQL ANALYZE ('1970-01-01T00:00:00'::timestamp, '1970-01-01T00:00:00'::timestamp
+-+-+-+
| stage | node | plan_|
+-+-+-+
| 0_| 0_|_PromInstantManipulateExec: range=[0..10000], lookback=[300000], interval=[5000], time index=[j] REDACTED
|_|_|_PromSeriesDivideExec: tags=["k"] REDACTED
|_|_|_MergeScanExec: REDACTED
| 0_| 0_|_MergeScanExec: REDACTED
|_|_|_|
| 1_| 0_|_SeqScan: region=REDACTED, partition_count=1 (1 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED
| 1_| 0_|_PromInstantManipulateExec: range=[0..10000], lookback=[300000], interval=[5000], time index=[j] REDACTED
|_|_|_PromSeriesDivideExec: tags=["k"] REDACTED
|_|_|_SortExec: expr=[k@2 ASC], preserve_partitioning=[true] REDACTED
|_|_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED
|_|_|_RepartitionExec: partitioning=Hash([k@2], 32), input_partitions=1 REDACTED
|_|_|_SeqScan: region=REDACTED, partition_count=1 (1 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED
|_|_|_|
|_|_| Total rows: 4_|
+-+-+-+
@@ -85,11 +94,14 @@ TQL ANALYZE VERBOSE (0, 10, '5s') test;
+-+-+-+
| stage | node | plan_|
+-+-+-+
| 0_| 0_|_PromInstantManipulateExec: range=[0..10000], lookback=[300000], interval=[5000], time index=[j] REDACTED
|_|_|_PromSeriesDivideExec: tags=["k"] REDACTED
|_|_|_MergeScanExec: REDACTED
| 0_| 0_|_MergeScanExec: REDACTED
|_|_|_|
| 1_| 0_|_SeqScan: region=REDACTED, partition_count=1 (1 memtable ranges, 0 file 0 ranges), distribution=PerSeries, projection=["i", "j", "k"], filters=[j >= TimestampMillisecond(-300000, None), j <= TimestampMillisecond(310000, None)], REDACTED
| 1_| 0_|_PromInstantManipulateExec: range=[0..10000], lookback=[300000], interval=[5000], time index=[j] REDACTED
|_|_|_PromSeriesDivideExec: tags=["k"] REDACTED
|_|_|_SortExec: expr=[k@2 ASC], preserve_partitioning=[true] REDACTED
|_|_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED
|_|_|_RepartitionExec: partitioning=Hash([k@2], 32), input_partitions=1 REDACTED
|_|_|_SeqScan: region=REDACTED, partition_count=1 (1 memtable ranges, 0 file 0 ranges), distribution=PerSeries, projection=["i", "j", "k"], filters=[j >= TimestampMillisecond(-300000, None), j <= TimestampMillisecond(310000, None)], REDACTED
|_|_|_|
|_|_| Total rows: 4_|
+-+-+-+
@@ -114,13 +126,23 @@ TQL ANALYZE (0, 10, '5s') test;
+-+-+-+
| stage | node | plan_|
+-+-+-+
| 0_| 0_|_PromInstantManipulateExec: range=[0..10000], lookback=[300000], interval=[5000], time index=[j] REDACTED
|_|_|_PromSeriesDivideExec: tags=["k", "l"] REDACTED
| 0_| 0_|_SortPreservingMergeExec: [k@2 ASC, l@3 ASC, j@1 ASC] REDACTED
|_|_|_SortExec: expr=[k@2 ASC, l@3 ASC, j@1 ASC], preserve_partitioning=[true] REDACTED
|_|_|_MergeScanExec: REDACTED
|_|_|_|
| 1_| 0_|_SeqScan: region=REDACTED, partition_count=0 (0 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED
| 1_| 0_|_PromInstantManipulateExec: range=[0..10000], lookback=[300000], interval=[5000], time index=[j] REDACTED
|_|_|_PromSeriesDivideExec: tags=["k", "l"] REDACTED
|_|_|_SortExec: expr=[k@2 ASC, l@3 ASC], preserve_partitioning=[true] REDACTED
|_|_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED
|_|_|_RepartitionExec: partitioning=Hash([k@2, l@3], 32), input_partitions=1 REDACTED
|_|_|_SeqScan: region=REDACTED, partition_count=0 (0 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED
|_|_|_|
| 1_| 1_|_SeqScan: region=REDACTED, partition_count=0 (0 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED
| 1_| 1_|_PromInstantManipulateExec: range=[0..10000], lookback=[300000], interval=[5000], time index=[j] REDACTED
|_|_|_PromSeriesDivideExec: tags=["k", "l"] REDACTED
|_|_|_SortExec: expr=[k@2 ASC, l@3 ASC], preserve_partitioning=[true] REDACTED
|_|_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED
|_|_|_RepartitionExec: partitioning=Hash([k@2, l@3], 32), input_partitions=1 REDACTED
|_|_|_SeqScan: region=REDACTED, partition_count=0 (0 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED
|_|_|_|
|_|_| Total rows: 0_|
+-+-+-+
@@ -144,9 +166,21 @@ TQL ANALYZE (0, 10, '5s') rate(test[10s]);
|_|_|_PromSeriesDivideExec: tags=["k", "l"] REDACTED
|_|_|_MergeScanExec: REDACTED
|_|_|_|
| 1_| 0_|_SeqScan: region=REDACTED, partition_count=0 (0 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED
| 1_| 0_|_PromRangeManipulateExec: req range=[0..10000], interval=[5000], eval range=[10000], time index=[j] REDACTED
|_|_|_PromSeriesNormalizeExec: offset=[0], time index=[j], filter NaN: [true] REDACTED
|_|_|_PromSeriesDivideExec: tags=["k", "l"] REDACTED
|_|_|_SortExec: expr=[k@2 ASC, l@3 ASC], preserve_partitioning=[true] REDACTED
|_|_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED
|_|_|_RepartitionExec: partitioning=Hash([k@2, l@3], 32), input_partitions=1 REDACTED
|_|_|_SeqScan: region=REDACTED, partition_count=0 (0 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED
|_|_|_|
| 1_| 1_|_SeqScan: region=REDACTED, partition_count=0 (0 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED
| 1_| 1_|_PromRangeManipulateExec: req range=[0..10000], interval=[5000], eval range=[10000], time index=[j] REDACTED
|_|_|_PromSeriesNormalizeExec: offset=[0], time index=[j], filter NaN: [true] REDACTED
|_|_|_PromSeriesDivideExec: tags=["k", "l"] REDACTED
|_|_|_SortExec: expr=[k@2 ASC, l@3 ASC], preserve_partitioning=[true] REDACTED
|_|_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED
|_|_|_RepartitionExec: partitioning=Hash([k@2, l@3], 32), input_partitions=1 REDACTED
|_|_|_SeqScan: region=REDACTED, partition_count=0 (0 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED
|_|_|_|
|_|_| Total rows: 0_|
+-+-+-+

@@ -12,18 +12,13 @@ Affected Rows: 3
-- SQLNESS REPLACE (peers.*) REDACTED
TQL EXPLAIN (0, 10, '5s') test;

+---------------+-----------------------------------------------------------------------------------------------+
| plan_type | plan |
+---------------+-----------------------------------------------------------------------------------------------+
| logical_plan | PromInstantManipulate: range=[0..0], lookback=[300000], interval=[300000], time index=[j] |
| | PromSeriesDivide: tags=["k"] |
| | Projection: test.i, test.j, test.k |
| | MergeScan [is_placeholder=false] |
| physical_plan | PromInstantManipulateExec: range=[0..0], lookback=[300000], interval=[300000], time index=[j] |
| | PromSeriesDivideExec: tags=["k"] |
| | MergeScanExec: REDACTED
| | |
+---------------+-----------------------------------------------------------------------------------------------+
+---------------+-------------------------------------------------+
| plan_type | plan |
+---------------+-------------------------------------------------+
| logical_plan | MergeScan [is_placeholder=false] |
| physical_plan | MergeScanExec: REDACTED
| | |
+---------------+-------------------------------------------------+

-- the 'lookback' parameter is not fully supported; the test has to be updated
-- explain at 0s, 5s and 10s. No point at 0s.
@@ -31,36 +26,26 @@ TQL EXPLAIN (0, 10, '5s') test;
-- SQLNESS REPLACE (peers.*) REDACTED
TQL EXPLAIN (0, 10, '1s', '2s') test;

+---------------+---------------------------------------------------------------------------------------------+
| plan_type | plan |
+---------------+---------------------------------------------------------------------------------------------+
| logical_plan | PromInstantManipulate: range=[0..0], lookback=[2000], interval=[300000], time index=[j] |
| | PromSeriesDivide: tags=["k"] |
| | Projection: test.i, test.j, test.k |
| | MergeScan [is_placeholder=false] |
| physical_plan | PromInstantManipulateExec: range=[0..0], lookback=[2000], interval=[300000], time index=[j] |
| | PromSeriesDivideExec: tags=["k"] |
| | MergeScanExec: REDACTED
| | |
+---------------+---------------------------------------------------------------------------------------------+
+---------------+-------------------------------------------------+
| plan_type | plan |
+---------------+-------------------------------------------------+
| logical_plan | MergeScan [is_placeholder=false] |
| physical_plan | MergeScanExec: REDACTED
| | |
+---------------+-------------------------------------------------+

-- explain at 0s, 5s and 10s. No point at 0s.
-- SQLNESS REPLACE (RoundRobinBatch.*) REDACTED
-- SQLNESS REPLACE (peers.*) REDACTED
TQL EXPLAIN ('1970-01-01T00:00:00'::timestamp, '1970-01-01T00:00:00'::timestamp + '10 seconds'::interval, '5s') test;

+---------------+-----------------------------------------------------------------------------------------------+
| plan_type | plan |
+---------------+-----------------------------------------------------------------------------------------------+
| logical_plan | PromInstantManipulate: range=[0..0], lookback=[300000], interval=[300000], time index=[j] |
| | PromSeriesDivide: tags=["k"] |
| | Projection: test.i, test.j, test.k |
| | MergeScan [is_placeholder=false] |
| physical_plan | PromInstantManipulateExec: range=[0..0], lookback=[300000], interval=[300000], time index=[j] |
| | PromSeriesDivideExec: tags=["k"] |
| | MergeScanExec: REDACTED
| | |
+---------------+-----------------------------------------------------------------------------------------------+
+---------------+-------------------------------------------------+
| plan_type | plan |
+---------------+-------------------------------------------------+
| logical_plan | MergeScan [is_placeholder=false] |
| physical_plan | MergeScanExec: REDACTED
| | |
+---------------+-------------------------------------------------+

-- explain verbose at 0s, 5s and 10s. No point at 0s.
-- SQLNESS REPLACE (-+) -
@@ -85,9 +70,7 @@ TQL EXPLAIN VERBOSE (0, 10, '5s') test;
| logical_plan after expand_wildcard_rule_| SAME TEXT AS ABOVE_|
| logical_plan after resolve_grouping_function_| SAME TEXT AS ABOVE_|
| logical_plan after type_coercion_| SAME TEXT AS ABOVE_|
| logical_plan after DistPlannerAnalyzer_| PromInstantManipulate: range=[0..0], lookback=[300000], interval=[300000], time index=[j]_|
|_|_PromSeriesDivide: tags=["k"]_|
|_|_Projection: test.i, test.j, test.k_|
| logical_plan after DistPlannerAnalyzer_| Projection: test.i, test.j, test.k_|
|_|_MergeScan [is_placeholder=false]_|
| analyzed_logical_plan_| SAME TEXT AS ABOVE_|
| logical_plan after eliminate_nested_union_| SAME TEXT AS ABOVE_|
@@ -114,37 +97,45 @@ TQL EXPLAIN VERBOSE (0, 10, '5s') test;
| logical_plan after unwrap_cast_in_comparison_| SAME TEXT AS ABOVE_|
| logical_plan after common_sub_expression_eliminate_| SAME TEXT AS ABOVE_|
| logical_plan after eliminate_group_by_constant_| SAME TEXT AS ABOVE_|
| logical_plan after optimize_projections_| MergeScan [is_placeholder=false]_|
| logical_plan after ScanHintRule_| SAME TEXT AS ABOVE_|
| logical_plan after eliminate_nested_union_| SAME TEXT AS ABOVE_|
| logical_plan after simplify_expressions_| SAME TEXT AS ABOVE_|
| logical_plan after unwrap_cast_in_comparison_| SAME TEXT AS ABOVE_|
| logical_plan after replace_distinct_aggregate_| SAME TEXT AS ABOVE_|
| logical_plan after eliminate_join_| SAME TEXT AS ABOVE_|
| logical_plan after decorrelate_predicate_subquery_| SAME TEXT AS ABOVE_|
| logical_plan after scalar_subquery_to_join_| SAME TEXT AS ABOVE_|
| logical_plan after extract_equijoin_predicate_| SAME TEXT AS ABOVE_|
| logical_plan after eliminate_duplicated_expr_| SAME TEXT AS ABOVE_|
| logical_plan after eliminate_filter_| SAME TEXT AS ABOVE_|
| logical_plan after eliminate_cross_join_| SAME TEXT AS ABOVE_|
| logical_plan after common_sub_expression_eliminate_| SAME TEXT AS ABOVE_|
| logical_plan after eliminate_limit_| SAME TEXT AS ABOVE_|
| logical_plan after propagate_empty_relation_| SAME TEXT AS ABOVE_|
| logical_plan after eliminate_one_union_| SAME TEXT AS ABOVE_|
| logical_plan after filter_null_join_keys_| SAME TEXT AS ABOVE_|
| logical_plan after eliminate_outer_join_| SAME TEXT AS ABOVE_|
| logical_plan after push_down_limit_| SAME TEXT AS ABOVE_|
| logical_plan after push_down_filter_| SAME TEXT AS ABOVE_|
| logical_plan after single_distinct_aggregation_to_group_by | SAME TEXT AS ABOVE_|
| logical_plan after simplify_expressions_| SAME TEXT AS ABOVE_|
| logical_plan after unwrap_cast_in_comparison_| SAME TEXT AS ABOVE_|
| logical_plan after common_sub_expression_eliminate_| SAME TEXT AS ABOVE_|
| logical_plan after eliminate_group_by_constant_| SAME TEXT AS ABOVE_|
| logical_plan after optimize_projections_| SAME TEXT AS ABOVE_|
| logical_plan after ScanHintRule_| SAME TEXT AS ABOVE_|
| logical_plan_| PromInstantManipulate: range=[0..0], lookback=[300000], interval=[300000], time index=[j]_|
|_|_PromSeriesDivide: tags=["k"]_|
|_|_Projection: test.i, test.j, test.k_|
|_|_MergeScan [is_placeholder=false]_|
| initial_physical_plan_| PromInstantManipulateExec: range=[0..0], lookback=[300000], interval=[300000], time index=[j]_|
|_|_PromSeriesDivideExec: tags=["k"]_|
|_|_ProjectionExec: expr=[i@0 as i, j@1 as j, k@2 as k]_|
|_|_MergeScanExec: REDACTED
| logical_plan_| MergeScan [is_placeholder=false]_|
| initial_physical_plan_| MergeScanExec: REDACTED
|_|_|
| initial_physical_plan_with_stats_| PromInstantManipulateExec: range=[0..0], lookback=[300000], interval=[300000], time index=[j], statistics=[Rows=Inexact(0), Bytes=Absent, [(Col[0]:),(Col[1]:),(Col[2]:)]] |
|_|_PromSeriesDivideExec: tags=["k"], statistics=[Rows=Absent, Bytes=Absent, [(Col[0]:),(Col[1]:),(Col[2]:)]]_|
|_|_ProjectionExec: expr=[i@0 as i, j@1 as j, k@2 as k], statistics=[Rows=Absent, Bytes=Absent, [(Col[0]:),(Col[1]:),(Col[2]:)]]_|
|_|_MergeScanExec: REDACTED
| initial_physical_plan_with_stats_| MergeScanExec: REDACTED
|_|_|
| initial_physical_plan_with_schema_| PromInstantManipulateExec: range=[0..0], lookback=[300000], interval=[300000], time index=[j], schema=[i:Float64;N, j:Timestamp(Millisecond, None), k:Utf8;N]_|
|_|_PromSeriesDivideExec: tags=["k"], schema=[i:Float64;N, j:Timestamp(Millisecond, None), k:Utf8;N]_|
|_|_ProjectionExec: expr=[i@0 as i, j@1 as j, k@2 as k], schema=[i:Float64;N, j:Timestamp(Millisecond, None), k:Utf8;N]_|
|_|_MergeScanExec: REDACTED
| initial_physical_plan_with_schema_| MergeScanExec: REDACTED
|_|_|
| physical_plan after parallelize_scan_| PromInstantManipulateExec: range=[0..0], lookback=[300000], interval=[300000], time index=[j]_|
|_|_PromSeriesDivideExec: tags=["k"]_|
|_|_ProjectionExec: expr=[i@0 as i, j@1 as j, k@2 as k]_|
|_|_MergeScanExec: REDACTED
| physical_plan after parallelize_scan_| MergeScanExec: REDACTED
|_|_|
| physical_plan after PassDistributionRule_| SAME TEXT AS ABOVE_|
| physical_plan after OutputRequirements_| OutputRequirementExec_|
|_|_PromInstantManipulateExec: range=[0..0], lookback=[300000], interval=[300000], time index=[j]_|
|_|_PromSeriesDivideExec: tags=["k"]_|
|_|_ProjectionExec: expr=[i@0 as i, j@1 as j, k@2 as k]_|
|_|_MergeScanExec: REDACTED
|_|_|
| physical_plan after aggregate_statistics_| SAME TEXT AS ABOVE_|
@@ -154,15 +145,9 @@ TQL EXPLAIN VERBOSE (0, 10, '5s') test;
| physical_plan after CombinePartialFinalAggregate_| SAME TEXT AS ABOVE_|
| physical_plan after EnforceSorting_| SAME TEXT AS ABOVE_|
| physical_plan after OptimizeAggregateOrder_| SAME TEXT AS ABOVE_|
| physical_plan after ProjectionPushdown_| OutputRequirementExec_|
|_|_PromInstantManipulateExec: range=[0..0], lookback=[300000], interval=[300000], time index=[j]_|
|_|_PromSeriesDivideExec: tags=["k"]_|
|_|_MergeScanExec: REDACTED
|_|_|
| physical_plan after ProjectionPushdown_| SAME TEXT AS ABOVE_|
| physical_plan after coalesce_batches_| SAME TEXT AS ABOVE_|
| physical_plan after OutputRequirements_| PromInstantManipulateExec: range=[0..0], lookback=[300000], interval=[300000], time index=[j]_|
|_|_PromSeriesDivideExec: tags=["k"]_|
|_|_MergeScanExec: REDACTED
| physical_plan after OutputRequirements_| MergeScanExec: REDACTED
|_|_|
| physical_plan after LimitAggregation_| SAME TEXT AS ABOVE_|
| physical_plan after ProjectionPushdown_| SAME TEXT AS ABOVE_|
@@ -171,17 +156,11 @@ TQL EXPLAIN VERBOSE (0, 10, '5s') test;
| physical_plan after MatchesConstantTerm_| SAME TEXT AS ABOVE_|
| physical_plan after RemoveDuplicateRule_| SAME TEXT AS ABOVE_|
| physical_plan after SanityCheckPlan_| SAME TEXT AS ABOVE_|
| physical_plan_| PromInstantManipulateExec: range=[0..0], lookback=[300000], interval=[300000], time index=[j]_|
|_|_PromSeriesDivideExec: tags=["k"]_|
|_|_MergeScanExec: REDACTED
| physical_plan_| MergeScanExec: REDACTED
|_|_|
| physical_plan_with_stats_| PromInstantManipulateExec: range=[0..0], lookback=[300000], interval=[300000], time index=[j], statistics=[Rows=Inexact(0), Bytes=Absent, [(Col[0]:),(Col[1]:),(Col[2]:)]] |
|_|_PromSeriesDivideExec: tags=["k"], statistics=[Rows=Absent, Bytes=Absent, [(Col[0]:),(Col[1]:),(Col[2]:)]]_|
|_|_MergeScanExec: REDACTED
| physical_plan_with_stats_| MergeScanExec: REDACTED
|_|_|
| physical_plan_with_schema_| PromInstantManipulateExec: range=[0..0], lookback=[300000], interval=[300000], time index=[j], schema=[i:Float64;N, j:Timestamp(Millisecond, None), k:Utf8;N]_|
|_|_PromSeriesDivideExec: tags=["k"], schema=[i:Float64;N, j:Timestamp(Millisecond, None), k:Utf8;N]_|
|_|_MergeScanExec: REDACTED
| physical_plan_with_schema_| MergeScanExec: REDACTED
|_|_|
+-+-+

164
tests/cases/standalone/common/tql/partition.result
Normal file
@@ -0,0 +1,164 @@
-- no partition
create table t (
i double,
j timestamp time index,
k string primary key
);

Affected Rows: 0

-- SQLNESS REPLACE (metrics.*) REDACTED
-- SQLNESS REPLACE (RoundRobinBatch.*) REDACTED
-- SQLNESS REPLACE (-+) -
-- SQLNESS REPLACE (\s\s+) _
-- SQLNESS REPLACE (peers.*) REDACTED
-- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED
tql analyze (0, 10, '1s') 100 - (avg by (k) (irate(t[1m])) * 100);

+-+-+-+
| stage | node | plan_|
+-+-+-+
| 0_| 0_|_MergeScanExec: REDACTED
|_|_|_|
| 1_| 0_|_ProjectionExec: expr=[k@0 as k, j@1 as j, 100 - avg(prom_irate(j_range,i))@2 * 100 as Float64(100) - avg(prom_irate(j_range,i)) * Float64(100)] REDACTED
|_|_|_RepartitionExec: partitioning=REDACTED
|_|_|_SortPreservingMergeExec: [k@0 ASC NULLS LAST, j@1 ASC NULLS LAST] REDACTED
|_|_|_SortExec: expr=[k@0 ASC NULLS LAST, j@1 ASC NULLS LAST], preserve_partitioning=[true] REDACTED
|_|_|_AggregateExec: mode=FinalPartitioned, gby=[k@0 as k, j@1 as j], aggr=[avg(prom_irate(j_range,i))] REDACTED
|_|_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED
|_|_|_RepartitionExec: partitioning=Hash([k@0, j@1], 32), input_partitions=32 REDACTED
|_|_|_AggregateExec: mode=Partial, gby=[k@2 as k, j@0 as j], aggr=[avg(prom_irate(j_range,i))] REDACTED
|_|_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED
|_|_|_FilterExec: prom_irate(j_range,i)@1 IS NOT NULL REDACTED
|_|_|_ProjectionExec: expr=[j@1 as j, prom_irate(j_range@3, i@0) as prom_irate(j_range,i), k@2 as k] REDACTED
|_|_|_PromRangeManipulateExec: req range=[0..10000], interval=[1000], eval range=[60000], time index=[j] REDACTED
|_|_|_PromSeriesNormalizeExec: offset=[0], time index=[j], filter NaN: [true] REDACTED
|_|_|_PromSeriesDivideExec: tags=["k"] REDACTED
|_|_|_SortExec: expr=[k@2 ASC], preserve_partitioning=[true] REDACTED
|_|_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED
|_|_|_RepartitionExec: partitioning=Hash([k@2], 32), input_partitions=1 REDACTED
|_|_|_SeqScan: region=REDACTED, partition_count=0 (0 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED
|_|_|_|
|_|_| Total rows: 0_|
+-+-+-+

drop table t;

Affected Rows: 0

-- partition on tag
create table t (
i double,
j timestamp time index,
k string,
l string,
primary key (k, l)
) partition on columns (k, l) (k < 'a', k >= 'a');

Affected Rows: 0

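To illustrate the two partition rules above (hypothetical rows, not part of the diff, assuming byte-wise string comparison): values of k below 'a' land in the first region and everything else in the second, which is why the ANALYZE output below carries two stage-1 plans, node 0 and node 1.

INSERT INTO t (i, j, k, l) VALUES
    (1.0, 0, 'A', 'x'),  -- 'A' < 'a', routed to the first partition
    (2.0, 0, 'b', 'y');  -- 'b' >= 'a', routed to the second partition
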
-- SQLNESS REPLACE (metrics.*) REDACTED
-- SQLNESS REPLACE (RoundRobinBatch.*) REDACTED
-- SQLNESS REPLACE (-+) -
-- SQLNESS REPLACE (\s\s+) _
-- SQLNESS REPLACE (peers.*) REDACTED
-- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED
tql analyze (0, 10, '1s') 100 - (avg by (k) (irate(t[1m])) * 100);

+-+-+-+
| stage | node | plan_|
+-+-+-+
| 0_| 0_|_ProjectionExec: expr=[k@0 as k, j@1 as j, 100 - avg(prom_irate(j_range,i))@2 * 100 as Float64(100) - avg(prom_irate(j_range,i)) * Float64(100)] REDACTED
|_|_|_RepartitionExec: partitioning=REDACTED
|_|_|_SortPreservingMergeExec: [k@0 ASC NULLS LAST, j@1 ASC NULLS LAST] REDACTED
|_|_|_SortExec: expr=[k@0 ASC NULLS LAST, j@1 ASC NULLS LAST], preserve_partitioning=[true] REDACTED
|_|_|_AggregateExec: mode=FinalPartitioned, gby=[k@0 as k, j@1 as j], aggr=[avg(prom_irate(j_range,i))], ordering_mode=PartiallySorted([0]) REDACTED
|_|_|_SortExec: expr=[k@0 ASC NULLS LAST], preserve_partitioning=[true] REDACTED
|_|_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED
|_|_|_RepartitionExec: partitioning=Hash([k@0, j@1], 32), input_partitions=32 REDACTED
|_|_|_AggregateExec: mode=Partial, gby=[k@2 as k, j@0 as j], aggr=[avg(prom_irate(j_range,i))], ordering_mode=PartiallySorted([0]) REDACTED
|_|_|_ProjectionExec: expr=[j@0 as j, prom_irate(j_range,i)@1 as prom_irate(j_range,i), k@2 as k] REDACTED
|_|_|_SortExec: expr=[k@2 ASC, l@3 ASC, j@0 ASC], preserve_partitioning=[true] REDACTED
|_|_|_MergeScanExec: REDACTED
|_|_|_|
| 1_| 0_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED
|_|_|_FilterExec: prom_irate(j_range,i)@1 IS NOT NULL REDACTED
|_|_|_ProjectionExec: expr=[j@1 as j, prom_irate(j_range@4, i@0) as prom_irate(j_range,i), k@2 as k, l@3 as l] REDACTED
|_|_|_PromRangeManipulateExec: req range=[0..10000], interval=[1000], eval range=[60000], time index=[j] REDACTED
|_|_|_PromSeriesNormalizeExec: offset=[0], time index=[j], filter NaN: [true] REDACTED
|_|_|_PromSeriesDivideExec: tags=["k", "l"] REDACTED
|_|_|_SortExec: expr=[k@2 ASC, l@3 ASC], preserve_partitioning=[true] REDACTED
|_|_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED
|_|_|_RepartitionExec: partitioning=Hash([k@2, l@3], 32), input_partitions=1 REDACTED
|_|_|_SeqScan: region=REDACTED, partition_count=0 (0 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED
|_|_|_|
| 1_| 1_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED
|_|_|_FilterExec: prom_irate(j_range,i)@1 IS NOT NULL REDACTED
|_|_|_ProjectionExec: expr=[j@1 as j, prom_irate(j_range@4, i@0) as prom_irate(j_range,i), k@2 as k, l@3 as l] REDACTED
|_|_|_PromRangeManipulateExec: req range=[0..10000], interval=[1000], eval range=[60000], time index=[j] REDACTED
|_|_|_PromSeriesNormalizeExec: offset=[0], time index=[j], filter NaN: [true] REDACTED
|_|_|_PromSeriesDivideExec: tags=["k", "l"] REDACTED
|_|_|_SortExec: expr=[k@2 ASC, l@3 ASC], preserve_partitioning=[true] REDACTED
|_|_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED
|_|_|_RepartitionExec: partitioning=Hash([k@2, l@3], 32), input_partitions=1 REDACTED
|_|_|_SeqScan: region=REDACTED, partition_count=0 (0 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED
|_|_|_|
|_|_| Total rows: 0_|
+-+-+-+

drop table t;

Affected Rows: 0

-- partition on value
create table t (
i double,
j timestamp time index,
k string,
l string,
primary key (k, l)
) partition on columns (i) (i < 1.0, i >= 1.0);

Affected Rows: 0

-- SQLNESS REPLACE (metrics.*) REDACTED
-- SQLNESS REPLACE (RoundRobinBatch.*) REDACTED
-- SQLNESS REPLACE (-+) -
-- SQLNESS REPLACE (\s\s+) _
-- SQLNESS REPLACE (peers.*) REDACTED
-- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED
tql analyze (0, 10, '1s') 100 - (avg by (k) (irate(t[1m])) * 100);

+-+-+-+
| stage | node | plan_|
+-+-+-+
| 0_| 0_|_ProjectionExec: expr=[k@0 as k, j@1 as j, 100 - avg(prom_irate(j_range,i))@2 * 100 as Float64(100) - avg(prom_irate(j_range,i)) * Float64(100)] REDACTED
|_|_|_RepartitionExec: partitioning=REDACTED
|_|_|_SortPreservingMergeExec: [k@0 ASC NULLS LAST, j@1 ASC NULLS LAST] REDACTED
|_|_|_SortExec: expr=[k@0 ASC NULLS LAST, j@1 ASC NULLS LAST], preserve_partitioning=[true] REDACTED
|_|_|_AggregateExec: mode=FinalPartitioned, gby=[k@0 as k, j@1 as j], aggr=[avg(prom_irate(j_range,i))] REDACTED
|_|_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED
|_|_|_RepartitionExec: partitioning=Hash([k@0, j@1], 32), input_partitions=32 REDACTED
|_|_|_AggregateExec: mode=Partial, gby=[k@2 as k, j@0 as j], aggr=[avg(prom_irate(j_range,i))] REDACTED
|_|_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED
|_|_|_FilterExec: prom_irate(j_range,i)@1 IS NOT NULL REDACTED
|_|_|_ProjectionExec: expr=[j@1 as j, prom_irate(j_range@4, i@0) as prom_irate(j_range,i), k@2 as k] REDACTED
|_|_|_PromRangeManipulateExec: req range=[0..10000], interval=[1000], eval range=[60000], time index=[j] REDACTED
|_|_|_PromSeriesNormalizeExec: offset=[0], time index=[j], filter NaN: [true] REDACTED
|_|_|_PromSeriesDivideExec: tags=["k", "l"] REDACTED
|_|_|_SortExec: expr=[k@2 ASC, l@3 ASC], preserve_partitioning=[true] REDACTED
|_|_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED
|_|_|_RepartitionExec: partitioning=Hash([k@2, l@3], 32), input_partitions=32 REDACTED
|_|_|_MergeScanExec: REDACTED
|_|_|_|
| 1_| 0_|_SeqScan: region=REDACTED, partition_count=0 (0 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED
|_|_|_|
| 1_| 1_|_SeqScan: region=REDACTED, partition_count=0 (0 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED
|_|_|_|
|_|_| Total rows: 0_|
+-+-+-+

drop table t;

Affected Rows: 0

54
tests/cases/standalone/common/tql/partition.sql
Normal file
@@ -0,0 +1,54 @@
-- no partition
create table t (
i double,
j timestamp time index,
k string primary key
);

-- SQLNESS REPLACE (metrics.*) REDACTED
-- SQLNESS REPLACE (RoundRobinBatch.*) REDACTED
-- SQLNESS REPLACE (-+) -
-- SQLNESS REPLACE (\s\s+) _
-- SQLNESS REPLACE (peers.*) REDACTED
-- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED
tql analyze (0, 10, '1s') 100 - (avg by (k) (irate(t[1m])) * 100);

drop table t;

-- partition on tag
create table t (
i double,
j timestamp time index,
k string,
l string,
primary key (k, l)
) partition on columns (k, l) (k < 'a', k >= 'a');

-- SQLNESS REPLACE (metrics.*) REDACTED
-- SQLNESS REPLACE (RoundRobinBatch.*) REDACTED
-- SQLNESS REPLACE (-+) -
-- SQLNESS REPLACE (\s\s+) _
-- SQLNESS REPLACE (peers.*) REDACTED
-- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED
tql analyze (0, 10, '1s') 100 - (avg by (k) (irate(t[1m])) * 100);

drop table t;

-- partition on value
create table t (
i double,
j timestamp time index,
k string,
l string,
primary key (k, l)
) partition on columns (i) (i < 1.0, i >= 1.0);

-- SQLNESS REPLACE (metrics.*) REDACTED
-- SQLNESS REPLACE (RoundRobinBatch.*) REDACTED
-- SQLNESS REPLACE (-+) -
-- SQLNESS REPLACE (\s\s+) _
-- SQLNESS REPLACE (peers.*) REDACTED
-- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED
tql analyze (0, 10, '1s') 100 - (avg by (k) (irate(t[1m])) * 100);

drop table t;