Compare commits


39 Commits
v0.14.2 ... jkt

Author SHA1 Message Date
Ruihang Xia
edd8cb6710 add rate, increase and delta
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2025-04-29 10:54:32 +08:00
Ruihang Xia
7ee61e5d28 disable recursion limit
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2025-04-28 16:58:30 +08:00
Ruihang Xia
1b30aca5a5 Merge branch 'prom-plan-commutativity' into jkt 2025-04-27 19:23:42 +08:00
Ruihang Xia
99b352cea1 Merge branch 'main' into prom-plan-commutativity 2025-04-27 17:40:24 +08:00
Ruihang Xia
0f521956bf update tests
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2025-04-27 17:35:50 +08:00
Ruihang Xia
aee72ab363 fix clippy
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2025-04-27 17:35:29 +08:00
shuiyisong
3c943be189 chore: update rust toolchain (#5818)
* chore: update nightly version

* chore: sort lint lines

* chore: minor fix

* chore: update nix

* chore: update toolchain to 2024-04-14

* chore: update toolchain to 2024-04-15

* chore: remove unnecessory test

* chore: do not assert oid in sqlness test

* chore: fix margin issue

* chore: fix cr issues

* chore: fix cr issues

---------

Co-authored-by: Ning Sun <sunning@greptime.com>
2025-04-27 09:02:36 +00:00
Ruihang Xia
5b78d76fc5 handle partition and ordering
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2025-04-27 15:38:57 +08:00
Ning Sun
eeba466717 ci: read next release version from toml by default (#5986)
* ci: read next release version from toml by default

* ci: send error message to stderr

* ci: take the first version only
2025-04-27 04:43:44 +00:00
Zhenchi
2ff54486d3 chore: bump main branch version to 0.15 (#5984)
Signed-off-by: Zhenchi <zhongzc_arch@outlook.com>
2025-04-27 01:39:44 +00:00
evenyag
a166430650 chore: fix typos 2025-04-24 06:08:42 +08:00
evenyag
007a2b3dfe refactor: use ChainedRecordBatchStream to simplify codes 2025-04-24 06:03:07 +08:00
evenyag
f35e957ddd style: fix clippy 2025-04-24 06:03:07 +08:00
evenyag
68414bf593 feat: metrics for send series timeout 2025-04-24 06:03:07 +08:00
evenyag
5e836a0d1b chore: display more info in explain 2025-04-24 06:03:07 +08:00
evenyag
f5e0da2fc8 fix: scanner metrics init 2025-04-24 06:03:07 +08:00
evenyag
fb96d26ebf chore: reset instant 2025-04-24 06:03:07 +08:00
evenyag
0046d3f65b fix: correct scan cost metrics 2025-04-24 06:03:06 +08:00
evenyag
d7b97fc877 fix: add metrics to partition metrics list 2025-04-24 06:03:06 +08:00
evenyag
bfdaa28b25 chore: add comments 2025-04-24 06:03:06 +08:00
evenyag
6293bb1f5b feat: try send before send
reduce the send timeout to 10ms
2025-04-24 06:03:06 +08:00
Ruihang Xia
8fa1ebcc3e assign partition_ranges
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2025-04-24 06:03:06 +08:00
Ruihang Xia
c18c3f5839 Revert "feat: keep parallelize_scan unchanged"
This reverts commit 96ba00d175.
2025-04-24 06:03:06 +08:00
evenyag
629e72d8c0 chore: update comment 2025-04-24 06:03:06 +08:00
evenyag
e4065505ab feat: use smallvec 2025-04-24 06:03:05 +08:00
evenyag
aafd164483 fix: include build merge reader cost to scan cost 2025-04-24 06:03:05 +08:00
evenyag
1386e903d6 fix: address compiler errors 2025-04-24 06:03:05 +08:00
evenyag
12692a940c feat: keep parallelize_scan unchanged 2025-04-24 06:03:05 +08:00
evenyag
4d44cbb8b2 fix: use series scan in PerSeries distribution 2025-04-24 06:03:05 +08:00
evenyag
f4911aa3bb refactor: remove per series scan from SeqScan 2025-04-24 06:03:05 +08:00
evenyag
5ac61f17bc feat: parallelize PerSeries 2025-04-24 06:03:05 +08:00
evenyag
e0d34c6d95 feat: use series scan when distribution is PerSeries 2025-04-24 06:03:04 +08:00
evenyag
8a98b9c433 feat: implement scan logic of each partition 2025-04-24 06:03:03 +08:00
evenyag
1f5d36a203 chore: add to scanner enum 2025-04-24 05:57:37 +08:00
evenyag
6fc7168893 chore: basic methods for SeriesScan 2025-04-24 05:57:37 +08:00
Ruihang Xia
2799d67212 change dictionary type
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2025-04-11 17:07:55 +08:00
Ruihang Xia
d97a76c312 blocklist in commutativity rule
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2025-04-11 17:07:16 +08:00
Ruihang Xia
15caca244e fix range manipulate deserializer
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2025-04-10 19:38:03 +08:00
Ruihang Xia
8638075cdd feat: implement commutativity rule for prom-related plans
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2025-04-10 16:26:31 +08:00
105 changed files with 1926 additions and 1740 deletions

View File

@@ -10,17 +10,17 @@ set -e
function create_version() {
# Read from envrionment variables.
if [ -z "$GITHUB_EVENT_NAME" ]; then
echo "GITHUB_EVENT_NAME is empty"
echo "GITHUB_EVENT_NAME is empty" >&2
exit 1
fi
if [ -z "$NEXT_RELEASE_VERSION" ]; then
echo "NEXT_RELEASE_VERSION is empty"
exit 1
echo "NEXT_RELEASE_VERSION is empty, use version from Cargo.toml" >&2
export NEXT_RELEASE_VERSION=$(grep '^version = ' Cargo.toml | cut -d '"' -f 2 | head -n 1)
fi
if [ -z "$NIGHTLY_RELEASE_PREFIX" ]; then
echo "NIGHTLY_RELEASE_PREFIX is empty"
echo "NIGHTLY_RELEASE_PREFIX is empty" >&2
exit 1
fi
@@ -35,7 +35,7 @@ function create_version() {
# It will be like 'dev-2023080819-f0e7216c'.
if [ "$NEXT_RELEASE_VERSION" = dev ]; then
if [ -z "$COMMIT_SHA" ]; then
echo "COMMIT_SHA is empty in dev build"
echo "COMMIT_SHA is empty in dev build" >&2
exit 1
fi
echo "dev-$(date "+%Y%m%d-%s")-$(echo "$COMMIT_SHA" | cut -c1-8)"
@@ -45,7 +45,7 @@ function create_version() {
# Note: Only output 'version=xxx' to stdout when everything is ok, so that it can be used in GitHub Actions Outputs.
if [ "$GITHUB_EVENT_NAME" = push ]; then
if [ -z "$GITHUB_REF_NAME" ]; then
echo "GITHUB_REF_NAME is empty in push event"
echo "GITHUB_REF_NAME is empty in push event" >&2
exit 1
fi
echo "$GITHUB_REF_NAME"
@@ -54,7 +54,7 @@ function create_version() {
elif [ "$GITHUB_EVENT_NAME" = schedule ]; then
echo "$NEXT_RELEASE_VERSION-$NIGHTLY_RELEASE_PREFIX-$(date "+%Y%m%d")"
else
echo "Unsupported GITHUB_EVENT_NAME: $GITHUB_EVENT_NAME"
echo "Unsupported GITHUB_EVENT_NAME: $GITHUB_EVENT_NAME" >&2
exit 1
fi
}
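Note: with this change an empty NEXT_RELEASE_VERSION no longer aborts the script; the version falls back to the first `version = "..."` entry in the workspace Cargo.toml. A minimal Rust sketch (not part of this changeset, added only as illustration) of what the `grep '^version = ' | cut -d '"' -f 2 | head -n 1` pipeline extracts:

// Hypothetical illustration of the shell pipeline in create_version(); not repository code.
fn first_workspace_version(cargo_toml: &str) -> Option<String> {
    cargo_toml
        .lines()
        .find(|line| line.starts_with("version = ")) // grep '^version = ' + head -n 1
        .and_then(|line| line.split('"').nth(1))     // cut -d '"' -f 2
        .map(|v| v.to_string())
}

fn main() {
    // Against the workspace Cargo.toml in this changeset, this yields Some("0.15.0").
    let toml = "[workspace.package]\nversion = \"0.15.0\"\nedition = \"2021\"\n";
    assert_eq!(first_workspace_version(toml), Some("0.15.0".to_string()));
}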

View File

@@ -1,37 +0,0 @@
#!/bin/bash
DEV_BUILDER_IMAGE_TAG=$1
update_dev_builder_version() {
if [ -z "$DEV_BUILDER_IMAGE_TAG" ]; then
echo "Error: Should specify the dev-builder image tag"
exit 1
fi
# Configure Git configs.
git config --global user.email greptimedb-ci@greptime.com
git config --global user.name greptimedb-ci
# Checkout a new branch.
BRANCH_NAME="ci/update-dev-builder-$(date +%Y%m%d%H%M%S)"
git checkout -b $BRANCH_NAME
# Update the dev-builder image tag in the Makefile.
gsed -i "s/DEV_BUILDER_IMAGE_TAG ?=.*/DEV_BUILDER_IMAGE_TAG ?= ${DEV_BUILDER_IMAGE_TAG}/g" Makefile
# Commit the changes.
git add Makefile
git commit -m "ci: update dev-builder image tag"
git push origin $BRANCH_NAME
# Create a Pull Request.
gh pr create \
--title "ci: update dev-builder image tag" \
--body "This PR updates the dev-builder image tag" \
--base main \
--head $BRANCH_NAME \
--reviewer zyy17 \
--reviewer daviderli614
}
update_dev_builder_version

View File

@@ -24,19 +24,11 @@ on:
description: Release dev-builder-android image
required: false
default: false
update_dev_builder_image_tag:
type: boolean
description: Update the DEV_BUILDER_IMAGE_TAG in Makefile and create a PR
required: false
default: false
jobs:
release-dev-builder-images:
name: Release dev builder images
# The jobs are triggered by the following events:
# 1. Manually triggered workflow_dispatch event
# 2. Push event when the PR that modifies the `rust-toolchain.toml` or `docker/dev-builder/**` is merged to main
if: ${{ github.event_name == 'push' || inputs.release_dev_builder_ubuntu_image || inputs.release_dev_builder_centos_image || inputs.release_dev_builder_android_image }}
if: ${{ inputs.release_dev_builder_ubuntu_image || inputs.release_dev_builder_centos_image || inputs.release_dev_builder_android_image }} # Only manually trigger this job.
runs-on: ubuntu-latest
outputs:
version: ${{ steps.set-version.outputs.version }}
@@ -65,9 +57,9 @@ jobs:
version: ${{ env.VERSION }}
dockerhub-image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
dockerhub-image-registry-token: ${{ secrets.DOCKERHUB_TOKEN }}
build-dev-builder-ubuntu: ${{ inputs.release_dev_builder_ubuntu_image || github.event_name == 'push' }}
build-dev-builder-centos: ${{ inputs.release_dev_builder_centos_image || github.event_name == 'push' }}
build-dev-builder-android: ${{ inputs.release_dev_builder_android_image || github.event_name == 'push' }}
build-dev-builder-ubuntu: ${{ inputs.release_dev_builder_ubuntu_image }}
build-dev-builder-centos: ${{ inputs.release_dev_builder_centos_image }}
build-dev-builder-android: ${{ inputs.release_dev_builder_android_image }}
release-dev-builder-images-ecr:
name: Release dev builder images to AWS ECR
@@ -93,7 +85,7 @@ jobs:
- name: Push dev-builder-ubuntu image
shell: bash
if: ${{ inputs.release_dev_builder_ubuntu_image || github.event_name == 'push' }}
if: ${{ inputs.release_dev_builder_ubuntu_image }}
env:
IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
@@ -114,7 +106,7 @@ jobs:
- name: Push dev-builder-centos image
shell: bash
if: ${{ inputs.release_dev_builder_centos_image || github.event_name == 'push' }}
if: ${{ inputs.release_dev_builder_centos_image }}
env:
IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
@@ -135,7 +127,7 @@ jobs:
- name: Push dev-builder-android image
shell: bash
if: ${{ inputs.release_dev_builder_android_image || github.event_name == 'push' }}
if: ${{ inputs.release_dev_builder_android_image }}
env:
IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
@@ -170,7 +162,7 @@ jobs:
- name: Push dev-builder-ubuntu image
shell: bash
if: ${{ inputs.release_dev_builder_ubuntu_image || github.event_name == 'push' }}
if: ${{ inputs.release_dev_builder_ubuntu_image }}
env:
IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
@@ -184,7 +176,7 @@ jobs:
- name: Push dev-builder-centos image
shell: bash
if: ${{ inputs.release_dev_builder_centos_image || github.event_name == 'push' }}
if: ${{ inputs.release_dev_builder_centos_image }}
env:
IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
@@ -198,7 +190,7 @@ jobs:
- name: Push dev-builder-android image
shell: bash
if: ${{ inputs.release_dev_builder_android_image || github.event_name == 'push' }}
if: ${{ inputs.release_dev_builder_android_image }}
env:
IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
@@ -209,24 +201,3 @@ jobs:
quay.io/skopeo/stable:latest \
copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-android:$IMAGE_VERSION \
docker://$ACR_IMAGE_REGISTRY/$IMAGE_NAMESPACE/dev-builder-android:$IMAGE_VERSION
update-dev-builder-image-tag:
name: Update dev-builder image tag
runs-on: ubuntu-latest
permissions:
contents: write
pull-requests: write
if: ${{ github.event_name == 'push' || inputs.update_dev_builder_image_tag }}
needs: [
release-dev-builder-images
]
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Update dev-builder image tag
shell: bash
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
./.github/scripts/update-dev-builder-version.sh ${{ needs.release-dev-builder-images.outputs.version }}

View File

@@ -90,8 +90,6 @@ env:
# The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nigthly-20230313;
NIGHTLY_RELEASE_PREFIX: nightly
# Note: The NEXT_RELEASE_VERSION should be modified manually by every formal release.
NEXT_RELEASE_VERSION: v0.14.0
jobs:
allocate-runners:
@@ -135,7 +133,6 @@ jobs:
env:
GITHUB_EVENT_NAME: ${{ github.event_name }}
GITHUB_REF_NAME: ${{ github.ref_name }}
NEXT_RELEASE_VERSION: ${{ env.NEXT_RELEASE_VERSION }}
NIGHTLY_RELEASE_PREFIX: ${{ env.NIGHTLY_RELEASE_PREFIX }}
- name: Allocate linux-amd64 runner

Cargo.lock (generated, 180 changed lines)
View File

@@ -173,9 +173,9 @@ dependencies = [
[[package]]
name = "anyhow"
version = "1.0.89"
version = "1.0.98"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "86fdf8605db99b54d3cd748a44c6d04df638eb5dafb219b135d0149bd0db01f6"
checksum = "e16d2d3311acee920a9eb8d33b8cbc1787ce4a264e85f964c2404b969bdcd487"
[[package]]
name = "anymap2"
@@ -185,7 +185,7 @@ checksum = "d301b3b94cb4b2f23d7917810addbbaff90738e0ca2be692bd027e70d7e0330c"
[[package]]
name = "api"
version = "0.14.2"
version = "0.15.0"
dependencies = [
"common-base",
"common-decimal",
@@ -915,7 +915,7 @@ dependencies = [
[[package]]
name = "auth"
version = "0.14.2"
version = "0.15.0"
dependencies = [
"api",
"async-trait",
@@ -1537,7 +1537,7 @@ dependencies = [
[[package]]
name = "cache"
version = "0.14.2"
version = "0.15.0"
dependencies = [
"catalog",
"common-error",
@@ -1561,7 +1561,7 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"
[[package]]
name = "catalog"
version = "0.14.2"
version = "0.15.0"
dependencies = [
"api",
"arrow 54.2.1",
@@ -1597,7 +1597,7 @@ dependencies = [
"partition",
"paste",
"prometheus",
"rustc-hash 2.0.0",
"rustc-hash 2.1.1",
"serde_json",
"session",
"snafu 0.8.5",
@@ -1874,7 +1874,7 @@ checksum = "1462739cb27611015575c0c11df5df7601141071f07518d56fcc1be504cbec97"
[[package]]
name = "cli"
version = "0.14.2"
version = "0.15.0"
dependencies = [
"async-trait",
"auth",
@@ -1917,7 +1917,7 @@ dependencies = [
"session",
"snafu 0.8.5",
"store-api",
"substrait 0.14.2",
"substrait 0.15.0",
"table",
"tempfile",
"tokio",
@@ -1926,7 +1926,7 @@ dependencies = [
[[package]]
name = "client"
version = "0.14.2"
version = "0.15.0"
dependencies = [
"api",
"arc-swap",
@@ -1955,7 +1955,7 @@ dependencies = [
"rand 0.9.0",
"serde_json",
"snafu 0.8.5",
"substrait 0.14.2",
"substrait 0.15.0",
"substrait 0.37.3",
"tokio",
"tokio-stream",
@@ -1996,7 +1996,7 @@ dependencies = [
[[package]]
name = "cmd"
version = "0.14.2"
version = "0.15.0"
dependencies = [
"async-trait",
"auth",
@@ -2056,7 +2056,7 @@ dependencies = [
"similar-asserts",
"snafu 0.8.5",
"store-api",
"substrait 0.14.2",
"substrait 0.15.0",
"table",
"temp-env",
"tempfile",
@@ -2102,7 +2102,7 @@ checksum = "55b672471b4e9f9e95499ea597ff64941a309b2cdbffcc46f2cc5e2d971fd335"
[[package]]
name = "common-base"
version = "0.14.2"
version = "0.15.0"
dependencies = [
"anymap2",
"async-trait",
@@ -2124,11 +2124,11 @@ dependencies = [
[[package]]
name = "common-catalog"
version = "0.14.2"
version = "0.15.0"
[[package]]
name = "common-config"
version = "0.14.2"
version = "0.15.0"
dependencies = [
"common-base",
"common-error",
@@ -2153,7 +2153,7 @@ dependencies = [
[[package]]
name = "common-datasource"
version = "0.14.2"
version = "0.15.0"
dependencies = [
"arrow 54.2.1",
"arrow-schema 54.3.1",
@@ -2190,7 +2190,7 @@ dependencies = [
[[package]]
name = "common-decimal"
version = "0.14.2"
version = "0.15.0"
dependencies = [
"bigdecimal 0.4.8",
"common-error",
@@ -2203,7 +2203,7 @@ dependencies = [
[[package]]
name = "common-error"
version = "0.14.2"
version = "0.15.0"
dependencies = [
"common-macro",
"http 1.1.0",
@@ -2214,7 +2214,7 @@ dependencies = [
[[package]]
name = "common-frontend"
version = "0.14.2"
version = "0.15.0"
dependencies = [
"async-trait",
"common-error",
@@ -2224,7 +2224,7 @@ dependencies = [
[[package]]
name = "common-function"
version = "0.14.2"
version = "0.15.0"
dependencies = [
"ahash 0.8.11",
"api",
@@ -2277,7 +2277,7 @@ dependencies = [
[[package]]
name = "common-greptimedb-telemetry"
version = "0.14.2"
version = "0.15.0"
dependencies = [
"async-trait",
"common-runtime",
@@ -2294,7 +2294,7 @@ dependencies = [
[[package]]
name = "common-grpc"
version = "0.14.2"
version = "0.15.0"
dependencies = [
"api",
"arrow-flight",
@@ -2325,7 +2325,7 @@ dependencies = [
[[package]]
name = "common-grpc-expr"
version = "0.14.2"
version = "0.15.0"
dependencies = [
"api",
"common-base",
@@ -2344,7 +2344,7 @@ dependencies = [
[[package]]
name = "common-macro"
version = "0.14.2"
version = "0.15.0"
dependencies = [
"arc-swap",
"common-query",
@@ -2358,7 +2358,7 @@ dependencies = [
[[package]]
name = "common-mem-prof"
version = "0.14.2"
version = "0.15.0"
dependencies = [
"common-error",
"common-macro",
@@ -2371,7 +2371,7 @@ dependencies = [
[[package]]
name = "common-meta"
version = "0.14.2"
version = "0.15.0"
dependencies = [
"anymap2",
"api",
@@ -2432,7 +2432,7 @@ dependencies = [
[[package]]
name = "common-options"
version = "0.14.2"
version = "0.15.0"
dependencies = [
"common-grpc",
"humantime-serde",
@@ -2441,11 +2441,11 @@ dependencies = [
[[package]]
name = "common-plugins"
version = "0.14.2"
version = "0.15.0"
[[package]]
name = "common-pprof"
version = "0.14.2"
version = "0.15.0"
dependencies = [
"common-error",
"common-macro",
@@ -2457,7 +2457,7 @@ dependencies = [
[[package]]
name = "common-procedure"
version = "0.14.2"
version = "0.15.0"
dependencies = [
"async-stream",
"async-trait",
@@ -2484,7 +2484,7 @@ dependencies = [
[[package]]
name = "common-procedure-test"
version = "0.14.2"
version = "0.15.0"
dependencies = [
"async-trait",
"common-procedure",
@@ -2493,7 +2493,7 @@ dependencies = [
[[package]]
name = "common-query"
version = "0.14.2"
version = "0.15.0"
dependencies = [
"api",
"async-trait",
@@ -2519,7 +2519,7 @@ dependencies = [
[[package]]
name = "common-recordbatch"
version = "0.14.2"
version = "0.15.0"
dependencies = [
"arc-swap",
"common-error",
@@ -2539,7 +2539,7 @@ dependencies = [
[[package]]
name = "common-runtime"
version = "0.14.2"
version = "0.15.0"
dependencies = [
"async-trait",
"clap 4.5.19",
@@ -2569,14 +2569,14 @@ dependencies = [
[[package]]
name = "common-session"
version = "0.14.2"
version = "0.15.0"
dependencies = [
"strum 0.27.1",
]
[[package]]
name = "common-telemetry"
version = "0.14.2"
version = "0.15.0"
dependencies = [
"atty",
"backtrace",
@@ -2604,7 +2604,7 @@ dependencies = [
[[package]]
name = "common-test-util"
version = "0.14.2"
version = "0.15.0"
dependencies = [
"client",
"common-query",
@@ -2616,7 +2616,7 @@ dependencies = [
[[package]]
name = "common-time"
version = "0.14.2"
version = "0.15.0"
dependencies = [
"arrow 54.2.1",
"chrono",
@@ -2634,7 +2634,7 @@ dependencies = [
[[package]]
name = "common-version"
version = "0.14.2"
version = "0.15.0"
dependencies = [
"build-data",
"const_format",
@@ -2644,7 +2644,7 @@ dependencies = [
[[package]]
name = "common-wal"
version = "0.14.2"
version = "0.15.0"
dependencies = [
"common-base",
"common-error",
@@ -3110,9 +3110,9 @@ dependencies = [
[[package]]
name = "data-encoding"
version = "2.6.0"
version = "2.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e8566979429cf69b49a5c740c60791108e86440e8be149bbea4fe54d2c32d6e2"
checksum = "2a2330da5de22e8a3cb63252ce2abb30116bf5265e89c0e01bc17015ce30a476"
[[package]]
name = "datafusion"
@@ -3572,7 +3572,7 @@ dependencies = [
[[package]]
name = "datanode"
version = "0.14.2"
version = "0.15.0"
dependencies = [
"api",
"arrow-flight",
@@ -3624,7 +3624,7 @@ dependencies = [
"session",
"snafu 0.8.5",
"store-api",
"substrait 0.14.2",
"substrait 0.15.0",
"table",
"tokio",
"toml 0.8.19",
@@ -3633,7 +3633,7 @@ dependencies = [
[[package]]
name = "datatypes"
version = "0.14.2"
version = "0.15.0"
dependencies = [
"arrow 54.2.1",
"arrow-array 54.2.1",
@@ -4259,7 +4259,7 @@ dependencies = [
[[package]]
name = "file-engine"
version = "0.14.2"
version = "0.15.0"
dependencies = [
"api",
"async-trait",
@@ -4382,7 +4382,7 @@ checksum = "8bf7cc16383c4b8d58b9905a8509f02926ce3058053c056376248d958c9df1e8"
[[package]]
name = "flow"
version = "0.14.2"
version = "0.15.0"
dependencies = [
"api",
"arrow 54.2.1",
@@ -4444,7 +4444,7 @@ dependencies = [
"snafu 0.8.5",
"store-api",
"strum 0.27.1",
"substrait 0.14.2",
"substrait 0.15.0",
"table",
"tokio",
"tonic 0.12.3",
@@ -4499,7 +4499,7 @@ checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa"
[[package]]
name = "frontend"
version = "0.14.2"
version = "0.15.0"
dependencies = [
"api",
"arc-swap",
@@ -4556,7 +4556,7 @@ dependencies = [
"sqlparser 0.54.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0cf6c04490d59435ee965edd2078e8855bd8471e)",
"store-api",
"strfmt",
"substrait 0.14.2",
"substrait 0.15.0",
"table",
"tokio",
"toml 0.8.19",
@@ -5795,7 +5795,7 @@ dependencies = [
[[package]]
name = "index"
version = "0.14.2"
version = "0.15.0"
dependencies = [
"async-trait",
"asynchronous-codec",
@@ -6509,7 +6509,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4979f22fdb869068da03c9f7528f8297c6fd2606bc3a4affe42e6a823fdb8da4"
dependencies = [
"cfg-if",
"windows-targets 0.48.5",
"windows-targets 0.52.6",
]
[[package]]
@@ -6599,13 +6599,13 @@ dependencies = [
[[package]]
name = "log"
version = "0.4.22"
version = "0.4.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24"
checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94"
[[package]]
name = "log-query"
version = "0.14.2"
version = "0.15.0"
dependencies = [
"chrono",
"common-error",
@@ -6617,7 +6617,7 @@ dependencies = [
[[package]]
name = "log-store"
version = "0.14.2"
version = "0.15.0"
dependencies = [
"async-stream",
"async-trait",
@@ -6911,7 +6911,7 @@ dependencies = [
[[package]]
name = "meta-client"
version = "0.14.2"
version = "0.15.0"
dependencies = [
"api",
"async-trait",
@@ -6939,7 +6939,7 @@ dependencies = [
[[package]]
name = "meta-srv"
version = "0.14.2"
version = "0.15.0"
dependencies = [
"api",
"async-trait",
@@ -7029,7 +7029,7 @@ dependencies = [
[[package]]
name = "metric-engine"
version = "0.14.2"
version = "0.15.0"
dependencies = [
"api",
"aquamarine",
@@ -7118,7 +7118,7 @@ dependencies = [
[[package]]
name = "mito2"
version = "0.14.2"
version = "0.15.0"
dependencies = [
"api",
"aquamarine",
@@ -7780,7 +7780,7 @@ version = "0.7.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "af1844ef2428cc3e1cb900be36181049ef3d3193c63e43026cfe202983b27a56"
dependencies = [
"proc-macro-crate 1.3.1",
"proc-macro-crate 3.2.0",
"proc-macro2",
"quote",
"syn 2.0.100",
@@ -7824,7 +7824,7 @@ dependencies = [
[[package]]
name = "object-store"
version = "0.14.2"
version = "0.15.0"
dependencies = [
"anyhow",
"bytes",
@@ -8119,7 +8119,7 @@ dependencies = [
[[package]]
name = "operator"
version = "0.14.2"
version = "0.15.0"
dependencies = [
"ahash 0.8.11",
"api",
@@ -8168,7 +8168,7 @@ dependencies = [
"sql",
"sqlparser 0.54.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0cf6c04490d59435ee965edd2078e8855bd8471e)",
"store-api",
"substrait 0.14.2",
"substrait 0.15.0",
"table",
"tokio",
"tokio-util",
@@ -8423,7 +8423,7 @@ dependencies = [
[[package]]
name = "partition"
version = "0.14.2"
version = "0.15.0"
dependencies = [
"api",
"async-trait",
@@ -8705,7 +8705,7 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"
[[package]]
name = "pipeline"
version = "0.14.2"
version = "0.15.0"
dependencies = [
"ahash 0.8.11",
"api",
@@ -8847,7 +8847,7 @@ dependencies = [
[[package]]
name = "plugins"
version = "0.14.2"
version = "0.15.0"
dependencies = [
"auth",
"clap 4.5.19",
@@ -9127,7 +9127,7 @@ dependencies = [
[[package]]
name = "promql"
version = "0.14.2"
version = "0.15.0"
dependencies = [
"ahash 0.8.11",
"async-trait",
@@ -9373,7 +9373,7 @@ dependencies = [
[[package]]
name = "puffin"
version = "0.14.2"
version = "0.15.0"
dependencies = [
"async-compression 0.4.13",
"async-trait",
@@ -9414,7 +9414,7 @@ dependencies = [
[[package]]
name = "query"
version = "0.14.2"
version = "0.15.0"
dependencies = [
"ahash 0.8.11",
"api",
@@ -9480,7 +9480,7 @@ dependencies = [
"sqlparser 0.54.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0cf6c04490d59435ee965edd2078e8855bd8471e)",
"statrs",
"store-api",
"substrait 0.14.2",
"substrait 0.15.0",
"table",
"tokio",
"tokio-stream",
@@ -9527,7 +9527,7 @@ dependencies = [
"pin-project-lite",
"quinn-proto",
"quinn-udp",
"rustc-hash 2.0.0",
"rustc-hash 2.1.1",
"rustls",
"socket2",
"thiserror 1.0.64",
@@ -9544,7 +9544,7 @@ dependencies = [
"bytes",
"rand 0.8.5",
"ring",
"rustc-hash 2.0.0",
"rustc-hash 2.1.1",
"rustls",
"slab",
"thiserror 1.0.64",
@@ -9821,9 +9821,9 @@ dependencies = [
[[package]]
name = "regex"
version = "1.11.0"
version = "1.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "38200e5ee88914975b69f657f0801b6f6dccafd44fd9326302a4aaeecfacb1d8"
checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191"
dependencies = [
"aho-corasick",
"memchr",
@@ -10333,9 +10333,9 @@ checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2"
[[package]]
name = "rustc-hash"
version = "2.0.0"
version = "2.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "583034fd73374156e66797ed8e5b0d5690409c9226b22d87cb7f19821c05d152"
checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d"
[[package]]
name = "rustc_version"
@@ -10830,7 +10830,7 @@ dependencies = [
[[package]]
name = "servers"
version = "0.14.2"
version = "0.15.0"
dependencies = [
"ahash 0.8.11",
"api",
@@ -10950,7 +10950,7 @@ dependencies = [
[[package]]
name = "session"
version = "0.14.2"
version = "0.15.0"
dependencies = [
"api",
"arc-swap",
@@ -11158,9 +11158,9 @@ dependencies = [
[[package]]
name = "smallbitvec"
version = "2.5.3"
version = "2.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fcc3fc564a4b53fd1e8589628efafe57602d91bde78be18186b5f61e8faea470"
checksum = "d31d263dd118560e1a492922182ab6ca6dc1d03a3bf54e7699993f31a4150e3f"
[[package]]
name = "smallvec"
@@ -11275,7 +11275,7 @@ dependencies = [
[[package]]
name = "sql"
version = "0.14.2"
version = "0.15.0"
dependencies = [
"api",
"chrono",
@@ -11330,7 +11330,7 @@ dependencies = [
[[package]]
name = "sqlness-runner"
version = "0.14.2"
version = "0.15.0"
dependencies = [
"async-trait",
"clap 4.5.19",
@@ -11649,7 +11649,7 @@ dependencies = [
[[package]]
name = "store-api"
version = "0.14.2"
version = "0.15.0"
dependencies = [
"api",
"aquamarine",
@@ -11798,7 +11798,7 @@ dependencies = [
[[package]]
name = "substrait"
version = "0.14.2"
version = "0.15.0"
dependencies = [
"async-trait",
"bytes",
@@ -11978,7 +11978,7 @@ dependencies = [
[[package]]
name = "table"
version = "0.14.2"
version = "0.15.0"
dependencies = [
"api",
"async-trait",
@@ -12229,7 +12229,7 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76"
[[package]]
name = "tests-fuzz"
version = "0.14.2"
version = "0.15.0"
dependencies = [
"arbitrary",
"async-trait",
@@ -12273,7 +12273,7 @@ dependencies = [
[[package]]
name = "tests-integration"
version = "0.14.2"
version = "0.15.0"
dependencies = [
"api",
"arrow-flight",
@@ -12340,7 +12340,7 @@ dependencies = [
"sql",
"sqlx",
"store-api",
"substrait 0.14.2",
"substrait 0.15.0",
"table",
"tempfile",
"time",

View File

@@ -68,15 +68,16 @@ members = [
resolver = "2"
[workspace.package]
version = "0.14.2"
version = "0.15.0"
edition = "2021"
license = "Apache-2.0"
[workspace.lints]
clippy.print_stdout = "warn"
clippy.print_stderr = "warn"
clippy.dbg_macro = "warn"
clippy.implicit_clone = "warn"
clippy.result_large_err = "allow"
clippy.large_enum_variant = "allow"
clippy.doc_overindented_list_items = "allow"
rust.unknown_lints = "deny"
rust.unexpected_cfgs = { level = "warn", check-cfg = ['cfg(tokio_unstable)'] }

flake.lock (generated, 18 changed lines)
View File

@@ -8,11 +8,11 @@
"rust-analyzer-src": "rust-analyzer-src"
},
"locked": {
"lastModified": 1737613896,
"narHash": "sha256-ldqXIglq74C7yKMFUzrS9xMT/EVs26vZpOD68Sh7OcU=",
"lastModified": 1742452566,
"narHash": "sha256-sVuLDQ2UIWfXUBbctzrZrXM2X05YjX08K7XHMztt36E=",
"owner": "nix-community",
"repo": "fenix",
"rev": "303a062fdd8e89f233db05868468975d17855d80",
"rev": "7d9ba794daf5e8cc7ee728859bc688d8e26d5f06",
"type": "github"
},
"original": {
@@ -41,11 +41,11 @@
},
"nixpkgs": {
"locked": {
"lastModified": 1737569578,
"narHash": "sha256-6qY0pk2QmUtBT9Mywdvif0i/CLVgpCjMUn6g9vB+f3M=",
"lastModified": 1743576891,
"narHash": "sha256-vXiKURtntURybE6FMNFAVpRPr8+e8KoLPrYs9TGuAKc=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "47addd76727f42d351590c905d9d1905ca895b82",
"rev": "44a69ed688786e98a101f02b712c313f1ade37ab",
"type": "github"
},
"original": {
@@ -65,11 +65,11 @@
"rust-analyzer-src": {
"flake": false,
"locked": {
"lastModified": 1737581772,
"narHash": "sha256-t1P2Pe3FAX9TlJsCZbmJ3wn+C4qr6aSMypAOu8WNsN0=",
"lastModified": 1742296961,
"narHash": "sha256-gCpvEQOrugHWLimD1wTFOJHagnSEP6VYBDspq96Idu0=",
"owner": "rust-lang",
"repo": "rust-analyzer",
"rev": "582af7ee9c8d84f5d534272fc7de9f292bd849be",
"rev": "15d87419f1a123d8f888d608129c3ce3ff8f13d4",
"type": "github"
},
"original": {

View File

@@ -21,7 +21,7 @@
lib = nixpkgs.lib;
rustToolchain = fenix.packages.${system}.fromToolchainName {
name = (lib.importTOML ./rust-toolchain.toml).toolchain.channel;
sha256 = "sha256-f/CVA1EC61EWbh0SjaRNhLL0Ypx2ObupbzigZp8NmL4=";
sha256 = "sha256-i0Sh/ZFFsHlZ3oFZFc24qdk6Cd8Do8OPU4HJQsrKOeM=";
};
in
{

View File

@@ -1,2 +1,2 @@
[toolchain]
channel = "nightly-2024-12-25"
channel = "nightly-2025-04-15"

View File

@@ -84,12 +84,6 @@ mod tests {
let key1 = "3178510";
let key2 = "4215648";
// have collision
assert_eq!(
oid_map.hasher.hash_one(key1) as u32,
oid_map.hasher.hash_one(key2) as u32
);
// insert them into oid_map
let oid1 = oid_map.get_oid(key1);
let oid2 = oid_map.get_oid(key2);

View File

@@ -36,8 +36,8 @@ use common_grpc::flight::{FlightDecoder, FlightMessage};
use common_query::Output;
use common_recordbatch::error::ExternalSnafu;
use common_recordbatch::RecordBatchStreamWrapper;
use common_telemetry::error;
use common_telemetry::tracing_context::W3cTrace;
use common_telemetry::{error, warn};
use futures::future;
use futures_util::{Stream, StreamExt, TryStreamExt};
use prost::Message;
@@ -192,36 +192,6 @@ impl Database {
from_grpc_response(response)
}
/// Retry if connection fails, max_retries is the max number of retries, so the total wait time
/// is `max_retries * GRPC_CONN_TIMEOUT`
pub async fn handle_with_retry(&self, request: Request, max_retries: u32) -> Result<u32> {
let mut client = make_database_client(&self.client)?.inner;
let mut retries = 0;
let request = self.to_rpc_request(request);
loop {
let raw_response = client.handle(request.clone()).await;
match (raw_response, retries < max_retries) {
(Ok(resp), _) => return from_grpc_response(resp.into_inner()),
(Err(err), true) => {
// determine if the error is retryable
if is_grpc_retryable(&err) {
// retry
retries += 1;
warn!("Retrying {} times with error = {:?}", retries, err);
continue;
}
}
(Err(err), false) => {
error!(
"Failed to send request to grpc handle after {} retries, error = {:?}",
retries, err
);
return Err(err.into());
}
}
}
}
#[inline]
fn to_rpc_request(&self, request: Request) -> GreptimeRequest {
GreptimeRequest {
@@ -398,11 +368,6 @@ impl Database {
}
}
/// by grpc standard, only `Unavailable` is retryable, see: https://github.com/grpc/grpc/blob/master/doc/statuscodes.md#status-codes-and-their-use-in-grpc
pub fn is_grpc_retryable(err: &tonic::Status) -> bool {
matches!(err.code(), tonic::Code::Unavailable)
}
#[derive(Default, Debug, Clone)]
struct FlightContext {
auth_header: Option<AuthHeader>,

View File

@@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fmt;
use std::time::Duration;
use async_trait::async_trait;
@@ -132,7 +131,7 @@ impl SubCommand {
}
}
#[derive(Default, Parser)]
#[derive(Debug, Default, Parser)]
pub struct StartCommand {
/// The address to bind the gRPC server.
#[clap(long, alias = "bind-addr")]
@@ -172,27 +171,6 @@ pub struct StartCommand {
backend: Option<BackendImpl>,
}
impl fmt::Debug for StartCommand {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("StartCommand")
.field("rpc_bind_addr", &self.rpc_bind_addr)
.field("rpc_server_addr", &self.rpc_server_addr)
.field("store_addrs", &self.sanitize_store_addrs())
.field("config_file", &self.config_file)
.field("selector", &self.selector)
.field("use_memory_store", &self.use_memory_store)
.field("enable_region_failover", &self.enable_region_failover)
.field("http_addr", &self.http_addr)
.field("http_timeout", &self.http_timeout)
.field("env_prefix", &self.env_prefix)
.field("data_home", &self.data_home)
.field("store_key_prefix", &self.store_key_prefix)
.field("max_txn_ops", &self.max_txn_ops)
.field("backend", &self.backend)
.finish()
}
}
impl StartCommand {
pub fn load_options(&self, global_options: &GlobalOptions) -> Result<MetasrvOptions> {
let mut opts = MetasrvOptions::load_layered_options(
@@ -206,15 +184,6 @@ impl StartCommand {
Ok(opts)
}
fn sanitize_store_addrs(&self) -> Option<Vec<String>> {
self.store_addrs.as_ref().map(|addrs| {
addrs
.iter()
.map(|addr| common_meta::kv_backend::util::sanitize_connection_string(addr))
.collect()
})
}
// The precedence order is: cli > config file > environment variables > default values.
fn merge_with_cli_options(
&self,

View File

@@ -12,8 +12,9 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fmt;
use std::iter::repeat_n;
use std::sync::Arc;
use std::{fmt, iter};
use common_query::error::{InvalidFuncArgsSnafu, Result};
use common_query::prelude::Volatility;
@@ -126,9 +127,10 @@ impl Function for MatchesTermFunction {
let term = term_column.get_ref(0).as_string().unwrap();
match term {
None => {
return Ok(Arc::new(BooleanVector::from_iter(
iter::repeat(None).take(text_column.len()),
)));
return Ok(Arc::new(BooleanVector::from_iter(repeat_n(
None,
text_column.len(),
))));
}
Some(term) => Some(MatchesTermFinder::new(term)),
}
@@ -217,7 +219,7 @@ impl MatchesTermFinder {
}
let mut pos = 0;
while let Some(found_pos) = self.finder.find(text[pos..].as_bytes()) {
while let Some(found_pos) = self.finder.find(&text.as_bytes()[pos..]) {
let actual_pos = pos + found_pos;
let prev_ok = self.starts_with_non_alnum
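Note: the last hunk above replaces `self.finder.find(text[pos..].as_bytes())` with `self.finder.find(&text.as_bytes()[pos..])`. A standalone sketch (not from the repository) of the difference: `pos` is a byte offset into the text, and if such an offset ever lands inside a multi-byte character, slicing the `&str` panics, while slicing the byte slice is always valid.

// Hypothetical example, independent of MatchesTermFinder.
fn main() {
    let text = "héllo"; // 'é' occupies bytes 1..3 in UTF-8
    let pos = 2;        // a byte offset that is not a char boundary

    // Safe: index into the raw bytes, as the updated code does.
    let tail = &text.as_bytes()[pos..];
    assert_eq!(tail.len(), text.len() - pos);

    // Would panic at runtime: byte index 2 is not a char boundary.
    // let _tail = &text[pos..];
}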

View File

@@ -37,7 +37,7 @@ impl fmt::Display for RateFunction {
impl Function for RateFunction {
fn name(&self) -> &str {
"prom_rate"
"rate"
}
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
@@ -82,7 +82,7 @@ mod tests {
#[test]
fn test_rate_function() {
let rate = RateFunction;
assert_eq!("prom_rate", rate.name());
assert_eq!("rate", rate.name());
assert_eq!(
ConcreteDataType::float64_datatype(),
rate.return_type(&[]).unwrap()

View File

@@ -13,8 +13,10 @@
// limitations under the License.
use std::sync::Arc;
mod greatest;
mod to_unixtime;
use greatest::GreatestFunction;
use to_unixtime::ToUnixtimeFunction;
use crate::function_registry::FunctionRegistry;
@@ -24,5 +26,6 @@ pub(crate) struct TimestampFunction;
impl TimestampFunction {
pub fn register(registry: &FunctionRegistry) {
registry.register(Arc::new(ToUnixtimeFunction));
registry.register(Arc::new(GreatestFunction));
}
}

View File

@@ -0,0 +1,328 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fmt::{self};
use common_query::error::{
self, ArrowComputeSnafu, InvalidFuncArgsSnafu, Result, UnsupportedInputDataTypeSnafu,
};
use common_query::prelude::{Signature, Volatility};
use datafusion::arrow::compute::kernels::cmp::gt;
use datatypes::arrow::array::AsArray;
use datatypes::arrow::compute::cast;
use datatypes::arrow::compute::kernels::zip;
use datatypes::arrow::datatypes::{
DataType as ArrowDataType, Date32Type, TimeUnit, TimestampMicrosecondType,
TimestampMillisecondType, TimestampNanosecondType, TimestampSecondType,
};
use datatypes::prelude::ConcreteDataType;
use datatypes::types::TimestampType;
use datatypes::vectors::{Helper, VectorRef};
use snafu::{ensure, ResultExt};
use crate::function::{Function, FunctionContext};
#[derive(Clone, Debug, Default)]
pub struct GreatestFunction;
const NAME: &str = "greatest";
macro_rules! gt_time_types {
($ty: ident, $columns:expr) => {{
let column1 = $columns[0].to_arrow_array();
let column2 = $columns[1].to_arrow_array();
let column1 = column1.as_primitive::<$ty>();
let column2 = column2.as_primitive::<$ty>();
let boolean_array = gt(&column1, &column2).context(ArrowComputeSnafu)?;
let result = zip::zip(&boolean_array, &column1, &column2).context(ArrowComputeSnafu)?;
Helper::try_into_vector(&result).context(error::FromArrowArraySnafu)
}};
}
impl Function for GreatestFunction {
fn name(&self) -> &str {
NAME
}
fn return_type(&self, input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
ensure!(
input_types.len() == 2,
InvalidFuncArgsSnafu {
err_msg: format!(
"The length of the args is not correct, expect exactly two, have: {}",
input_types.len()
)
}
);
match &input_types[0] {
ConcreteDataType::String(_) => Ok(ConcreteDataType::timestamp_millisecond_datatype()),
ConcreteDataType::Date(_) => Ok(ConcreteDataType::date_datatype()),
ConcreteDataType::Timestamp(ts_type) => Ok(ConcreteDataType::Timestamp(*ts_type)),
_ => UnsupportedInputDataTypeSnafu {
function: NAME,
datatypes: input_types,
}
.fail(),
}
}
fn signature(&self) -> Signature {
Signature::uniform(
2,
vec![
ConcreteDataType::string_datatype(),
ConcreteDataType::date_datatype(),
ConcreteDataType::timestamp_nanosecond_datatype(),
ConcreteDataType::timestamp_microsecond_datatype(),
ConcreteDataType::timestamp_millisecond_datatype(),
ConcreteDataType::timestamp_second_datatype(),
],
Volatility::Immutable,
)
}
fn eval(&self, _func_ctx: &FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
ensure!(
columns.len() == 2,
InvalidFuncArgsSnafu {
err_msg: format!(
"The length of the args is not correct, expect exactly two, have: {}",
columns.len()
),
}
);
match columns[0].data_type() {
ConcreteDataType::String(_) => {
let column1 = cast(
&columns[0].to_arrow_array(),
&ArrowDataType::Timestamp(TimeUnit::Millisecond, None),
)
.context(ArrowComputeSnafu)?;
let column1 = column1.as_primitive::<TimestampMillisecondType>();
let column2 = cast(
&columns[1].to_arrow_array(),
&ArrowDataType::Timestamp(TimeUnit::Millisecond, None),
)
.context(ArrowComputeSnafu)?;
let column2 = column2.as_primitive::<TimestampMillisecondType>();
let boolean_array = gt(&column1, &column2).context(ArrowComputeSnafu)?;
let result =
zip::zip(&boolean_array, &column1, &column2).context(ArrowComputeSnafu)?;
Ok(Helper::try_into_vector(&result).context(error::FromArrowArraySnafu)?)
}
ConcreteDataType::Date(_) => gt_time_types!(Date32Type, columns),
ConcreteDataType::Timestamp(ts_type) => match ts_type {
TimestampType::Second(_) => gt_time_types!(TimestampSecondType, columns),
TimestampType::Millisecond(_) => {
gt_time_types!(TimestampMillisecondType, columns)
}
TimestampType::Microsecond(_) => {
gt_time_types!(TimestampMicrosecondType, columns)
}
TimestampType::Nanosecond(_) => {
gt_time_types!(TimestampNanosecondType, columns)
}
},
_ => UnsupportedInputDataTypeSnafu {
function: NAME,
datatypes: columns.iter().map(|c| c.data_type()).collect::<Vec<_>>(),
}
.fail(),
}
}
}
impl fmt::Display for GreatestFunction {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "GREATEST")
}
}
#[cfg(test)]
mod tests {
use std::sync::Arc;
use common_time::timestamp::TimeUnit;
use common_time::{Date, Timestamp};
use datatypes::types::{
DateType, TimestampMicrosecondType, TimestampMillisecondType, TimestampNanosecondType,
TimestampSecondType,
};
use datatypes::value::Value;
use datatypes::vectors::{
DateVector, StringVector, TimestampMicrosecondVector, TimestampMillisecondVector,
TimestampNanosecondVector, TimestampSecondVector, Vector,
};
use paste::paste;
use super::*;
#[test]
fn test_greatest_takes_string_vector() {
let function = GreatestFunction;
assert_eq!(
function
.return_type(&[
ConcreteDataType::string_datatype(),
ConcreteDataType::string_datatype()
])
.unwrap(),
ConcreteDataType::timestamp_millisecond_datatype()
);
let columns = vec![
Arc::new(StringVector::from(vec![
"1970-01-01".to_string(),
"2012-12-23".to_string(),
])) as _,
Arc::new(StringVector::from(vec![
"2001-02-01".to_string(),
"1999-01-01".to_string(),
])) as _,
];
let result = function
.eval(&FunctionContext::default(), &columns)
.unwrap();
let result = result
.as_any()
.downcast_ref::<TimestampMillisecondVector>()
.unwrap();
assert_eq!(result.len(), 2);
assert_eq!(
result.get(0),
Value::Timestamp(Timestamp::from_str("2001-02-01 00:00:00", None).unwrap())
);
assert_eq!(
result.get(1),
Value::Timestamp(Timestamp::from_str("2012-12-23 00:00:00", None).unwrap())
);
}
#[test]
fn test_greatest_takes_date_vector() {
let function = GreatestFunction;
assert_eq!(
function
.return_type(&[
ConcreteDataType::date_datatype(),
ConcreteDataType::date_datatype()
])
.unwrap(),
ConcreteDataType::Date(DateType)
);
let columns = vec![
Arc::new(DateVector::from_slice(vec![-1, 2])) as _,
Arc::new(DateVector::from_slice(vec![0, 1])) as _,
];
let result = function
.eval(&FunctionContext::default(), &columns)
.unwrap();
let result = result.as_any().downcast_ref::<DateVector>().unwrap();
assert_eq!(result.len(), 2);
assert_eq!(
result.get(0),
Value::Date(Date::from_str_utc("1970-01-01").unwrap())
);
assert_eq!(
result.get(1),
Value::Date(Date::from_str_utc("1970-01-03").unwrap())
);
}
#[test]
fn test_greatest_takes_datetime_vector() {
let function = GreatestFunction;
assert_eq!(
function
.return_type(&[
ConcreteDataType::timestamp_millisecond_datatype(),
ConcreteDataType::timestamp_millisecond_datatype()
])
.unwrap(),
ConcreteDataType::timestamp_millisecond_datatype()
);
let columns = vec![
Arc::new(TimestampMillisecondVector::from_slice(vec![-1, 2])) as _,
Arc::new(TimestampMillisecondVector::from_slice(vec![0, 1])) as _,
];
let result = function
.eval(&FunctionContext::default(), &columns)
.unwrap();
let result = result
.as_any()
.downcast_ref::<TimestampMillisecondVector>()
.unwrap();
assert_eq!(result.len(), 2);
assert_eq!(
result.get(0),
Value::Timestamp(Timestamp::from_str("1970-01-01 00:00:00", None).unwrap())
);
assert_eq!(
result.get(1),
Value::Timestamp(Timestamp::from_str("1970-01-01 00:00:00.002", None).unwrap())
);
}
macro_rules! test_timestamp {
($type: expr,$unit: ident) => {
paste! {
#[test]
fn [<test_greatest_takes_ $unit:lower _vector>]() {
let function = GreatestFunction;
assert_eq!(
function.return_type(&[$type, $type]).unwrap(),
ConcreteDataType::Timestamp(TimestampType::$unit([<Timestamp $unit Type>]))
);
let columns = vec![
Arc::new([<Timestamp $unit Vector>]::from_slice(vec![-1, 2])) as _,
Arc::new([<Timestamp $unit Vector>]::from_slice(vec![0, 1])) as _,
];
let result = function.eval(&FunctionContext::default(), &columns).unwrap();
let result = result.as_any().downcast_ref::<[<Timestamp $unit Vector>]>().unwrap();
assert_eq!(result.len(), 2);
assert_eq!(
result.get(0),
Value::Timestamp(Timestamp::new(0, TimeUnit::$unit))
);
assert_eq!(
result.get(1),
Value::Timestamp(Timestamp::new(2, TimeUnit::$unit))
);
}
}
}
}
test_timestamp!(
ConcreteDataType::timestamp_nanosecond_datatype(),
Nanosecond
);
test_timestamp!(
ConcreteDataType::timestamp_microsecond_datatype(),
Microsecond
);
test_timestamp!(
ConcreteDataType::timestamp_millisecond_datatype(),
Millisecond
);
test_timestamp!(ConcreteDataType::timestamp_second_datatype(), Second);
}

View File

@@ -35,7 +35,7 @@ pub mod memory;
pub mod rds;
pub mod test;
pub mod txn;
pub mod util;
pub type KvBackendRef<E = Error> = Arc<dyn KvBackend<Error = E> + Send + Sync>;
#[async_trait]

View File

@@ -1,85 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/// Removes sensitive information like passwords from connection strings.
///
/// This function sanitizes connection strings by removing credentials:
/// - For URL format (mysql://user:password@host:port/db): Removes everything before '@'
/// - For parameter format (host=localhost password=secret): Removes the password parameter
/// - For URL format without credentials (mysql://host:port/db): Removes the protocol prefix
///
/// # Arguments
///
/// * `conn_str` - The connection string to sanitize
///
/// # Returns
///
/// A sanitized version of the connection string with sensitive information removed
pub fn sanitize_connection_string(conn_str: &str) -> String {
// Case 1: URL format with credentials (mysql://user:password@host:port/db)
// Extract everything after the '@' symbol
if let Some(at_pos) = conn_str.find('@') {
return conn_str[at_pos + 1..].to_string();
}
// Case 2: Parameter format with password (host=localhost password=secret dbname=mydb)
// Filter out any parameter that starts with "password="
if conn_str.contains("password=") {
return conn_str
.split_whitespace()
.filter(|param| !param.starts_with("password="))
.collect::<Vec<_>>()
.join(" ");
}
// Case 3: URL format without credentials (mysql://host:port/db)
// Extract everything after the protocol prefix
if let Some(host_part) = conn_str.split("://").nth(1) {
return host_part.to_string();
}
// Case 4: Already sanitized or unknown format
// Return as is
conn_str.to_string()
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_sanitize_connection_string() {
// Test URL format with username/password
let conn_str = "mysql://user:password123@localhost:3306/db";
assert_eq!(sanitize_connection_string(conn_str), "localhost:3306/db");
// Test URL format without credentials
let conn_str = "mysql://localhost:3306/db";
assert_eq!(sanitize_connection_string(conn_str), "localhost:3306/db");
// Test parameter format with password
let conn_str = "host=localhost port=5432 user=postgres password=secret dbname=mydb";
assert_eq!(
sanitize_connection_string(conn_str),
"host=localhost port=5432 user=postgres dbname=mydb"
);
// Test parameter format without password
let conn_str = "host=localhost port=5432 user=postgres dbname=mydb";
assert_eq!(
sanitize_connection_string(conn_str),
"host=localhost port=5432 user=postgres dbname=mydb"
);
}
}

View File

@@ -15,8 +15,6 @@
#![feature(assert_matches)]
#![feature(btree_extract_if)]
#![feature(let_chains)]
#![feature(extract_if)]
#![feature(hash_extract_if)]
pub mod cache;
pub mod cache_invalidator;

View File

@@ -176,15 +176,12 @@ impl TableRoute {
})?
.into();
let leader_peer = peers
.get(region_route.leader_peer_index as usize)
.cloned()
.map(Into::into);
let leader_peer = peers.get(region_route.leader_peer_index as usize).cloned();
let follower_peers = region_route
.follower_peer_indexes
.into_iter()
.filter_map(|x| peers.get(x as usize).cloned().map(Into::into))
.filter_map(|x| peers.get(x as usize).cloned())
.collect::<Vec<_>>();
region_routes.push(RegionRoute {

View File

@@ -24,7 +24,7 @@ use datatypes::prelude::*;
use datatypes::vectors::{Helper as VectorHelper, VectorRef};
use snafu::ResultExt;
use crate::error::{self, Error, FromScalarValueSnafu, IntoVectorSnafu, Result};
use crate::error::{self, FromScalarValueSnafu, IntoVectorSnafu, Result};
use crate::prelude::*;
pub type AggregateFunctionCreatorRef = Arc<dyn AggregateFunctionCreator>;
@@ -166,8 +166,7 @@ impl DfAccumulator for DfAccumulatorAdaptor {
let output_type = self.creator.output_type()?;
let scalar_value = value
.try_to_scalar_value(&output_type)
.context(error::ToScalarValueSnafu)
.map_err(Error::from)?;
.context(error::ToScalarValueSnafu)?;
Ok(scalar_value)
}

View File

@@ -398,46 +398,45 @@ impl DatanodeBuilder {
schema_metadata_manager: SchemaMetadataManagerRef,
plugins: Plugins,
) -> Result<Vec<RegionEngineRef>> {
let mut metric_engine_config = metric_engine::config::EngineConfig::default();
let mut mito_engine_config = MitoConfig::default();
let mut file_engine_config = file_engine::config::EngineConfig::default();
let mut engines = vec![];
let mut metric_engine_config = opts.region_engine.iter().find_map(|c| match c {
RegionEngineConfig::Metric(config) => Some(config.clone()),
_ => None,
});
for engine in &opts.region_engine {
match engine {
RegionEngineConfig::Mito(config) => {
mito_engine_config = config.clone();
let mito_engine = Self::build_mito_engine(
opts,
object_store_manager.clone(),
config.clone(),
schema_metadata_manager.clone(),
plugins.clone(),
)
.await?;
let metric_engine = MetricEngine::try_new(
mito_engine.clone(),
metric_engine_config.take().unwrap_or_default(),
)
.context(BuildMetricEngineSnafu)?;
engines.push(Arc::new(mito_engine) as _);
engines.push(Arc::new(metric_engine) as _);
}
RegionEngineConfig::File(config) => {
file_engine_config = config.clone();
let engine = FileRegionEngine::new(
config.clone(),
object_store_manager.default_object_store().clone(), // TODO: implement custom storage for file engine
);
engines.push(Arc::new(engine) as _);
}
RegionEngineConfig::Metric(metric_config) => {
metric_engine_config = metric_config.clone();
RegionEngineConfig::Metric(_) => {
// Already handled in `build_mito_engine`.
}
}
}
let mito_engine = Self::build_mito_engine(
opts,
object_store_manager.clone(),
mito_engine_config,
schema_metadata_manager.clone(),
plugins.clone(),
)
.await?;
let metric_engine = MetricEngine::try_new(mito_engine.clone(), metric_engine_config)
.context(BuildMetricEngineSnafu)?;
let file_engine = FileRegionEngine::new(
file_engine_config,
object_store_manager.default_object_store().clone(), // TODO: implement custom storage for file engine
);
Ok(vec![
Arc::new(mito_engine) as _,
Arc::new(metric_engine) as _,
Arc::new(file_engine) as _,
])
Ok(engines)
}
/// Builds [MitoEngine] according to options.

View File

@@ -253,9 +253,10 @@ fn create_current_timestamp_vector(
data_type: &ConcreteDataType,
num_rows: usize,
) -> Result<VectorRef> {
let current_timestamp_vector = TimestampMillisecondVector::from_values(
std::iter::repeat(util::current_time_millis()).take(num_rows),
);
let current_timestamp_vector = TimestampMillisecondVector::from_values(std::iter::repeat_n(
util::current_time_millis(),
num_rows,
));
if data_type.is_timestamp() {
current_timestamp_vector.cast(data_type)
} else {
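Note: this hunk and several others in the changeset (MatchesTermFunction, ConstantVector, NullVector, replicate_decimal128, replicate_primitive) swap `std::iter::repeat(x).take(n)` for `std::iter::repeat_n(x, n)`, which is stable in recent Rust releases. A minimal sketch (not from the repository) showing the two forms yield the same items:

// Hypothetical example: repeat_n(value, n) produces the same sequence as repeat(value).take(n).
fn main() {
    let a: Vec<i64> = std::iter::repeat(7).take(3).collect();
    let b: Vec<i64> = std::iter::repeat_n(7, 3).collect();
    assert_eq!(a, b); // both are [7, 7, 7]
    assert_eq!(std::iter::repeat_n(7, 3).len(), 3); // RepeatN reports an exact length
}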

View File

@@ -198,8 +198,7 @@ impl fmt::Debug for ConstantVector {
impl Serializable for ConstantVector {
fn serialize_to_json(&self) -> Result<Vec<serde_json::Value>> {
std::iter::repeat(self.get(0))
.take(self.len())
std::iter::repeat_n(self.get(0), self.len())
.map(serde_json::Value::try_from)
.collect::<serde_json::Result<_>>()
.context(SerializeSnafu)

View File

@@ -412,7 +412,7 @@ pub(crate) fn replicate_decimal128(
// Safety: std::iter::Repeat and std::iter::Take implement TrustedLen.
builder
.mutable_array
.append_trusted_len_iter(std::iter::repeat(data).take(repeat_times));
.append_trusted_len_iter(std::iter::repeat_n(data, repeat_times));
}
}
None => {

View File

@@ -16,8 +16,8 @@ use std::any::Any;
use std::sync::Arc;
use arrow::array::Array;
use arrow::datatypes::Int32Type;
use arrow_array::{ArrayRef, DictionaryArray, Int32Array};
use arrow::datatypes::Int64Type;
use arrow_array::{ArrayRef, DictionaryArray, Int64Array};
use serde_json::Value as JsonValue;
use snafu::ResultExt;
@@ -32,7 +32,7 @@ use crate::vectors::{self, Helper, Validity, Vector, VectorRef};
/// Vector of dictionaries, basically backed by Arrow's `DictionaryArray`.
#[derive(Debug, PartialEq)]
pub struct DictionaryVector {
array: DictionaryArray<Int32Type>,
array: DictionaryArray<Int64Type>,
/// The datatype of the items in the dictionary.
item_type: ConcreteDataType,
/// The vector of items in the dictionary.
@@ -41,7 +41,7 @@ pub struct DictionaryVector {
impl DictionaryVector {
/// Create a new instance of `DictionaryVector` from a dictionary array and item type
pub fn new(array: DictionaryArray<Int32Type>, item_type: ConcreteDataType) -> Result<Self> {
pub fn new(array: DictionaryArray<Int64Type>, item_type: ConcreteDataType) -> Result<Self> {
let item_vector = Helper::try_into_vector(array.values())?;
Ok(Self {
@@ -52,12 +52,12 @@ impl DictionaryVector {
}
/// Returns the underlying Arrow dictionary array
pub fn array(&self) -> &DictionaryArray<Int32Type> {
pub fn array(&self) -> &DictionaryArray<Int64Type> {
&self.array
}
/// Returns the keys array of this dictionary
pub fn keys(&self) -> &arrow_array::PrimitiveArray<Int32Type> {
pub fn keys(&self) -> &arrow_array::PrimitiveArray<Int64Type> {
self.array.keys()
}
@@ -74,7 +74,7 @@ impl DictionaryVector {
impl Vector for DictionaryVector {
fn data_type(&self) -> ConcreteDataType {
ConcreteDataType::Dictionary(DictionaryType::new(
ConcreteDataType::int32_datatype(),
ConcreteDataType::int64_datatype(),
self.item_type.clone(),
))
}
@@ -163,10 +163,10 @@ impl Serializable for DictionaryVector {
}
}
impl TryFrom<DictionaryArray<Int32Type>> for DictionaryVector {
impl TryFrom<DictionaryArray<Int64Type>> for DictionaryVector {
type Error = crate::error::Error;
fn try_from(array: DictionaryArray<Int32Type>) -> Result<Self> {
fn try_from(array: DictionaryArray<Int64Type>) -> Result<Self> {
let item_type = ConcreteDataType::from_arrow_type(array.values().data_type());
let item_vector = Helper::try_into_vector(array.values())?;
@@ -243,7 +243,7 @@ impl VectorOp for DictionaryVector {
previous_offset = offset;
}
let new_keys = Int32Array::from(replicated_keys);
let new_keys = Int64Array::from(replicated_keys);
let new_array = DictionaryArray::try_new(new_keys, self.values().clone())
.expect("Failed to create replicated dictionary array");
@@ -261,7 +261,7 @@ impl VectorOp for DictionaryVector {
let filtered_key_array = filtered_key_vector.to_arrow_array();
let filtered_key_array = filtered_key_array
.as_any()
.downcast_ref::<Int32Array>()
.downcast_ref::<Int64Array>()
.unwrap();
let new_array = DictionaryArray::try_new(filtered_key_array.clone(), self.values().clone())
@@ -291,7 +291,7 @@ impl VectorOp for DictionaryVector {
let key_vector = Helper::try_into_vector(&key_array)?;
let new_key_vector = key_vector.take(indices)?;
let new_key_array = new_key_vector.to_arrow_array();
let new_key_array = new_key_array.as_any().downcast_ref::<Int32Array>().unwrap();
let new_key_array = new_key_array.as_any().downcast_ref::<Int64Array>().unwrap();
let new_array = DictionaryArray::try_new(new_key_array.clone(), self.values().clone())
.expect("Failed to create filtered dictionary array");
@@ -318,7 +318,7 @@ mod tests {
// Keys: [0, 1, 2, null, 1, 3]
// Resulting in: ["a", "b", "c", null, "b", "d"]
let values = StringArray::from(vec!["a", "b", "c", "d"]);
let keys = Int32Array::from(vec![Some(0), Some(1), Some(2), None, Some(1), Some(3)]);
let keys = Int64Array::from(vec![Some(0), Some(1), Some(2), None, Some(1), Some(3)]);
let dict_array = DictionaryArray::new(keys, Arc::new(values));
DictionaryVector::try_from(dict_array).unwrap()
}
@@ -404,7 +404,7 @@ mod tests {
assert_eq!(
casted.data_type(),
ConcreteDataType::Dictionary(DictionaryType::new(
ConcreteDataType::int32_datatype(),
ConcreteDataType::int64_datatype(),
ConcreteDataType::string_datatype(),
))
);

View File

@@ -20,7 +20,7 @@ use std::sync::Arc;
use arrow::array::{Array, ArrayRef, StringArray};
use arrow::compute;
use arrow::compute::kernels::comparison;
use arrow::datatypes::{DataType as ArrowDataType, Int32Type, TimeUnit};
use arrow::datatypes::{DataType as ArrowDataType, Int64Type, TimeUnit};
use arrow_array::DictionaryArray;
use arrow_schema::IntervalUnit;
use datafusion_common::ScalarValue;
@@ -348,11 +348,11 @@ impl Helper {
ArrowDataType::Decimal128(_, _) => {
Arc::new(Decimal128Vector::try_from_arrow_array(array)?)
}
ArrowDataType::Dictionary(key, value) if matches!(&**key, ArrowDataType::Int32) => {
ArrowDataType::Dictionary(key, value) if matches!(&**key, ArrowDataType::Int64) => {
let array = array
.as_ref()
.as_any()
.downcast_ref::<DictionaryArray<Int32Type>>()
.downcast_ref::<DictionaryArray<Int64Type>>()
.unwrap(); // Safety: the type is guarded by match arm condition
Arc::new(DictionaryVector::new(
array.clone(),

View File

@@ -120,9 +120,7 @@ impl fmt::Debug for NullVector {
impl Serializable for NullVector {
fn serialize_to_json(&self) -> Result<Vec<serde_json::Value>> {
Ok(std::iter::repeat(serde_json::Value::Null)
.take(self.len())
.collect())
Ok(std::iter::repeat_n(serde_json::Value::Null, self.len()).collect())
}
}
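
Several hunks in this compare replace `std::iter::repeat(x).take(n)` with `std::iter::repeat_n(x, n)`. A minimal std-only sketch of the equivalence (assuming a recent stable Rust where `repeat_n` is available):

fn main() {
    let with_take: Vec<i32> = std::iter::repeat(7).take(3).collect();
    let with_repeat_n: Vec<i32> = std::iter::repeat_n(7, 3).collect();
    // Both produce exactly three clones of the element.
    assert_eq!(with_take, with_repeat_n);
    assert_eq!(with_repeat_n, vec![7, 7, 7]);
}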

View File

@@ -388,7 +388,7 @@ pub(crate) fn replicate_primitive<T: LogicalPrimitiveType>(
// Safety: std::iter::Repeat and std::iter::Take implement TrustedLen.
builder
.mutable_array
.append_trusted_len_iter(std::iter::repeat(data).take(repeat_times));
.append_trusted_len_iter(std::iter::repeat_n(data, repeat_times));
}
}
None => {

View File

@@ -37,12 +37,11 @@ use tokio::sync::{Mutex, RwLock};
use crate::adapter::{CreateFlowArgs, StreamingEngine};
use crate::batching_mode::engine::BatchingEngine;
use crate::batching_mode::{FRONTEND_SCAN_TIMEOUT, MIN_REFRESH_DURATION};
use crate::engine::FlowEngine;
use crate::error::{
CreateFlowSnafu, ExternalSnafu, FlowNotFoundSnafu, IllegalCheckTaskStateSnafu,
InsertIntoFlowSnafu, InternalSnafu, JoinTaskSnafu, ListFlowsSnafu, NoAvailableFrontendSnafu,
SyncCheckTaskSnafu, UnexpectedSnafu,
InsertIntoFlowSnafu, InternalSnafu, JoinTaskSnafu, ListFlowsSnafu, SyncCheckTaskSnafu,
UnexpectedSnafu,
};
use crate::metrics::METRIC_FLOW_TASK_COUNT;
use crate::repr::{self, DiffRow};
@@ -82,11 +81,6 @@ impl FlowDualEngine {
}
}
/// Determine if the engine is in distributed mode
pub fn is_distributed(&self) -> bool {
self.streaming_engine.node_id.is_some()
}
pub fn streaming_engine(&self) -> Arc<StreamingEngine> {
self.streaming_engine.clone()
}
@@ -95,39 +89,6 @@ impl FlowDualEngine {
self.batching_engine.clone()
}
/// In distributed mode, scan periodically (every 1s) until an available frontend is found or the timeout elapses;
/// in standalone mode, return immediately.
/// Note that this function returns as soon as any frontend appears in the cluster info.
async fn wait_for_available_frontend(&self, timeout: std::time::Duration) -> Result<(), Error> {
if !self.is_distributed() {
return Ok(());
}
let frontend_client = self.batching_engine().frontend_client.clone();
let sleep_duration = std::time::Duration::from_millis(1_000);
let now = std::time::Instant::now();
loop {
let frontend_list = frontend_client.scan_for_frontend().await?;
if !frontend_list.is_empty() {
let fe_list = frontend_list
.iter()
.map(|(_, info)| &info.peer.addr)
.collect::<Vec<_>>();
info!("Available frontend found: {:?}", fe_list);
return Ok(());
}
let elapsed = now.elapsed();
tokio::time::sleep(sleep_duration).await;
info!("Waiting for available frontend, elapsed={:?}", elapsed);
if elapsed >= timeout {
return NoAvailableFrontendSnafu {
timeout,
context: "No available frontend found in cluster info",
}
.fail();
}
}
}
/// Try to sync with the check task. This is only used in drop flow & flush flow, so a flow id is required.
///
/// The sync is needed to make sure flush flow actually gets called.
@@ -377,36 +338,18 @@ struct ConsistentCheckTask {
impl ConsistentCheckTask {
async fn start_check_task(engine: &Arc<FlowDualEngine>) -> Result<Self, Error> {
let engine = engine.clone();
// first do recover flows
engine.check_flow_consistent(true, false).await?;
let inner = engine.clone();
let (tx, mut rx) = tokio::sync::mpsc::channel(1);
let (trigger_tx, mut trigger_rx) =
tokio::sync::mpsc::channel::<(bool, bool, tokio::sync::oneshot::Sender<()>)>(10);
let handle = common_runtime::spawn_global(async move {
// first check if available frontend is found
if let Err(err) = engine
.wait_for_available_frontend(FRONTEND_SCAN_TIMEOUT)
.await
{
warn!("No frontend is available yet:\n {err:?}");
}
// then do recover flows, if failed, always retry
let mut recover_retry = 0;
while let Err(err) = engine.check_flow_consistent(true, false).await {
recover_retry += 1;
error!(
"Failed to recover flows:\n {err:?}, retry {} in {}s",
recover_retry,
MIN_REFRESH_DURATION.as_secs()
);
tokio::time::sleep(MIN_REFRESH_DURATION).await;
}
// then do check flows, with configurable allow_create and allow_drop
let (mut allow_create, mut allow_drop) = (false, false);
let mut ret_signal: Option<tokio::sync::oneshot::Sender<()>> = None;
loop {
if let Err(err) = engine.check_flow_consistent(allow_create, allow_drop).await {
if let Err(err) = inner.check_flow_consistent(allow_create, allow_drop).await {
error!(err; "Failed to check flow consistent");
}
if let Some(done) = ret_signal.take() {
@@ -591,12 +534,7 @@ impl FlowEngine for FlowDualEngine {
match flow_type {
Some(FlowType::Batching) => self.batching_engine.flush_flow(flow_id).await,
Some(FlowType::Streaming) => self.streaming_engine.flush_flow(flow_id).await,
None => {
warn!(
"Currently flow={flow_id} doesn't exist in flownode, ignore flush_flow request"
);
Ok(0)
}
None => Ok(0),
}
}
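
The check task above is driven by a trigger channel carrying `(allow_create, allow_drop, oneshot::Sender<()>)`. A minimal, hypothetical sketch of that trigger-with-acknowledgement pattern (names and the no-op check body are placeholders, not the project's code):

use tokio::sync::{mpsc, oneshot};

#[tokio::main]
async fn main() {
    // The caller sends the flags plus a oneshot sender; the background
    // loop fires the oneshot once the check has actually run.
    let (trigger_tx, mut trigger_rx) =
        mpsc::channel::<(bool, bool, oneshot::Sender<()>)>(10);

    tokio::spawn(async move {
        while let Some((allow_create, allow_drop, done)) = trigger_rx.recv().await {
            // Placeholder for check_flow_consistent(allow_create, allow_drop).
            let _ = (allow_create, allow_drop);
            let _ = done.send(());
        }
    });

    let (tx, rx) = oneshot::channel();
    trigger_tx.send((false, true, tx)).await.unwrap();
    rx.await.unwrap(); // blocks until the check has completed
}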

View File

@@ -31,19 +31,10 @@ pub const DEFAULT_BATCHING_ENGINE_QUERY_TIMEOUT: Duration = Duration::from_secs(
pub const SLOW_QUERY_THRESHOLD: Duration = Duration::from_secs(60);
/// The minimum duration between two query executions by a batching mode task
pub const MIN_REFRESH_DURATION: Duration = Duration::new(5, 0);
const MIN_REFRESH_DURATION: Duration = Duration::new(5, 0);
/// Grpc connection timeout
const GRPC_CONN_TIMEOUT: Duration = Duration::from_secs(5);
/// Grpc max retry number
const GRPC_MAX_RETRIES: u32 = 3;
/// Timeout for the flow to wait for an available frontend;
/// if no available frontend is found after FRONTEND_SCAN_TIMEOUT has elapsed, return an error,
/// which should prevent the flownode from starting.
pub const FRONTEND_SCAN_TIMEOUT: Duration = Duration::from_secs(30);
/// Frontend activity timeout.
/// If a frontend is down (not sending heartbeats) for more than FRONTEND_ACTIVITY_TIMEOUT, it is removed from the list the flownode uses to connect.
pub const FRONTEND_ACTIVITY_TIMEOUT: Duration = Duration::from_secs(60);

View File

@@ -49,8 +49,7 @@ use crate::{CreateFlowArgs, Error, FlowId, TableName};
pub struct BatchingEngine {
tasks: RwLock<BTreeMap<FlowId, BatchingTask>>,
shutdown_txs: RwLock<BTreeMap<FlowId, oneshot::Sender<()>>>,
/// frontend client for insert request
pub(crate) frontend_client: Arc<FrontendClient>,
frontend_client: Arc<FrontendClient>,
flow_metadata_manager: FlowMetadataManagerRef,
table_meta: TableMetadataManagerRef,
catalog_manager: CatalogManagerRef,

View File

@@ -15,7 +15,6 @@
//! Frontend client to run flow as batching task which is time-window-aware normal query triggered every tick set by user
use std::sync::{Arc, Weak};
use std::time::SystemTime;
use api::v1::greptime_request::Request;
use api::v1::CreateTableExpr;
@@ -27,17 +26,15 @@ use common_meta::peer::Peer;
use common_meta::rpc::store::RangeRequest;
use common_query::Output;
use common_telemetry::warn;
use itertools::Itertools;
use meta_client::client::MetaClient;
use servers::query_handler::grpc::GrpcQueryHandler;
use session::context::{QueryContextBuilder, QueryContextRef};
use snafu::{OptionExt, ResultExt};
use crate::batching_mode::{
DEFAULT_BATCHING_ENGINE_QUERY_TIMEOUT, FRONTEND_ACTIVITY_TIMEOUT, GRPC_CONN_TIMEOUT,
GRPC_MAX_RETRIES,
DEFAULT_BATCHING_ENGINE_QUERY_TIMEOUT, GRPC_CONN_TIMEOUT, GRPC_MAX_RETRIES,
};
use crate::error::{ExternalSnafu, InvalidRequestSnafu, NoAvailableFrontendSnafu, UnexpectedSnafu};
use crate::error::{ExternalSnafu, InvalidRequestSnafu, UnexpectedSnafu};
use crate::Error;
/// Just like [`GrpcQueryHandler`] but use BoxedError
@@ -130,24 +127,10 @@ impl DatabaseWithPeer {
fn new(database: Database, peer: Peer) -> Self {
Self { database, peer }
}
/// Try sending a "SELECT 1" to the database
async fn try_select_one(&self) -> Result<(), Error> {
// Note: use `sql` here so that `SELECT 1` returns one row
let _ = self
.database
.sql("SELECT 1")
.await
.with_context(|_| InvalidRequestSnafu {
context: format!("Failed to handle `SELECT 1` request at {:?}", self.peer),
})?;
Ok(())
}
}
impl FrontendClient {
/// scan for available frontend from metadata
pub(crate) async fn scan_for_frontend(&self) -> Result<Vec<(NodeInfoKey, NodeInfo)>, Error> {
async fn scan_for_frontend(&self) -> Result<Vec<(NodeInfoKey, NodeInfo)>, Error> {
let Self::Distributed { meta_client, .. } = self else {
return Ok(vec![]);
};
@@ -177,8 +160,8 @@ impl FrontendClient {
Ok(res)
}
/// Get the database with the maximum `last_activity_ts` that is able to process queries
async fn get_latest_active_frontend(
/// Get the database with max `last_activity_ts`
async fn get_last_active_frontend(
&self,
catalog: &str,
schema: &str,
@@ -194,50 +177,22 @@ impl FrontendClient {
.fail();
};
let mut interval = tokio::time::interval(GRPC_CONN_TIMEOUT);
interval.tick().await;
for retry in 0..GRPC_MAX_RETRIES {
let frontends = self.scan_for_frontend().await?;
let now_in_ms = SystemTime::now()
.duration_since(SystemTime::UNIX_EPOCH)
.unwrap()
.as_millis() as i64;
let frontends = self.scan_for_frontend().await?;
let mut peer = None;
// found node with maximum last_activity_ts
for (_, node_info) in frontends
.iter()
.sorted_by_key(|(_, node_info)| node_info.last_activity_ts)
.rev()
// filter out frontends that have been down for more than 1 min
.filter(|(_, node_info)| {
node_info.last_activity_ts + FRONTEND_ACTIVITY_TIMEOUT.as_millis() as i64
> now_in_ms
})
{
let addr = &node_info.peer.addr;
let client = Client::with_manager_and_urls(chnl_mgr.clone(), vec![addr.clone()]);
let database = Database::new(catalog, schema, client);
let db = DatabaseWithPeer::new(database, node_info.peer.clone());
match db.try_select_one().await {
Ok(_) => return Ok(db),
Err(e) => {
warn!(
"Failed to connect to frontend {} on retry={}: \n{e:?}",
addr, retry
);
}
}
if let Some((_, val)) = frontends.iter().max_by_key(|(_, val)| val.last_activity_ts) {
peer = Some(val.peer.clone());
}
let Some(peer) = peer else {
UnexpectedSnafu {
reason: format!("No frontend available: {:?}", frontends),
}
// no available frontend
// sleep and retry
interval.tick().await;
}
NoAvailableFrontendSnafu {
timeout: GRPC_CONN_TIMEOUT,
context: "No available frontend found that is able to process query",
}
.fail()
.fail()?
};
let client = Client::with_manager_and_urls(chnl_mgr.clone(), vec![peer.addr.clone()]);
let database = Database::new(catalog, schema, client);
Ok(DatabaseWithPeer::new(database, peer))
}
pub async fn create(
@@ -267,18 +222,38 @@ impl FrontendClient {
) -> Result<u32, Error> {
match self {
FrontendClient::Distributed { .. } => {
let db = self.get_latest_active_frontend(catalog, schema).await?;
let db = self.get_last_active_frontend(catalog, schema).await?;
*peer_desc = Some(PeerDesc::Dist {
peer: db.peer.clone(),
});
db.database
.handle_with_retry(req.clone(), GRPC_MAX_RETRIES)
.await
.with_context(|_| InvalidRequestSnafu {
context: format!("Failed to handle request at {:?}: {:?}", db.peer, req),
})
let mut retry = 0;
loop {
let ret = db.database.handle(req.clone()).await.with_context(|_| {
InvalidRequestSnafu {
context: format!("Failed to handle request: {:?}", req),
}
});
if let Err(err) = ret {
if retry < GRPC_MAX_RETRIES {
retry += 1;
warn!(
"Failed to send request to grpc handle at Peer={:?}, retry = {}, error = {:?}",
db.peer, retry, err
);
continue;
} else {
common_telemetry::error!(
"Failed to send request to grpc handle at Peer={:?} after {} retries, error = {:?}",
db.peer, retry, err
);
return Err(err);
}
}
return ret;
}
}
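
The new code above retries `handle` up to `GRPC_MAX_RETRIES` times before giving up. A stripped-down sketch of that bounded-retry loop, generic over any fallible async call (not the project's exact types; retry count and logging are illustrative):

const MAX_RETRIES: u32 = 3;

async fn call_with_retry<T, E, F, Fut>(mut call: F) -> Result<T, E>
where
    F: FnMut() -> Fut,
    Fut: std::future::Future<Output = Result<T, E>>,
    E: std::fmt::Debug,
{
    let mut retry = 0;
    loop {
        match call().await {
            Ok(v) => return Ok(v),
            Err(err) if retry < MAX_RETRIES => {
                // Log and try again until the retry budget is exhausted.
                retry += 1;
                eprintln!("request failed, retry = {retry}, error = {err:?}");
            }
            Err(err) => return Err(err),
        }
    }
}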
FrontendClient::Standalone { database_client } => {
let ctx = QueryContextBuilder::default()

View File

@@ -61,16 +61,6 @@ pub enum Error {
location: Location,
},
#[snafu(display(
"No available frontend found after timeout: {timeout:?}, context: {context}"
))]
NoAvailableFrontend {
timeout: std::time::Duration,
context: String,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("External error"))]
External {
source: BoxedError,
@@ -306,8 +296,7 @@ impl ErrorExt for Error {
Self::Eval { .. }
| Self::JoinTask { .. }
| Self::Datafusion { .. }
| Self::InsertIntoFlow { .. }
| Self::NoAvailableFrontend { .. } => StatusCode::Internal,
| Self::InsertIntoFlow { .. } => StatusCode::Internal,
Self::FlowAlreadyExist { .. } => StatusCode::TableAlreadyExists,
Self::TableNotFound { .. }
| Self::TableNotFoundMeta { .. }

View File

@@ -172,8 +172,6 @@ impl FlownodeServer {
}
/// Start the background task for streaming computation.
///
/// Should be called only after the heartbeat is established, so the cluster info is available
async fn start_workers(&self) -> Result<(), Error> {
let manager_ref = self.inner.flow_service.dual_engine.clone();
let handle = manager_ref

View File

@@ -481,7 +481,7 @@ mod tests {
let mock_values = dic_values
.iter()
.flat_map(|(value, size)| iter::repeat(value.clone()).take(*size))
.flat_map(|(value, size)| std::iter::repeat_n(value.clone(), *size))
.collect::<Vec<_>>();
let sorted_result = sorted_result(&mock_values, segment_row_count);

View File

@@ -14,7 +14,6 @@
#![feature(result_flattening)]
#![feature(assert_matches)]
#![feature(extract_if)]
#![feature(hash_set_entry)]
pub mod bootstrap;

View File

@@ -14,7 +14,7 @@
pub mod builder;
use std::fmt::{self, Display};
use std::fmt::Display;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Mutex, RwLock};
use std::time::Duration;
@@ -96,7 +96,7 @@ pub enum BackendImpl {
MysqlStore,
}
#[derive(Clone, PartialEq, Serialize, Deserialize)]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
#[serde(default)]
pub struct MetasrvOptions {
/// The address the server listens on.
@@ -166,47 +166,6 @@ pub struct MetasrvOptions {
pub node_max_idle_time: Duration,
}
impl fmt::Debug for MetasrvOptions {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let mut debug_struct = f.debug_struct("MetasrvOptions");
debug_struct
.field("bind_addr", &self.bind_addr)
.field("server_addr", &self.server_addr)
.field("store_addrs", &self.sanitize_store_addrs())
.field("selector", &self.selector)
.field("use_memory_store", &self.use_memory_store)
.field("enable_region_failover", &self.enable_region_failover)
.field(
"allow_region_failover_on_local_wal",
&self.allow_region_failover_on_local_wal,
)
.field("http", &self.http)
.field("logging", &self.logging)
.field("procedure", &self.procedure)
.field("failure_detector", &self.failure_detector)
.field("datanode", &self.datanode)
.field("enable_telemetry", &self.enable_telemetry)
.field("data_home", &self.data_home)
.field("wal", &self.wal)
.field("export_metrics", &self.export_metrics)
.field("store_key_prefix", &self.store_key_prefix)
.field("max_txn_ops", &self.max_txn_ops)
.field("flush_stats_factor", &self.flush_stats_factor)
.field("tracing", &self.tracing)
.field("backend", &self.backend);
#[cfg(any(feature = "pg_kvbackend", feature = "mysql_kvbackend"))]
debug_struct.field("meta_table_name", &self.meta_table_name);
#[cfg(feature = "pg_kvbackend")]
debug_struct.field("meta_election_lock_id", &self.meta_election_lock_id);
debug_struct
.field("node_max_idle_time", &self.node_max_idle_time)
.finish()
}
}
const DEFAULT_METASRV_ADDR_PORT: &str = "3002";
impl Default for MetasrvOptions {
@@ -290,13 +249,6 @@ impl MetasrvOptions {
common_telemetry::debug!("detect local IP is not supported on Android");
}
}
fn sanitize_store_addrs(&self) -> Vec<String> {
self.store_addrs
.iter()
.map(|addr| common_meta::kv_backend::util::sanitize_connection_string(addr))
.collect()
}
}
pub struct MetasrvInfo {

View File

@@ -141,10 +141,7 @@ pub async fn mock(
if let Some(client) = client {
Ok(TokioIo::new(client))
} else {
Err(std::io::Error::new(
std::io::ErrorKind::Other,
"Client already taken",
))
Err(std::io::Error::other("Client already taken"))
}
}
}),

View File

@@ -278,7 +278,7 @@ impl KvBackend for LeaderCachedKvBackend {
let remote_res = self.store.batch_get(remote_req).await?;
let put_req = BatchPutRequest {
kvs: remote_res.kvs.clone().into_iter().map(Into::into).collect(),
kvs: remote_res.kvs.clone().into_iter().collect(),
..Default::default()
};
let _ = self.cache.batch_put(put_req).await?;

View File

@@ -710,8 +710,8 @@ pub enum Error {
error: std::io::Error,
},
#[snafu(display("Failed to filter record batch"))]
FilterRecordBatch {
#[snafu(display("Record batch error"))]
RecordBatch {
source: common_recordbatch::error::Error,
#[snafu(implicit)]
location: Location,
@@ -1032,6 +1032,20 @@ pub enum Error {
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Failed to scan series"))]
ScanSeries {
#[snafu(implicit)]
location: Location,
source: Arc<Error>,
},
#[snafu(display("Partition {} scan multiple times", partition))]
ScanMultiTimes {
partition: usize,
#[snafu(implicit)]
location: Location,
},
}
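
The new `ScanSeries` variant keeps its source as `Arc<Error>` so one failure from the series distributor can be fanned out to every partition's receiver (see `SenderList::send_error` later in this compare). A tiny sketch of that sharing pattern (hypothetical error type, not the project's code):

use std::sync::Arc;

#[derive(Debug)]
struct ScanError(String);

fn main() {
    // Clone the Arc per partition instead of cloning (or losing) the error itself.
    let shared = Arc::new(ScanError("scan failed".to_string()));
    let per_partition: Vec<Arc<ScanError>> = (0..3).map(|_| shared.clone()).collect();
    assert_eq!(per_partition.len(), 3);
}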
pub type Result<T, E = Error> = std::result::Result<T, E>;
@@ -1154,7 +1168,7 @@ impl ErrorExt for Error {
External { source, .. } => source.status_code(),
FilterRecordBatch { source, .. } => source.status_code(),
RecordBatch { source, .. } => source.status_code(),
Download { .. } | Upload { .. } => StatusCode::StorageUnavailable,
ChecksumMismatch { .. } => StatusCode::Unexpected,
@@ -1183,7 +1197,12 @@ impl ErrorExt for Error {
ManualCompactionOverride {} => StatusCode::Cancelled,
IncompatibleWalProviderChange { .. } => StatusCode::InvalidArguments,
ConvertDataType { .. } => StatusCode::Internal,
ScanSeries { source, .. } => source.status_code(),
ScanMultiTimes { .. } => StatusCode::InvalidArguments,
}
}

View File

@@ -302,10 +302,7 @@ impl PartitionTreeMemtable {
fn update_stats(&self, metrics: &WriteMetrics) {
// Only let the tracker track value bytes.
self.alloc_tracker.on_allocation(metrics.value_bytes);
self.max_timestamp
.fetch_max(metrics.max_ts, Ordering::SeqCst);
self.min_timestamp
.fetch_min(metrics.min_ts, Ordering::SeqCst);
metrics.update_timestamp_range(&self.max_timestamp, &self.min_timestamp);
}
}

View File

@@ -14,6 +14,8 @@
//! Internal metrics of the memtable.
use std::sync::atomic::{AtomicI64, Ordering};
/// Metrics of writing memtables.
pub(crate) struct WriteMetrics {
/// Size allocated by keys.
@@ -26,6 +28,51 @@ pub(crate) struct WriteMetrics {
pub(crate) max_ts: i64,
}
impl WriteMetrics {
/// Update the min/max timestamp range according to current write metric.
pub(crate) fn update_timestamp_range(&self, prev_max_ts: &AtomicI64, prev_min_ts: &AtomicI64) {
loop {
let current_min = prev_min_ts.load(Ordering::Relaxed);
if self.min_ts >= current_min {
break;
}
let Err(updated) = prev_min_ts.compare_exchange(
current_min,
self.min_ts,
Ordering::Relaxed,
Ordering::Relaxed,
) else {
break;
};
if updated == self.min_ts {
break;
}
}
loop {
let current_max = prev_max_ts.load(Ordering::Relaxed);
if self.max_ts <= current_max {
break;
}
let Err(updated) = prev_max_ts.compare_exchange(
current_max,
self.max_ts,
Ordering::Relaxed,
Ordering::Relaxed,
) else {
break;
};
if updated == self.max_ts {
break;
}
}
}
}
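
The helper above replaces the previous `fetch_max`/`fetch_min` calls with explicit compare-and-swap loops. A minimal standalone sketch of that CAS-based atomic-max pattern (simplified; the project's version also handles the min side and extra early-exit checks):

use std::sync::atomic::{AtomicI64, Ordering};

fn update_max(prev_max: &AtomicI64, candidate: i64) {
    let mut current = prev_max.load(Ordering::Relaxed);
    // Retry until the stored value is already >= candidate or the swap succeeds.
    while candidate > current {
        match prev_max.compare_exchange(current, candidate, Ordering::Relaxed, Ordering::Relaxed) {
            Ok(_) => break,
            Err(observed) => current = observed,
        }
    }
}

fn main() {
    let max_ts = AtomicI64::new(i64::MIN);
    update_max(&max_ts, 42);
    update_max(&max_ts, 7); // no-op: 7 <= 42
    assert_eq!(max_ts.load(Ordering::Relaxed), 42);
}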
impl Default for WriteMetrics {
fn default() -> Self {
Self {

View File

@@ -147,8 +147,7 @@ impl TimeSeriesMemtable {
fn update_stats(&self, stats: WriteMetrics) {
self.alloc_tracker
.on_allocation(stats.key_bytes + stats.value_bytes);
self.max_timestamp.fetch_max(stats.max_ts, Ordering::SeqCst);
self.min_timestamp.fetch_min(stats.min_ts, Ordering::SeqCst);
stats.update_timestamp_range(&self.max_timestamp, &self.min_timestamp);
}
fn write_key_value(&self, kv: KeyValue, stats: &mut WriteMetrics) -> Result<()> {

View File

@@ -24,6 +24,7 @@ pub(crate) mod range;
pub(crate) mod scan_region;
pub(crate) mod scan_util;
pub(crate) mod seq_scan;
pub(crate) mod series_scan;
pub(crate) mod unordered_scan;
use std::collections::{HashMap, HashSet};

View File

@@ -363,9 +363,9 @@ mod tests {
builder
.push_field_array(
*column_id,
Arc::new(Int64Array::from_iter_values(
std::iter::repeat(*field).take(num_rows),
)),
Arc::new(Int64Array::from_iter_values(std::iter::repeat_n(
*field, num_rows,
))),
)
.unwrap();
}

View File

@@ -21,7 +21,7 @@ use datatypes::arrow::array::BooleanArray;
use datatypes::arrow::buffer::BooleanBuffer;
use snafu::ResultExt;
use crate::error::{FilterRecordBatchSnafu, Result};
use crate::error::{RecordBatchSnafu, Result};
use crate::memtable::BoxedBatchIterator;
use crate::read::last_row::RowGroupLastRowCachedReader;
use crate::read::{Batch, BatchReader};
@@ -201,7 +201,7 @@ impl PruneTimeIterator {
for filter in filters.iter() {
let result = filter
.evaluate_vector(batch.timestamps())
.context(FilterRecordBatchSnafu)?;
.context(RecordBatchSnafu)?;
mask = mask.bitand(&result);
}

View File

@@ -46,6 +46,7 @@ use crate::read::compat::{self, CompatBatch};
use crate::read::projection::ProjectionMapper;
use crate::read::range::{FileRangeBuilder, MemRangeBuilder, RangeMeta, RowGroupIndex};
use crate::read::seq_scan::SeqScan;
use crate::read::series_scan::SeriesScan;
use crate::read::unordered_scan::UnorderedScan;
use crate::read::{Batch, Source};
use crate::region::options::MergeMode;
@@ -66,6 +67,8 @@ pub(crate) enum Scanner {
Seq(SeqScan),
/// Unordered scan.
Unordered(UnorderedScan),
/// Per-series scan.
Series(SeriesScan),
}
impl Scanner {
@@ -75,6 +78,7 @@ impl Scanner {
match self {
Scanner::Seq(seq_scan) => seq_scan.build_stream(),
Scanner::Unordered(unordered_scan) => unordered_scan.build_stream().await,
Scanner::Series(series_scan) => series_scan.build_stream().await,
}
}
}
@@ -86,6 +90,7 @@ impl Scanner {
match self {
Scanner::Seq(seq_scan) => seq_scan.input().num_files(),
Scanner::Unordered(unordered_scan) => unordered_scan.input().num_files(),
Scanner::Series(series_scan) => series_scan.input().num_files(),
}
}
@@ -94,6 +99,7 @@ impl Scanner {
match self {
Scanner::Seq(seq_scan) => seq_scan.input().num_memtables(),
Scanner::Unordered(unordered_scan) => unordered_scan.input().num_memtables(),
Scanner::Series(series_scan) => series_scan.input().num_memtables(),
}
}
@@ -102,6 +108,7 @@ impl Scanner {
match self {
Scanner::Seq(seq_scan) => seq_scan.input().file_ids(),
Scanner::Unordered(unordered_scan) => unordered_scan.input().file_ids(),
Scanner::Series(series_scan) => series_scan.input().file_ids(),
}
}
@@ -113,6 +120,7 @@ impl Scanner {
match self {
Scanner::Seq(seq_scan) => seq_scan.prepare(request).unwrap(),
Scanner::Unordered(unordered_scan) => unordered_scan.prepare(request).unwrap(),
Scanner::Series(series_scan) => series_scan.prepare(request).unwrap(),
}
}
}
@@ -248,7 +256,9 @@ impl ScanRegion {
/// Returns a [Scanner] to scan the region.
pub(crate) fn scanner(self) -> Result<Scanner> {
if self.use_unordered_scan() {
if self.use_series_scan() {
self.series_scan().map(Scanner::Series)
} else if self.use_unordered_scan() {
// If the table is append-only and there is no series row selector, we use unordered scan for queries.
// We still use seq scan in compaction.
self.unordered_scan().map(Scanner::Unordered)
@@ -260,7 +270,9 @@ impl ScanRegion {
/// Returns a [RegionScanner] to scan the region.
#[tracing::instrument(level = tracing::Level::DEBUG, skip_all)]
pub(crate) fn region_scanner(self) -> Result<RegionScannerRef> {
if self.use_unordered_scan() {
if self.use_series_scan() {
self.series_scan().map(|scanner| Box::new(scanner) as _)
} else if self.use_unordered_scan() {
self.unordered_scan().map(|scanner| Box::new(scanner) as _)
} else {
self.seq_scan().map(|scanner| Box::new(scanner) as _)
@@ -279,6 +291,12 @@ impl ScanRegion {
Ok(UnorderedScan::new(input))
}
/// Scans by series.
pub(crate) fn series_scan(self) -> Result<SeriesScan> {
let input = self.scan_input(true)?;
Ok(SeriesScan::new(input))
}
#[cfg(test)]
pub(crate) fn scan_without_filter_deleted(self) -> Result<SeqScan> {
let input = self.scan_input(false)?;
@@ -299,6 +317,11 @@ impl ScanRegion {
|| self.request.distribution == Some(TimeSeriesDistribution::TimeWindowed))
}
/// Returns true if the region can use series scan for current request.
fn use_series_scan(&self) -> bool {
self.request.distribution == Some(TimeSeriesDistribution::PerSeries)
}
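
Simplified view of the dispatch introduced above: the requested time-series distribution now decides which scanner the region builds. The enum and flag below are hypothetical stand-ins for the real `TimeSeriesDistribution`/`ScanRegion` conditions (which also consider row selectors and compaction):

enum Distribution { PerSeries, TimeWindowed }

#[derive(Debug, PartialEq)]
enum ScannerKind { Series, Unordered, Seq }

fn pick_scanner(distribution: Option<Distribution>, append_only: bool) -> ScannerKind {
    match distribution {
        // PerSeries always routes to the new SeriesScan.
        Some(Distribution::PerSeries) => ScannerKind::Series,
        // Append-only tables without special requirements use the unordered scan.
        _ if append_only => ScannerKind::Unordered,
        // Everything else falls back to the ordered sequential scan.
        _ => ScannerKind::Seq,
    }
}

fn main() {
    assert_eq!(pick_scanner(Some(Distribution::PerSeries), true), ScannerKind::Series);
    assert_eq!(pick_scanner(None, false), ScannerKind::Seq);
}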
/// Creates a scan input.
fn scan_input(mut self, filter_deleted: bool) -> Result<ScanInput> {
let time_range = self.build_time_range_predicate();
@@ -322,10 +345,13 @@ impl ScanRegion {
let memtables: Vec<_> = memtables
.into_iter()
.filter(|mem| {
// check if memtable is empty by reading stats.
let Some((start, end)) = mem.stats().time_range() else {
if mem.is_empty() {
return false;
};
}
let stats = mem.stats();
// Safety: the memtable is not empty.
let (start, end) = stats.time_range().unwrap();
// The time range of the memtable is inclusive.
let memtable_range = TimestampRange::new_inclusive(Some(start), Some(end));
memtable_range.intersects(&time_range)

View File

@@ -92,6 +92,8 @@ struct ScanMetricsSet {
/// Elapsed time before the first poll operation.
first_poll: Duration,
/// Number of send timeouts in SeriesScan.
num_series_send_timeout: usize,
}
impl fmt::Debug for ScanMetricsSet {
@@ -122,6 +124,7 @@ impl fmt::Debug for ScanMetricsSet {
num_sst_batches,
num_sst_rows,
first_poll,
num_series_send_timeout,
} = self;
write!(
@@ -150,7 +153,8 @@ impl fmt::Debug for ScanMetricsSet {
num_sst_record_batches={num_sst_record_batches}, \
num_sst_batches={num_sst_batches}, \
num_sst_rows={num_sst_rows}, \
first_poll={first_poll:?}}}"
first_poll={first_poll:?}, \
num_series_send_timeout={num_series_send_timeout}}}"
)
}
}
@@ -439,6 +443,12 @@ impl PartitionMetrics {
pub(crate) fn on_finish(&self) {
self.0.on_finish();
}
/// Sets the `num_series_send_timeout`.
pub(crate) fn set_num_series_send_timeout(&self, num_timeout: usize) {
let mut metrics = self.0.metrics.lock().unwrap();
metrics.num_series_send_timeout = num_timeout;
}
}
impl fmt::Debug for PartitionMetrics {

View File

@@ -30,7 +30,7 @@ use datatypes::schema::SchemaRef;
use snafu::ResultExt;
use store_api::metadata::RegionMetadataRef;
use store_api::region_engine::{PartitionRange, PrepareRequest, RegionScanner, ScannerProperties};
use store_api::storage::{TimeSeriesDistribution, TimeSeriesRowSelector};
use store_api::storage::TimeSeriesRowSelector;
use tokio::sync::Semaphore;
use crate::error::{PartitionOutOfRangeSnafu, Result};
@@ -149,7 +149,7 @@ impl SeqScan {
/// Builds a reader to read sources. If `semaphore` is provided, reads sources in parallel
/// if possible.
#[tracing::instrument(level = tracing::Level::DEBUG, skip_all)]
async fn build_reader_from_sources(
pub(crate) async fn build_reader_from_sources(
stream_ctx: &StreamContext,
mut sources: Vec<Source>,
semaphore: Option<Arc<Semaphore>>,
@@ -206,9 +206,13 @@ impl SeqScan {
.build(),
));
}
if self.stream_ctx.input.distribution == Some(TimeSeriesDistribution::PerSeries) {
return self.scan_partition_by_series(metrics_set, partition);
if self.properties.partitions[partition].is_empty() {
return Ok(Box::pin(RecordBatchStreamWrapper::new(
self.stream_ctx.input.mapper.output_schema(),
common_recordbatch::EmptyRecordBatchStream::new(
self.stream_ctx.input.mapper.output_schema(),
),
)));
}
let stream_ctx = self.stream_ctx.clone();
@@ -237,14 +241,14 @@ impl SeqScan {
&mut sources,
);
let mut metrics = ScannerMetrics::default();
let mut fetch_start = Instant::now();
let mut reader =
Self::build_reader_from_sources(&stream_ctx, sources, semaphore.clone())
.await
.map_err(BoxedError::new)
.context(ExternalSnafu)?;
let cache = &stream_ctx.input.cache_strategy;
let mut metrics = ScannerMetrics::default();
let mut fetch_start = Instant::now();
#[cfg(debug_assertions)]
let mut checker = crate::read::BatchChecker::default()
.with_start(Some(part_range.start))
@@ -307,97 +311,6 @@ impl SeqScan {
Ok(stream)
}
/// Scans all ranges in the given partition and merges them by time series;
/// otherwise the returned stream might not contain any data.
fn scan_partition_by_series(
&self,
metrics_set: &ExecutionPlanMetricsSet,
partition: usize,
) -> Result<SendableRecordBatchStream, BoxedError> {
let stream_ctx = self.stream_ctx.clone();
let semaphore = self.new_semaphore();
let partition_ranges = self.properties.partitions[partition].clone();
let distinguish_range = self.properties.distinguish_partition_range;
let part_metrics = self.new_partition_metrics(metrics_set, partition);
debug_assert!(!self.compaction);
let stream = try_stream! {
part_metrics.on_first_poll();
let range_builder_list = Arc::new(RangeBuilderList::new(
stream_ctx.input.num_memtables(),
stream_ctx.input.num_files(),
));
// Scans all parts.
let mut sources = Vec::with_capacity(partition_ranges.len());
for part_range in partition_ranges {
build_sources(
&stream_ctx,
&part_range,
false,
&part_metrics,
range_builder_list.clone(),
&mut sources,
);
}
// Builds a reader that merges sources from all parts.
let mut reader =
Self::build_reader_from_sources(&stream_ctx, sources, semaphore.clone())
.await
.map_err(BoxedError::new)
.context(ExternalSnafu)?;
let cache = &stream_ctx.input.cache_strategy;
let mut metrics = ScannerMetrics::default();
let mut fetch_start = Instant::now();
while let Some(batch) = reader
.next_batch()
.await
.map_err(BoxedError::new)
.context(ExternalSnafu)?
{
metrics.scan_cost += fetch_start.elapsed();
metrics.num_batches += 1;
metrics.num_rows += batch.num_rows();
debug_assert!(!batch.is_empty());
if batch.is_empty() {
continue;
}
let convert_start = Instant::now();
let record_batch = stream_ctx.input.mapper.convert(&batch, cache)?;
metrics.convert_cost += convert_start.elapsed();
let yield_start = Instant::now();
yield record_batch;
metrics.yield_cost += yield_start.elapsed();
fetch_start = Instant::now();
}
// Yields an empty part to indicate this range is terminated.
// The query engine can use this to optimize some queries.
if distinguish_range {
let yield_start = Instant::now();
yield stream_ctx.input.mapper.empty_record_batch();
metrics.yield_cost += yield_start.elapsed();
}
metrics.scan_cost += fetch_start.elapsed();
part_metrics.merge_metrics(&metrics);
part_metrics.on_finish();
};
let stream = Box::pin(RecordBatchStreamWrapper::new(
self.stream_ctx.input.mapper.output_schema(),
Box::pin(stream),
));
Ok(stream)
}
fn new_semaphore(&self) -> Option<Arc<Semaphore>> {
if self.properties.target_partitions() > self.properties.num_partitions() {
// We can use additional tasks to read the data if we have more target partitions than actual partitions.
@@ -498,7 +411,7 @@ impl fmt::Debug for SeqScan {
}
/// Builds sources for the partition range and push them to the `sources` vector.
fn build_sources(
pub(crate) fn build_sources(
stream_ctx: &Arc<StreamContext>,
part_range: &PartitionRange,
compaction: bool,
@@ -509,8 +422,8 @@ fn build_sources(
// Gets range meta.
let range_meta = &stream_ctx.ranges[part_range.identifier];
#[cfg(debug_assertions)]
if compaction || stream_ctx.input.distribution == Some(TimeSeriesDistribution::PerSeries) {
// Compaction or per-series distribution expects that input sources have not been split.
if compaction {
// Compaction expects that input sources have not been split.
debug_assert_eq!(range_meta.indices.len(), range_meta.row_group_indices.len());
for (i, row_group_idx) in range_meta.row_group_indices.iter().enumerate() {
// It should scan all row groups.

View File

@@ -0,0 +1,547 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Per-series scan implementation.
use std::fmt;
use std::sync::{Arc, Mutex};
use std::time::{Duration, Instant};
use async_stream::try_stream;
use common_error::ext::BoxedError;
use common_recordbatch::error::ExternalSnafu;
use common_recordbatch::util::ChainedRecordBatchStream;
use common_recordbatch::{RecordBatch, RecordBatchStreamWrapper, SendableRecordBatchStream};
use datafusion::physical_plan::metrics::ExecutionPlanMetricsSet;
use datafusion::physical_plan::{DisplayAs, DisplayFormatType};
use datatypes::compute::concat_batches;
use datatypes::schema::SchemaRef;
use smallvec::{smallvec, SmallVec};
use snafu::{ensure, OptionExt, ResultExt};
use store_api::metadata::RegionMetadataRef;
use store_api::region_engine::{PartitionRange, PrepareRequest, RegionScanner, ScannerProperties};
use tokio::sync::mpsc::error::{SendTimeoutError, TrySendError};
use tokio::sync::mpsc::{self, Receiver, Sender};
use tokio::sync::Semaphore;
use crate::error::{
ComputeArrowSnafu, Error, InvalidSenderSnafu, PartitionOutOfRangeSnafu, Result,
ScanMultiTimesSnafu, ScanSeriesSnafu,
};
use crate::read::range::RangeBuilderList;
use crate::read::scan_region::{ScanInput, StreamContext};
use crate::read::scan_util::{PartitionMetrics, PartitionMetricsList};
use crate::read::seq_scan::{build_sources, SeqScan};
use crate::read::{Batch, ScannerMetrics};
/// Timeout to send a batch to a sender.
const SEND_TIMEOUT: Duration = Duration::from_millis(10);
/// List of receivers.
type ReceiverList = Vec<Option<Receiver<Result<SeriesBatch>>>>;
/// Scans a region and returns sorted rows of a series in the same partition.
///
/// The output is always ordered by `(primary key, time index)` inside every
/// partition.
/// It always returns the same series (primary key) to the same partition.
pub struct SeriesScan {
/// Properties of the scanner.
properties: ScannerProperties,
/// Context of streams.
stream_ctx: Arc<StreamContext>,
/// Receivers of each partition.
receivers: Mutex<ReceiverList>,
/// Metrics for each partition.
/// The scanner only sets this in queries and keeps it empty during compaction.
metrics_list: Arc<PartitionMetricsList>,
}
impl SeriesScan {
/// Creates a new [SeriesScan].
pub(crate) fn new(input: ScanInput) -> Self {
let mut properties = ScannerProperties::default()
.with_append_mode(input.append_mode)
.with_total_rows(input.total_rows());
let stream_ctx = Arc::new(StreamContext::seq_scan_ctx(input, false));
properties.partitions = vec![stream_ctx.partition_ranges()];
Self {
properties,
stream_ctx,
receivers: Mutex::new(Vec::new()),
metrics_list: Arc::new(PartitionMetricsList::default()),
}
}
fn scan_partition_impl(
&self,
metrics_set: &ExecutionPlanMetricsSet,
partition: usize,
) -> Result<SendableRecordBatchStream, BoxedError> {
if partition >= self.properties.num_partitions() {
return Err(BoxedError::new(
PartitionOutOfRangeSnafu {
given: partition,
all: self.properties.num_partitions(),
}
.build(),
));
}
self.maybe_start_distributor(metrics_set, &self.metrics_list);
let part_metrics =
new_partition_metrics(&self.stream_ctx, metrics_set, partition, &self.metrics_list);
let mut receiver = self.take_receiver(partition).map_err(BoxedError::new)?;
let stream_ctx = self.stream_ctx.clone();
let stream = try_stream! {
part_metrics.on_first_poll();
let cache = &stream_ctx.input.cache_strategy;
let mut df_record_batches = Vec::new();
let mut fetch_start = Instant::now();
while let Some(result) = receiver.recv().await {
let mut metrics = ScannerMetrics::default();
let series = result.map_err(BoxedError::new).context(ExternalSnafu)?;
metrics.scan_cost += fetch_start.elapsed();
fetch_start = Instant::now();
let convert_start = Instant::now();
df_record_batches.reserve(series.batches.len());
for batch in series.batches {
metrics.num_batches += 1;
metrics.num_rows += batch.num_rows();
let record_batch = stream_ctx.input.mapper.convert(&batch, cache)?;
df_record_batches.push(record_batch.into_df_record_batch());
}
let output_schema = stream_ctx.input.mapper.output_schema();
let df_record_batch =
concat_batches(output_schema.arrow_schema(), &df_record_batches)
.context(ComputeArrowSnafu)
.map_err(BoxedError::new)
.context(ExternalSnafu)?;
df_record_batches.clear();
let record_batch =
RecordBatch::try_from_df_record_batch(output_schema, df_record_batch)?;
metrics.convert_cost += convert_start.elapsed();
let yield_start = Instant::now();
yield record_batch;
metrics.yield_cost += yield_start.elapsed();
part_metrics.merge_metrics(&metrics);
}
};
let stream = Box::pin(RecordBatchStreamWrapper::new(
self.stream_ctx.input.mapper.output_schema(),
Box::pin(stream),
));
Ok(stream)
}
/// Takes the receiver for the partition.
fn take_receiver(&self, partition: usize) -> Result<Receiver<Result<SeriesBatch>>> {
let mut rx_list = self.receivers.lock().unwrap();
rx_list[partition]
.take()
.context(ScanMultiTimesSnafu { partition })
}
/// Starts the distributor if the receiver list is empty.
fn maybe_start_distributor(
&self,
metrics_set: &ExecutionPlanMetricsSet,
metrics_list: &Arc<PartitionMetricsList>,
) {
let mut rx_list = self.receivers.lock().unwrap();
if !rx_list.is_empty() {
return;
}
let (senders, receivers) = new_channel_list(self.properties.num_partitions());
let mut distributor = SeriesDistributor {
stream_ctx: self.stream_ctx.clone(),
semaphore: Some(Arc::new(Semaphore::new(self.properties.num_partitions()))),
partitions: self.properties.partitions.clone(),
senders,
metrics_set: metrics_set.clone(),
metrics_list: metrics_list.clone(),
};
common_runtime::spawn_global(async move {
distributor.execute().await;
});
*rx_list = receivers;
}
/// Scans the region and returns a stream.
pub(crate) async fn build_stream(&self) -> Result<SendableRecordBatchStream, BoxedError> {
let part_num = self.properties.num_partitions();
let metrics_set = ExecutionPlanMetricsSet::default();
let streams = (0..part_num)
.map(|i| self.scan_partition(&metrics_set, i))
.collect::<Result<Vec<_>, BoxedError>>()?;
let chained_stream = ChainedRecordBatchStream::new(streams).map_err(BoxedError::new)?;
Ok(Box::pin(chained_stream))
}
}
fn new_channel_list(num_partitions: usize) -> (SenderList, ReceiverList) {
let (senders, receivers): (Vec<_>, Vec<_>) = (0..num_partitions)
.map(|_| {
let (sender, receiver) = mpsc::channel(1);
(Some(sender), Some(receiver))
})
.unzip();
(SenderList::new(senders), receivers)
}
impl RegionScanner for SeriesScan {
fn properties(&self) -> &ScannerProperties {
&self.properties
}
fn schema(&self) -> SchemaRef {
self.stream_ctx.input.mapper.output_schema()
}
fn metadata(&self) -> RegionMetadataRef {
self.stream_ctx.input.mapper.metadata().clone()
}
fn scan_partition(
&self,
metrics_set: &ExecutionPlanMetricsSet,
partition: usize,
) -> Result<SendableRecordBatchStream, BoxedError> {
self.scan_partition_impl(metrics_set, partition)
}
fn prepare(&mut self, request: PrepareRequest) -> Result<(), BoxedError> {
self.properties.prepare(request);
Ok(())
}
fn has_predicate(&self) -> bool {
let predicate = self.stream_ctx.input.predicate();
predicate.map(|p| !p.exprs().is_empty()).unwrap_or(false)
}
fn set_logical_region(&mut self, logical_region: bool) {
self.properties.set_logical_region(logical_region);
}
}
impl DisplayAs for SeriesScan {
fn fmt_as(&self, t: DisplayFormatType, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"SeriesScan: region={}, ",
self.stream_ctx.input.mapper.metadata().region_id
)?;
match t {
DisplayFormatType::Default => self.stream_ctx.format_for_explain(false, f),
DisplayFormatType::Verbose => {
self.stream_ctx.format_for_explain(true, f)?;
self.metrics_list.format_verbose_metrics(f)
}
}
}
}
impl fmt::Debug for SeriesScan {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("SeriesScan")
.field("num_ranges", &self.stream_ctx.ranges.len())
.finish()
}
}
#[cfg(test)]
impl SeriesScan {
/// Returns the input.
pub(crate) fn input(&self) -> &ScanInput {
&self.stream_ctx.input
}
}
/// The distributor scans series and distributes them to different partitions.
struct SeriesDistributor {
/// Context for the scan stream.
stream_ctx: Arc<StreamContext>,
/// Optional semaphore for limiting the number of concurrent scans.
semaphore: Option<Arc<Semaphore>>,
/// Partition ranges to scan.
partitions: Vec<Vec<PartitionRange>>,
/// Senders of all partitions.
senders: SenderList,
/// Metrics set to report.
/// The distributor reports the metrics as an additional partition.
/// This may double the scan cost shown in the [SeriesScan] metrics. We can
/// check per-partition metrics in verbose mode to see the metrics of the
/// distributor itself.
metrics_set: ExecutionPlanMetricsSet,
metrics_list: Arc<PartitionMetricsList>,
}
impl SeriesDistributor {
/// Executes the distributor.
async fn execute(&mut self) {
if let Err(e) = self.scan_partitions().await {
self.senders.send_error(e).await;
}
}
/// Scans all parts.
async fn scan_partitions(&mut self) -> Result<()> {
let part_metrics = new_partition_metrics(
&self.stream_ctx,
&self.metrics_set,
self.partitions.len(),
&self.metrics_list,
);
part_metrics.on_first_poll();
let range_builder_list = Arc::new(RangeBuilderList::new(
self.stream_ctx.input.num_memtables(),
self.stream_ctx.input.num_files(),
));
// Scans all parts.
let mut sources = Vec::with_capacity(self.partitions.len());
for partition in &self.partitions {
sources.reserve(partition.len());
for part_range in partition {
build_sources(
&self.stream_ctx,
part_range,
false,
&part_metrics,
range_builder_list.clone(),
&mut sources,
);
}
}
// Builds a reader that merges sources from all parts.
let mut reader =
SeqScan::build_reader_from_sources(&self.stream_ctx, sources, self.semaphore.clone())
.await?;
let mut metrics = ScannerMetrics::default();
let mut fetch_start = Instant::now();
let mut current_series = SeriesBatch::default();
while let Some(batch) = reader.next_batch().await? {
metrics.scan_cost += fetch_start.elapsed();
fetch_start = Instant::now();
metrics.num_batches += 1;
metrics.num_rows += batch.num_rows();
debug_assert!(!batch.is_empty());
if batch.is_empty() {
continue;
}
let Some(last_key) = current_series.current_key() else {
current_series.push(batch);
continue;
};
if last_key == batch.primary_key() {
current_series.push(batch);
continue;
}
// We find a new series, send the current one.
let to_send = std::mem::replace(&mut current_series, SeriesBatch::single(batch));
let yield_start = Instant::now();
self.senders.send_batch(to_send).await?;
metrics.yield_cost += yield_start.elapsed();
}
if !current_series.is_empty() {
let yield_start = Instant::now();
self.senders.send_batch(current_series).await?;
metrics.yield_cost += yield_start.elapsed();
}
metrics.scan_cost += fetch_start.elapsed();
part_metrics.merge_metrics(&metrics);
part_metrics.set_num_series_send_timeout(self.senders.num_timeout);
part_metrics.on_finish();
Ok(())
}
}
/// Batches of the same series.
#[derive(Default)]
struct SeriesBatch {
batches: SmallVec<[Batch; 4]>,
}
impl SeriesBatch {
/// Creates a new [SeriesBatch] from a single [Batch].
fn single(batch: Batch) -> Self {
Self {
batches: smallvec![batch],
}
}
fn current_key(&self) -> Option<&[u8]> {
self.batches.first().map(|batch| batch.primary_key())
}
fn push(&mut self, batch: Batch) {
self.batches.push(batch);
}
/// Returns true if there is no batch.
fn is_empty(&self) -> bool {
self.batches.is_empty()
}
}
/// List of senders.
struct SenderList {
senders: Vec<Option<Sender<Result<SeriesBatch>>>>,
/// Number of None senders.
num_nones: usize,
/// Index of the current partition to send.
sender_idx: usize,
/// Number of send timeouts.
num_timeout: usize,
}
impl SenderList {
fn new(senders: Vec<Option<Sender<Result<SeriesBatch>>>>) -> Self {
let num_nones = senders.iter().filter(|sender| sender.is_none()).count();
Self {
senders,
num_nones,
sender_idx: 0,
num_timeout: 0,
}
}
/// Finds a partition and tries to send the batch to the partition.
/// Returns None if it sends successfully.
fn try_send_batch(&mut self, mut batch: SeriesBatch) -> Result<Option<SeriesBatch>> {
for _ in 0..self.senders.len() {
ensure!(self.num_nones < self.senders.len(), InvalidSenderSnafu);
let sender_idx = self.fetch_add_sender_idx();
let Some(sender) = &self.senders[sender_idx] else {
continue;
};
match sender.try_send(Ok(batch)) {
Ok(()) => return Ok(None),
Err(TrySendError::Full(res)) => {
// Safety: we send Ok.
batch = res.unwrap();
}
Err(TrySendError::Closed(res)) => {
self.senders[sender_idx] = None;
self.num_nones += 1;
// Safety: we send Ok.
batch = res.unwrap();
}
}
}
Ok(Some(batch))
}
/// Finds a partition and sends the batch to the partition.
async fn send_batch(&mut self, mut batch: SeriesBatch) -> Result<()> {
// Sends the batch without blocking first.
match self.try_send_batch(batch)? {
Some(b) => {
// Unable to send batch to partition.
batch = b;
}
None => {
return Ok(());
}
}
loop {
ensure!(self.num_nones < self.senders.len(), InvalidSenderSnafu);
let sender_idx = self.fetch_add_sender_idx();
let Some(sender) = &self.senders[sender_idx] else {
continue;
};
// Use a timeout to avoid blocking indefinitely: when some partitions
// don't poll their inputs, move on and send the batch to another
// partition in a round-robin fashion. This may happen if a node like
// a sort merge sits above the scan, but it is rare when using SeriesScan.
match sender.send_timeout(Ok(batch), SEND_TIMEOUT).await {
Ok(()) => break,
Err(SendTimeoutError::Timeout(res)) => {
self.num_timeout += 1;
// Safety: we send Ok.
batch = res.unwrap();
}
Err(SendTimeoutError::Closed(res)) => {
self.senders[sender_idx] = None;
self.num_nones += 1;
// Safety: we send Ok.
batch = res.unwrap();
}
}
}
Ok(())
}
async fn send_error(&self, error: Error) {
let error = Arc::new(error);
for sender in self.senders.iter().flatten() {
let result = Err(error.clone()).context(ScanSeriesSnafu);
let _ = sender.send(result).await;
}
}
fn fetch_add_sender_idx(&mut self) -> usize {
let sender_idx = self.sender_idx;
self.sender_idx = (self.sender_idx + 1) % self.senders.len();
sender_idx
}
}
fn new_partition_metrics(
stream_ctx: &StreamContext,
metrics_set: &ExecutionPlanMetricsSet,
partition: usize,
metrics_list: &PartitionMetricsList,
) -> PartitionMetrics {
let metrics = PartitionMetrics::new(
stream_ctx.input.mapper.metadata().region_id,
partition,
"SeriesScan",
stream_ctx.query_start,
metrics_set,
);
metrics_list.set(partition, metrics.clone());
metrics
}
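
SenderList above prefers a non-blocking `try_send` and only falls back to `send_timeout`, so a slow partition cannot stall the distributor; on timeout it rotates to the next sender and counts the event. A minimal standalone sketch of those two tokio mpsc calls (not the project's code; the 10ms timeout mirrors SEND_TIMEOUT):

use std::time::Duration;
use tokio::sync::mpsc::{self, error::{SendTimeoutError, TrySendError}};

#[tokio::main]
async fn main() {
    let (tx, mut rx) = mpsc::channel::<u32>(1);
    tx.try_send(1).unwrap(); // fills the capacity-1 channel

    // Non-blocking attempt first; the batch comes back if the channel is full.
    if let Err(TrySendError::Full(batch)) = tx.try_send(2) {
        // Bounded wait; on timeout a real distributor would rotate to the
        // next partition's sender and bump its timeout counter.
        match tx.send_timeout(batch, Duration::from_millis(10)).await {
            Ok(()) => {}
            Err(SendTimeoutError::Timeout(_)) => println!("receiver is slow, try another partition"),
            Err(SendTimeoutError::Closed(_)) => println!("receiver dropped, disable this sender"),
        }
    }
    assert_eq!(rx.recv().await, Some(1));
}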

View File

@@ -346,7 +346,6 @@ impl BloomFilterIndexer {
#[cfg(test)]
pub(crate) mod tests {
use std::iter;
use api::v1::SemanticType;
use datatypes::data_type::ConcreteDataType;
@@ -461,15 +460,15 @@ pub(crate) mod tests {
Batch::new(
primary_key,
Arc::new(UInt64Vector::from_iter_values(
iter::repeat(0).take(num_rows),
)),
Arc::new(UInt64Vector::from_iter_values(
iter::repeat(0).take(num_rows),
)),
Arc::new(UInt8Vector::from_iter_values(
iter::repeat(1).take(num_rows),
)),
Arc::new(UInt64Vector::from_iter_values(std::iter::repeat_n(
0, num_rows,
))),
Arc::new(UInt64Vector::from_iter_values(std::iter::repeat_n(
0, num_rows,
))),
Arc::new(UInt8Vector::from_iter_values(std::iter::repeat_n(
1, num_rows,
))),
vec![u64_field],
)
.unwrap()

View File

@@ -489,12 +489,12 @@ mod tests {
Arc::new(UInt64Vector::from_iter_values(
(0..num_rows).map(|n| n as u64),
)),
Arc::new(UInt64Vector::from_iter_values(
std::iter::repeat(0).take(num_rows),
)),
Arc::new(UInt8Vector::from_iter_values(
std::iter::repeat(1).take(num_rows),
)),
Arc::new(UInt64Vector::from_iter_values(std::iter::repeat_n(
0, num_rows,
))),
Arc::new(UInt8Vector::from_iter_values(std::iter::repeat_n(
1, num_rows,
))),
vec![
BatchColumn {
column_id: 1,

View File

@@ -326,7 +326,6 @@ impl InvertedIndexer {
#[cfg(test)]
mod tests {
use std::collections::BTreeSet;
use std::iter;
use api::v1::SemanticType;
use datafusion_expr::{binary_expr, col, lit, Expr as DfExpr, Operator};
@@ -424,15 +423,15 @@ mod tests {
Batch::new(
primary_key,
Arc::new(UInt64Vector::from_iter_values(
iter::repeat(0).take(num_rows),
)),
Arc::new(UInt64Vector::from_iter_values(
iter::repeat(0).take(num_rows),
)),
Arc::new(UInt8Vector::from_iter_values(
iter::repeat(1).take(num_rows),
)),
Arc::new(UInt64Vector::from_iter_values(std::iter::repeat_n(
0, num_rows,
))),
Arc::new(UInt64Vector::from_iter_values(std::iter::repeat_n(
0, num_rows,
))),
Arc::new(UInt8Vector::from_iter_values(std::iter::repeat_n(
1, num_rows,
))),
vec![u64_field],
)
.unwrap()

View File

@@ -27,7 +27,7 @@ use snafu::{OptionExt, ResultExt};
use store_api::storage::TimeSeriesRowSelector;
use crate::error::{
DecodeStatsSnafu, FieldTypeMismatchSnafu, FilterRecordBatchSnafu, Result, StatsNotPresentSnafu,
DecodeStatsSnafu, FieldTypeMismatchSnafu, RecordBatchSnafu, Result, StatsNotPresentSnafu,
};
use crate::read::compat::CompatBatch;
use crate::read::last_row::RowGroupLastRowCachedReader;
@@ -294,7 +294,7 @@ impl RangeBase {
};
if filter
.evaluate_scalar(&pk_value)
.context(FilterRecordBatchSnafu)?
.context(RecordBatchSnafu)?
{
continue;
} else {
@@ -311,11 +311,11 @@ impl RangeBase {
let field_col = &input.fields()[field_index].data;
filter
.evaluate_vector(field_col)
.context(FilterRecordBatchSnafu)?
.context(RecordBatchSnafu)?
}
SemanticType::Timestamp => filter
.evaluate_vector(input.timestamps())
.context(FilterRecordBatchSnafu)?,
.context(RecordBatchSnafu)?,
};
mask = mask.bitand(&result);

View File

@@ -134,7 +134,6 @@ impl WriteFormat {
/// Helper for reading the SST format.
pub struct ReadFormat {
/// The metadata stored in the SST.
metadata: RegionMetadataRef,
/// SST file schema.
arrow_schema: SchemaRef,
@@ -306,23 +305,17 @@ impl ReadFormat {
&self,
row_groups: &[impl Borrow<RowGroupMetaData>],
column_id: ColumnId,
) -> StatValues {
let Some(column) = self.metadata.column_by_id(column_id) else {
// No such column in the SST.
return StatValues::NoColumn;
};
) -> Option<ArrayRef> {
let column = self.metadata.column_by_id(column_id)?;
match column.semantic_type {
SemanticType::Tag => self.tag_values(row_groups, column, true),
SemanticType::Field => {
// Safety: `field_id_to_index` is initialized by the semantic type.
let index = self.field_id_to_index.get(&column_id).unwrap();
let stats = Self::column_values(row_groups, column, *index, true);
StatValues::from_stats_opt(stats)
let index = self.field_id_to_index.get(&column_id)?;
Self::column_values(row_groups, column, *index, true)
}
SemanticType::Timestamp => {
let index = self.time_index_position();
let stats = Self::column_values(row_groups, column, index, true);
StatValues::from_stats_opt(stats)
Self::column_values(row_groups, column, index, true)
}
}
}
@@ -332,23 +325,17 @@ impl ReadFormat {
&self,
row_groups: &[impl Borrow<RowGroupMetaData>],
column_id: ColumnId,
) -> StatValues {
let Some(column) = self.metadata.column_by_id(column_id) else {
// No such column in the SST.
return StatValues::NoColumn;
};
) -> Option<ArrayRef> {
let column = self.metadata.column_by_id(column_id)?;
match column.semantic_type {
SemanticType::Tag => self.tag_values(row_groups, column, false),
SemanticType::Field => {
// Safety: `field_id_to_index` is initialized by the semantic type.
let index = self.field_id_to_index.get(&column_id).unwrap();
let stats = Self::column_values(row_groups, column, *index, false);
StatValues::from_stats_opt(stats)
let index = self.field_id_to_index.get(&column_id)?;
Self::column_values(row_groups, column, *index, false)
}
SemanticType::Timestamp => {
let index = self.time_index_position();
let stats = Self::column_values(row_groups, column, index, false);
StatValues::from_stats_opt(stats)
Self::column_values(row_groups, column, index, false)
}
}
}
@@ -358,23 +345,17 @@ impl ReadFormat {
&self,
row_groups: &[impl Borrow<RowGroupMetaData>],
column_id: ColumnId,
) -> StatValues {
let Some(column) = self.metadata.column_by_id(column_id) else {
// No such column in the SST.
return StatValues::NoColumn;
};
) -> Option<ArrayRef> {
let column = self.metadata.column_by_id(column_id)?;
match column.semantic_type {
SemanticType::Tag => StatValues::NoStats,
SemanticType::Tag => None,
SemanticType::Field => {
// Safety: `field_id_to_index` is initialized by the semantic type.
let index = self.field_id_to_index.get(&column_id).unwrap();
let stats = Self::column_null_counts(row_groups, *index);
StatValues::from_stats_opt(stats)
let index = self.field_id_to_index.get(&column_id)?;
Self::column_null_counts(row_groups, *index)
}
SemanticType::Timestamp => {
let index = self.time_index_position();
let stats = Self::column_null_counts(row_groups, index);
StatValues::from_stats_opt(stats)
Self::column_null_counts(row_groups, index)
}
}
}
@@ -409,7 +390,8 @@ impl ReadFormat {
row_groups: &[impl Borrow<RowGroupMetaData>],
column: &ColumnMetadata,
is_min: bool,
) -> StatValues {
) -> Option<ArrayRef> {
let primary_key_encoding = self.metadata.primary_key_encoding;
let is_first_tag = self
.metadata
.primary_key
@@ -418,28 +400,9 @@ impl ReadFormat {
.unwrap_or(false);
if !is_first_tag {
// Only the min-max of the first tag is available in the primary key.
return StatValues::NoStats;
return None;
}
StatValues::from_stats_opt(self.first_tag_values(row_groups, column, is_min))
}
/// Returns min/max values of the first tag.
/// Returns None if the tag does not have statistics.
fn first_tag_values(
&self,
row_groups: &[impl Borrow<RowGroupMetaData>],
column: &ColumnMetadata,
is_min: bool,
) -> Option<ArrayRef> {
debug_assert!(self
.metadata
.primary_key
.first()
.map(|id| *id == column.column_id)
.unwrap_or(false));
let primary_key_encoding = self.metadata.primary_key_encoding;
let converter = build_primary_key_codec_with_fields(
primary_key_encoding,
[(
@@ -489,7 +452,6 @@ impl ReadFormat {
}
/// Returns min/max values of specific non-tag columns.
/// Returns None if the column does not have statistics.
fn column_values(
row_groups: &[impl Borrow<RowGroupMetaData>],
column: &ColumnMetadata,
@@ -582,29 +544,6 @@ impl ReadFormat {
}
}
/// Values of column statistics of the SST.
///
/// It also distinguishes the case that a column is not found and
/// the column exists but has no statistics.
pub enum StatValues {
/// Values of each row group.
Values(ArrayRef),
/// No such column.
NoColumn,
/// Column exists but has no statistics.
NoStats,
}
impl StatValues {
/// Creates a new `StatValues` instance from optional statistics.
pub fn from_stats_opt(stats: Option<ArrayRef>) -> Self {
match stats {
Some(stats) => StatValues::Values(stats),
None => StatValues::NoStats,
}
}
}
#[cfg(test)]
impl ReadFormat {
/// Creates a helper with existing `metadata` and all columns.
@@ -816,7 +755,7 @@ mod tests {
));
let mut keys = vec![];
for (index, num_rows) in pk_row_nums.iter().map(|v| v.1).enumerate() {
keys.extend(std::iter::repeat(index as u32).take(num_rows));
keys.extend(std::iter::repeat_n(index as u32, num_rows));
}
let keys = UInt32Array::from(keys);
Arc::new(DictionaryArray::new(keys, values))

View File

@@ -25,7 +25,7 @@ use parquet::file::metadata::RowGroupMetaData;
use store_api::metadata::RegionMetadataRef;
use store_api::storage::ColumnId;
use crate::sst::parquet::format::{ReadFormat, StatValues};
use crate::sst::parquet::format::ReadFormat;
/// Statistics for pruning row groups.
pub(crate) struct RowGroupPruningStats<'a, T> {
@@ -100,18 +100,16 @@ impl<T: Borrow<RowGroupMetaData>> PruningStatistics for RowGroupPruningStats<'_,
fn min_values(&self, column: &Column) -> Option<ArrayRef> {
let column_id = self.column_id_to_prune(&column.name)?;
match self.read_format.min_values(self.row_groups, column_id) {
StatValues::Values(values) => Some(values),
StatValues::NoColumn => self.compat_default_value(&column.name),
StatValues::NoStats => None,
Some(values) => Some(values),
None => self.compat_default_value(&column.name),
}
}
fn max_values(&self, column: &Column) -> Option<ArrayRef> {
let column_id = self.column_id_to_prune(&column.name)?;
match self.read_format.max_values(self.row_groups, column_id) {
StatValues::Values(values) => Some(values),
StatValues::NoColumn => self.compat_default_value(&column.name),
StatValues::NoStats => None,
Some(values) => Some(values),
None => self.compat_default_value(&column.name),
}
}
@@ -120,12 +118,10 @@ impl<T: Borrow<RowGroupMetaData>> PruningStatistics for RowGroupPruningStats<'_,
}
fn null_counts(&self, column: &Column) -> Option<ArrayRef> {
let column_id = self.column_id_to_prune(&column.name)?;
match self.read_format.null_counts(self.row_groups, column_id) {
StatValues::Values(values) => Some(values),
StatValues::NoColumn => self.compat_null_count(&column.name),
StatValues::NoStats => None,
}
let Some(column_id) = self.column_id_to_prune(&column.name) else {
return self.compat_null_count(&column.name);
};
self.read_format.null_counts(self.row_groups, column_id)
}
fn row_counts(&self, _column: &Column) -> Option<ArrayRef> {

View File

@@ -85,11 +85,9 @@ impl ImpureDefaultFiller {
.schema
.iter()
.filter_map(|schema| {
if self.impure_columns.contains_key(&schema.column_name) {
Some(&schema.column_name)
} else {
None
}
self.impure_columns
.contains_key(&schema.column_name)
.then_some(&schema.column_name)
})
.collect();

View File

@@ -37,7 +37,7 @@ use common_meta::rpc::ddl::{
};
use common_meta::rpc::router::{Partition, Partition as MetaPartition};
use common_query::Output;
use common_telemetry::{debug, info, tracing, warn};
use common_telemetry::{debug, info, tracing};
use common_time::Timezone;
use datafusion_common::tree_node::TreeNodeVisitor;
use datafusion_expr::LogicalPlan;
@@ -369,7 +369,7 @@ impl StatementExecutor {
query_context: QueryContextRef,
) -> Result<SubmitDdlTaskResponse> {
let flow_type = self
.determine_flow_type(&expr, query_context.clone())
.determine_flow_type(&expr.sql, query_context.clone())
.await?;
info!("determined flow={} type: {:#?}", expr.flow_name, flow_type);
@@ -398,49 +398,9 @@ impl StatementExecutor {
/// Determine the flow type based on the SQL query
///
/// If it contains aggregation or distinct, then it is a batch flow, otherwise it is a streaming flow
async fn determine_flow_type(
&self,
expr: &CreateFlowExpr,
query_ctx: QueryContextRef,
) -> Result<FlowType> {
// first check if source table's ttl is instant, if it is, force streaming mode
for src_table_name in &expr.source_table_names {
let table = self
.catalog_manager()
.table(
&src_table_name.catalog_name,
&src_table_name.schema_name,
&src_table_name.table_name,
Some(&query_ctx),
)
.await
.map_err(BoxedError::new)
.context(ExternalSnafu)?
.with_context(|| TableNotFoundSnafu {
table_name: format_full_table_name(
&src_table_name.catalog_name,
&src_table_name.schema_name,
&src_table_name.table_name,
),
})?;
// instant source table can only be handled by streaming mode
if table.table_info().meta.options.ttl == Some(common_time::TimeToLive::Instant) {
warn!(
"Source table `{}` for flow `{}`'s ttl=instant, fallback to streaming mode",
format_full_table_name(
&src_table_name.catalog_name,
&src_table_name.schema_name,
&src_table_name.table_name
),
expr.flow_name
);
return Ok(FlowType::Streaming);
}
}
async fn determine_flow_type(&self, sql: &str, query_ctx: QueryContextRef) -> Result<FlowType> {
let engine = &self.query_engine;
let stmt = QueryLanguageParser::parse_sql(&expr.sql, &query_ctx)
let stmt = QueryLanguageParser::parse_sql(sql, &query_ctx)
.map_err(BoxedError::new)
.context(ExternalSnafu)?;
let plan = engine

View File
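A toy, string-level sketch of the documented rule above ("aggregation or distinct => batching flow, otherwise streaming"). The real `determine_flow_type` parses the SQL and inspects the logical plan rather than matching keywords, so treat this purely as illustration.

#[derive(Debug, PartialEq)]
enum FlowType {
    Batching,
    Streaming,
}

fn toy_determine_flow_type(sql: &str) -> FlowType {
    let lower = sql.to_lowercase();
    let aggregating = ["count(", "sum(", "avg(", "min(", "max(", " group by ", " distinct "]
        .iter()
        .any(|kw| lower.contains(kw));
    if aggregating {
        FlowType::Batching
    } else {
        FlowType::Streaming
    }
}

fn main() {
    assert_eq!(
        toy_determine_flow_type("SELECT DISTINCT dis FROM distinct_basic"),
        FlowType::Batching
    );
    assert_eq!(
        toy_determine_flow_type("SELECT number FROM numbers_input_basic"),
        FlowType::Streaming
    );
}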

@@ -325,7 +325,7 @@ impl std::str::FromStr for Pattern {
impl Pattern {
fn check(&self) -> Result<()> {
if self.len() == 0 {
if self.is_empty() {
return DissectEmptyPatternSnafu.fail();
}

View File

@@ -91,9 +91,9 @@ impl UserDefinedLogicalNodeCore for InstantManipulate {
_exprs: Vec<Expr>,
inputs: Vec<LogicalPlan>,
) -> DataFusionResult<Self> {
if inputs.is_empty() {
if inputs.len() != 1 {
return Err(DataFusionError::Internal(
"InstantManipulate should have at least one input".to_string(),
"InstantManipulate should have exactly one input".to_string(),
));
}
@@ -354,6 +354,9 @@ impl Stream for InstantManipulateStream {
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
let poll = match ready!(self.input.poll_next_unpin(cx)) {
Some(Ok(batch)) => {
if batch.num_rows() == 0 {
return Poll::Pending;
}
let timer = std::time::Instant::now();
self.num_series.add(1);
let result = Ok(batch).and_then(|batch| self.manipulate(batch));

View File
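A minimal sketch of the tightened arity check above: `with_exprs_and_inputs` now requires exactly one input rather than "at least one". The helper below is illustrative, not the DataFusion trait itself.

fn take_single_input<T>(mut inputs: Vec<T>) -> Result<T, String> {
    if inputs.len() != 1 {
        // Reject both zero inputs and accidental extras.
        return Err(format!("expected exactly one input, got {}", inputs.len()));
    }
    Ok(inputs.pop().expect("length checked above"))
}

fn main() {
    assert!(take_single_input(Vec::<i32>::new()).is_err());
    assert!(take_single_input(vec![1, 2]).is_err());
    assert_eq!(take_single_input(vec![42]).unwrap(), 42);
}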

@@ -42,7 +42,7 @@ use greptime_proto::substrait_extension as pb;
use prost::Message;
use snafu::ResultExt;
use crate::error::{DataFusionPlanningSnafu, DeserializeSnafu, Result};
use crate::error::{DeserializeSnafu, Result};
use crate::extension_plan::{Millisecond, METRIC_NUM_SERIES};
use crate::metrics::PROMQL_SERIES_COUNT;
use crate::range_array::RangeArray;
@@ -194,20 +194,26 @@ impl RangeManipulate {
pub fn deserialize(bytes: &[u8]) -> Result<Self> {
let pb_range_manipulate = pb::RangeManipulate::decode(bytes).context(DeserializeSnafu)?;
let empty_schema = Arc::new(DFSchema::empty());
let placeholder_plan = LogicalPlan::EmptyRelation(EmptyRelation {
produce_one_row: false,
schema: Arc::new(DFSchema::empty()),
schema: empty_schema.clone(),
});
Self::new(
pb_range_manipulate.start,
pb_range_manipulate.end,
pb_range_manipulate.interval,
pb_range_manipulate.range,
pb_range_manipulate.time_index,
pb_range_manipulate.tag_columns,
placeholder_plan,
)
.context(DataFusionPlanningSnafu)
// Unlike `Self::new()`, this method doesn't validate the input schema, since that
// check would fail on the empty placeholder schema.
// But this is OK because DataFusion guarantees to call `with_exprs_and_inputs` for the
// deserialized plan, which rebuilds the schema from the real input.
Ok(Self {
start: pb_range_manipulate.start,
end: pb_range_manipulate.end,
interval: pb_range_manipulate.interval,
range: pb_range_manipulate.range,
time_index: pb_range_manipulate.time_index,
field_columns: pb_range_manipulate.tag_columns,
input: placeholder_plan,
output_schema: empty_schema,
})
}
}
@@ -270,14 +276,19 @@ impl UserDefinedLogicalNodeCore for RangeManipulate {
fn with_exprs_and_inputs(
&self,
_exprs: Vec<Expr>,
inputs: Vec<LogicalPlan>,
mut inputs: Vec<LogicalPlan>,
) -> DataFusionResult<Self> {
if inputs.is_empty() {
if inputs.len() != 1 {
return Err(DataFusionError::Internal(
"RangeManipulate should have at least one input".to_string(),
"RangeManipulate should have exactly one input".to_string(),
));
}
let input: LogicalPlan = inputs.pop().unwrap();
let input_schema = input.schema();
let output_schema =
Self::calculate_output_schema(input_schema, &self.time_index, &self.field_columns)?;
Ok(Self {
start: self.start,
end: self.end,
@@ -285,8 +296,8 @@ impl UserDefinedLogicalNodeCore for RangeManipulate {
range: self.range,
time_index: self.time_index.clone(),
field_columns: self.field_columns.clone(),
input: inputs.into_iter().next().unwrap(),
output_schema: self.output_schema.clone(),
input,
output_schema,
})
}
}

View File
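A sketch of the rewiring rule above: when DataFusion hands the node new inputs, derived state (here, the output schema) is recomputed from the real input instead of cloned from the placeholder created during deserialization. The `Schema` and `Node` types below are simplified stand-ins, and the extra "range_value" column is an assumed example of a derived field.

#[derive(Clone, Debug, PartialEq)]
struct Schema(Vec<String>);

struct Node {
    output_schema: Schema,
}

impl Node {
    fn with_new_input(&self, input_schema: &Schema) -> Node {
        // Recompute rather than `self.output_schema.clone()`, so the empty placeholder
        // schema from `deserialize` is replaced by one derived from the actual input.
        let output_schema = Schema(
            input_schema
                .0
                .iter()
                .cloned()
                .chain(["range_value".to_string()])
                .collect(),
        );
        Node { output_schema }
    }
}

fn main() {
    let placeholder = Node { output_schema: Schema(vec![]) };
    let rewired = placeholder.with_new_input(&Schema(vec!["ts".into(), "val".into()]));
    assert_eq!(
        rewired.output_schema,
        Schema(vec!["ts".into(), "val".into(), "range_value".into()])
    );
}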

@@ -106,6 +106,10 @@ impl SeriesDivide {
})
}
pub fn tags(&self) -> &[String] {
&self.tag_columns
}
pub fn serialize(&self) -> Vec<u8> {
pb::SeriesDivide {
tag_columns: self.tag_columns.clone(),
@@ -315,7 +319,9 @@ impl Stream for SeriesDivideStream {
let next_batch = ready!(self.as_mut().fetch_next_batch(cx)).transpose()?;
let timer = std::time::Instant::now();
if let Some(next_batch) = next_batch {
self.buffer.push(next_batch);
if next_batch.num_rows() != 0 {
self.buffer.push(next_batch);
}
continue;
} else {
// input stream is ended

View File

@@ -40,7 +40,7 @@ pub use holt_winters::HoltWinters;
pub use idelta::IDelta;
pub use predict_linear::PredictLinear;
pub use quantile::QuantileOverTime;
pub use quantile_aggr::quantile_udaf;
pub use quantile_aggr::{quantile_udaf, QUANTILE_NAME};
pub use resets::Resets;
pub use round::Round;

View File

@@ -228,7 +228,7 @@ impl<const IS_COUNTER: bool, const IS_RATE: bool> ExtrapolatedRate<IS_COUNTER, I
// delta
impl ExtrapolatedRate<false, false> {
pub fn name() -> &'static str {
pub const fn name() -> &'static str {
"prom_delta"
}
@@ -239,7 +239,7 @@ impl ExtrapolatedRate<false, false> {
// rate
impl ExtrapolatedRate<true, true> {
pub fn name() -> &'static str {
pub const fn name() -> &'static str {
"prom_rate"
}
@@ -250,7 +250,7 @@ impl ExtrapolatedRate<true, true> {
// increase
impl ExtrapolatedRate<true, false> {
pub fn name() -> &'static str {
pub const fn name() -> &'static str {
"prom_increase"
}

View File

@@ -27,7 +27,7 @@ use datatypes::arrow::datatypes::{DataType, Field, Float64Type};
use crate::functions::quantile::quantile_impl;
const QUANTILE_NAME: &str = "quantile";
pub const QUANTILE_NAME: &str = "quantile";
const VALUES_FIELD_NAME: &str = "values";
const DEFAULT_LIST_FIELD_NAME: &str = "item";

View File

@@ -55,12 +55,16 @@ impl Categorizer {
LogicalPlan::Filter(filter) => Self::check_expr(&filter.predicate),
LogicalPlan::Window(_) => Commutativity::Unimplemented,
LogicalPlan::Aggregate(aggr) => {
if Self::check_partition(&aggr.group_expr, &partition_cols) {
return Commutativity::Commutative;
if !Self::check_partition(&aggr.group_expr, &partition_cols) {
return Commutativity::NonCommutative;
}
// check all children exprs and uses the strictest level
Commutativity::Unimplemented
for expr in &aggr.aggr_expr {
let commutativity = Self::check_expr(expr);
if !matches!(commutativity, Commutativity::Commutative) {
return commutativity;
}
}
Commutativity::Commutative
}
LogicalPlan::Sort(_) => {
if partition_cols.is_empty() {
@@ -94,7 +98,7 @@ impl Categorizer {
}
}
LogicalPlan::Extension(extension) => {
Self::check_extension_plan(extension.node.as_ref() as _)
Self::check_extension_plan(extension.node.as_ref() as _, &partition_cols)
}
LogicalPlan::Distinct(_) => {
if partition_cols.is_empty() {
@@ -116,13 +120,30 @@ impl Categorizer {
}
}
pub fn check_extension_plan(plan: &dyn UserDefinedLogicalNode) -> Commutativity {
pub fn check_extension_plan(
plan: &dyn UserDefinedLogicalNode,
partition_cols: &[String],
) -> Commutativity {
match plan.name() {
name if name == EmptyMetric::name()
name if name == SeriesDivide::name() => {
let series_divide = plan.as_any().downcast_ref::<SeriesDivide>().unwrap();
let tags = series_divide.tags().iter().collect::<HashSet<_>>();
for partition_col in partition_cols {
if !tags.contains(partition_col) {
return Commutativity::NonCommutative;
}
}
Commutativity::Commutative
}
name if name == SeriesNormalize::name()
|| name == InstantManipulate::name()
|| name == SeriesNormalize::name()
|| name == RangeManipulate::name()
|| name == SeriesDivide::name()
|| name == RangeManipulate::name() =>
{
// They should always follow SeriesDivide.
// Either all commutative or all non-commutative (which will be blocked by SeriesDivide).
Commutativity::Commutative
}
name if name == EmptyMetric::name()
|| name == MergeScanLogicalPlan::name()
|| name == MergeSortLogicalPlan::name() =>
{
@@ -148,8 +169,9 @@ impl Categorizer {
| Expr::Negative(_)
| Expr::Between(_)
| Expr::Exists(_)
| Expr::InList(_)
| Expr::ScalarFunction(_) => Commutativity::Commutative,
| Expr::InList(_) => Commutativity::Commutative,
Expr::ScalarFunction(_udf) => Commutativity::Commutative,
Expr::AggregateFunction(_udaf) => Commutativity::Commutative,
Expr::Like(_)
| Expr::SimilarTo(_)
@@ -158,7 +180,6 @@ impl Categorizer {
| Expr::Case(_)
| Expr::Cast(_)
| Expr::TryCast(_)
| Expr::AggregateFunction(_)
| Expr::WindowFunction(_)
| Expr::InSubquery(_)
| Expr::ScalarSubquery(_)

View File
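A minimal sketch of the new `SeriesDivide` rule above: pushing the plan below a partitioned merge stays valid only if every partition column is also one of the divide's tag columns, so each time series lives entirely within one partition. Column names here are illustrative.

use std::collections::HashSet;

fn series_divide_is_commutative(tags: &[String], partition_cols: &[String]) -> bool {
    let tags: HashSet<&String> = tags.iter().collect();
    // Every partition column must be covered by the divide's tag columns.
    partition_cols.iter().all(|col| tags.contains(col))
}

fn main() {
    let tags = vec!["host".to_string(), "idc".to_string()];
    assert!(series_divide_is_commutative(&tags, &["host".to_string()]));
    assert!(!series_divide_is_commutative(&tags, &["region".to_string()]));
    // No partition columns at all: trivially commutative.
    assert!(series_divide_is_commutative(&tags, &[]));
}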

@@ -14,7 +14,6 @@
#![feature(let_chains)]
#![feature(int_roundings)]
#![feature(trait_upcasting)]
#![feature(try_blocks)]
#![feature(stmt_expr_attributes)]
#![feature(iterator_try_collect)]

View File

@@ -62,21 +62,28 @@ impl ParallelizeScan {
} else if let Some(region_scan_exec) =
plan.as_any().downcast_ref::<RegionScanExec>()
{
let expected_partition_num = config.execution.target_partitions;
if region_scan_exec.is_partition_set() {
return Ok(Transformed::no(plan));
}
// don't parallelize if we want per series distribution
if matches!(
region_scan_exec.distribution(),
Some(TimeSeriesDistribution::PerSeries)
) {
return Ok(Transformed::no(plan));
let partition_range = region_scan_exec.get_partition_ranges();
// HACK: Allocate expected_partition_num empty partitions to indicate
// the expected partition number.
let mut new_partitions = vec![vec![]; expected_partition_num];
new_partitions[0] = partition_range;
let new_plan = region_scan_exec
.with_new_partitions(new_partitions, expected_partition_num)
.map_err(|e| DataFusionError::External(e.into_inner()))?;
return Ok(Transformed::yes(Arc::new(new_plan)));
}
let ranges = region_scan_exec.get_partition_ranges();
let total_range_num = ranges.len();
let expected_partition_num = config.execution.target_partitions;
// assign ranges to each partition
let mut partition_ranges =
@@ -131,26 +138,18 @@ impl ParallelizeScan {
) -> Vec<Vec<PartitionRange>> {
if ranges.is_empty() {
// Returns a single partition with no range.
return vec![vec![]];
return vec![vec![]; expected_partition_num];
}
if ranges.len() == 1 {
return vec![ranges];
let mut vec = vec![vec![]; expected_partition_num];
vec[0] = ranges;
return vec;
}
// Sort ranges by number of rows in descending order.
ranges.sort_by(|a, b| b.num_rows.cmp(&a.num_rows));
// Get the max row number of the ranges. Note that the number of rows may be 0 if statistics are not available.
let max_rows = ranges[0].num_rows;
let total_rows = ranges.iter().map(|range| range.num_rows).sum::<usize>();
// Computes the partition num by the max row number. This eliminates the unbalance of the partitions.
let balanced_partition_num = if max_rows > 0 {
total_rows.div_ceil(max_rows)
} else {
ranges.len()
};
let actual_partition_num = expected_partition_num.min(balanced_partition_num).max(1);
let mut partition_ranges = vec![vec![]; actual_partition_num];
let mut partition_ranges = vec![vec![]; expected_partition_num];
#[derive(Eq, PartialEq)]
struct HeapNode {
@@ -172,7 +171,7 @@ impl ParallelizeScan {
}
let mut part_heap =
BinaryHeap::from_iter((0..actual_partition_num).map(|partition_idx| HeapNode {
BinaryHeap::from_iter((0..expected_partition_num).map(|partition_idx| HeapNode {
num_rows: 0,
partition_idx,
}));
@@ -263,7 +262,7 @@ mod test {
];
assert_eq!(result, expected);
// assign 4 ranges to 5 partitions. Only 4 partitions are returned.
// assign 4 ranges to 5 partitions.
let expected_partition_num = 5;
let result = ParallelizeScan::assign_partition_range(ranges, expected_partition_num);
let expected = vec![
@@ -273,32 +272,31 @@ mod test {
num_rows: 250,
identifier: 4,
}],
vec![PartitionRange {
start: Timestamp::new(0, TimeUnit::Second),
end: Timestamp::new(10, TimeUnit::Second),
num_rows: 100,
identifier: 1,
}],
vec![PartitionRange {
start: Timestamp::new(10, TimeUnit::Second),
end: Timestamp::new(20, TimeUnit::Second),
num_rows: 200,
identifier: 2,
}],
vec![
PartitionRange {
start: Timestamp::new(20, TimeUnit::Second),
end: Timestamp::new(30, TimeUnit::Second),
num_rows: 150,
identifier: 3,
},
PartitionRange {
start: Timestamp::new(0, TimeUnit::Second),
end: Timestamp::new(10, TimeUnit::Second),
num_rows: 100,
identifier: 1,
},
],
vec![],
vec![PartitionRange {
start: Timestamp::new(20, TimeUnit::Second),
end: Timestamp::new(30, TimeUnit::Second),
num_rows: 150,
identifier: 3,
}],
];
assert_eq!(result, expected);
// assign 0 ranges to 5 partitions. Only 1 partition is returned.
// assign 0 ranges to 5 partitions. Should return 5 empty ranges.
let result = ParallelizeScan::assign_partition_range(vec![], 5);
assert_eq!(result.len(), 1);
assert_eq!(result.len(), 5);
}
#[test]

View File
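A self-contained sketch of the new assignment strategy above, under the simplifying assumption that a range is just its row count: always produce `expected_partition_num` buckets (some possibly empty), sort ranges by size descending, and greedily hand each range to the currently least-loaded bucket via a min-heap.

use std::cmp::Reverse;
use std::collections::BinaryHeap;

fn assign_partition_range(mut ranges: Vec<usize>, expected_partition_num: usize) -> Vec<Vec<usize>> {
    let mut partitions: Vec<Vec<usize>> = vec![vec![]; expected_partition_num];
    if ranges.is_empty() {
        // Mirrors the updated behavior: still return `expected_partition_num` empty partitions.
        return partitions;
    }
    // Largest ranges first, then push each one onto the least-loaded partition.
    ranges.sort_unstable_by(|a, b| b.cmp(a));
    let mut heap: BinaryHeap<Reverse<(usize, usize)>> = (0..expected_partition_num)
        .map(|partition_idx| Reverse((0, partition_idx)))
        .collect();
    for num_rows in ranges {
        let Reverse((load, partition_idx)) = heap.pop().expect("heap holds one node per partition");
        partitions[partition_idx].push(num_rows);
        heap.push(Reverse((load + num_rows, partition_idx)));
    }
    partitions
}

fn main() {
    // 4 ranges into 5 partitions: every partition is returned, one of them empty.
    let parts = assign_partition_range(vec![100, 200, 150, 250], 5);
    assert_eq!(parts.len(), 5);
    assert_eq!(parts.iter().filter(|p| p.is_empty()).count(), 1);
    // 0 ranges into 5 partitions: five empty partitions, matching the updated test above.
    assert_eq!(assign_partition_range(vec![], 5).len(), 5);
}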

@@ -348,7 +348,7 @@ impl PartSortStream {
&self,
sort_column: &ArrayRef,
) -> datafusion_common::Result<Option<usize>> {
if sort_column.len() == 0 {
if sort_column.is_empty() {
return Ok(Some(0));
}

View File

@@ -29,6 +29,11 @@ use datafusion::execution::{FunctionRegistry, SessionStateBuilder};
use datafusion::logical_expr::LogicalPlan;
use datafusion_expr::UserDefinedLogicalNode;
use greptime_proto::substrait_extension::MergeScan as PbMergeScan;
use promql::functions::{
AbsentOverTime, AvgOverTime, Changes, CountOverTime, Delta, Deriv, IDelta, Increase,
LastOverTime, MaxOverTime, MinOverTime, PresentOverTime, Rate, Resets, StddevOverTime,
StdvarOverTime, SumOverTime,
};
use prost::Message;
use session::context::QueryContextRef;
use snafu::ResultExt;
@@ -132,6 +137,26 @@ impl SubstraitPlanDecoder for DefaultPlanDecoder {
let _ = session_state.register_udaf(Arc::new(HllState::state_udf_impl()));
let _ = session_state.register_udaf(Arc::new(HllState::merge_udf_impl()));
let _ = session_state.register_udaf(Arc::new(GeoPathAccumulator::udf_impl()));
let _ = session_state.register_udf(Arc::new(IDelta::<false>::scalar_udf()));
let _ = session_state.register_udf(Arc::new(IDelta::<true>::scalar_udf()));
let _ = session_state.register_udf(Arc::new(Rate::scalar_udf()));
let _ = session_state.register_udf(Arc::new(Increase::scalar_udf()));
let _ = session_state.register_udf(Arc::new(Delta::scalar_udf()));
let _ = session_state.register_udf(Arc::new(Resets::scalar_udf()));
let _ = session_state.register_udf(Arc::new(Changes::scalar_udf()));
let _ = session_state.register_udf(Arc::new(Deriv::scalar_udf()));
let _ = session_state.register_udf(Arc::new(AvgOverTime::scalar_udf()));
let _ = session_state.register_udf(Arc::new(MinOverTime::scalar_udf()));
let _ = session_state.register_udf(Arc::new(MaxOverTime::scalar_udf()));
let _ = session_state.register_udf(Arc::new(SumOverTime::scalar_udf()));
let _ = session_state.register_udf(Arc::new(CountOverTime::scalar_udf()));
let _ = session_state.register_udf(Arc::new(LastOverTime::scalar_udf()));
let _ = session_state.register_udf(Arc::new(AbsentOverTime::scalar_udf()));
let _ = session_state.register_udf(Arc::new(PresentOverTime::scalar_udf()));
let _ = session_state.register_udf(Arc::new(StddevOverTime::scalar_udf()));
let _ = session_state.register_udf(Arc::new(StdvarOverTime::scalar_udf()));
// TODO(ruihang): add quantile_over_time, predict_linear, holt_winters, round
}
let logical_plan = DFLogicalSubstraitConvertor
.decode(message, session_state)

View File
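A generic sketch (deliberately not the DataFusion API) of why every PromQL function is registered before decoding: a serialized plan only carries function names, so decoding fails unless the registry already maps each name to an implementation. The `Udf` type and `identity` body below are placeholders.

use std::collections::HashMap;

type Udf = fn(f64) -> f64;

fn identity(v: f64) -> f64 {
    v
}

// Resolve a function referenced by name in a serialized plan.
fn resolve(name: &str, registry: &HashMap<&str, Udf>) -> Result<Udf, String> {
    registry
        .get(name)
        .copied()
        .ok_or_else(|| format!("function `{name}` not found in registry"))
}

fn main() {
    let mut registry: HashMap<&str, Udf> = HashMap::new();
    registry.insert("prom_rate", identity);
    assert!(resolve("prom_rate", &registry).is_ok());
    // Anything that was not registered up front cannot be decoded.
    assert!(resolve("prom_increase", &registry).is_err());
}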

@@ -31,6 +31,7 @@ use datafusion::error::Result as DfResult;
use datafusion::execution::context::{QueryPlanner, SessionConfig, SessionContext, SessionState};
use datafusion::execution::runtime_env::RuntimeEnv;
use datafusion::execution::SessionStateBuilder;
use datafusion::physical_optimizer::enforce_sorting::EnforceSorting;
use datafusion::physical_optimizer::optimizer::PhysicalOptimizer;
use datafusion::physical_optimizer::sanity_checker::SanityCheckPlan;
use datafusion::physical_optimizer::PhysicalOptimizerRule;
@@ -142,6 +143,9 @@ impl QueryEngineState {
physical_optimizer
.rules
.insert(1, Arc::new(PassDistribution));
physical_optimizer
.rules
.insert(2, Arc::new(EnforceSorting {}));
// Add rule for windowed sort
physical_optimizer
.rules

View File
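An illustrative sketch of how the physical optimizer's rule order is controlled above: the rules live in a plain `Vec`, and `insert` places `EnforceSorting` right after `PassDistribution`. The rule names below are just labels for the ordering, not the real optimizer types.

fn main() {
    // A simplified rule list; index 0 is whatever rule already sits first.
    let mut rules = vec!["ExistingRule", "SanityCheckPlan"];
    rules.insert(1, "PassDistribution");
    rules.insert(2, "EnforceSorting");
    assert_eq!(
        rules,
        ["ExistingRule", "PassDistribution", "EnforceSorting", "SanityCheckPlan"]
    );
}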

@@ -117,7 +117,7 @@ where
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
match self.inner.poll_ready(cx) {
Poll::Pending => Poll::Pending,
Poll::Ready(r) => Poll::Ready(r.map_err(Into::into)),
Poll::Ready(r) => Poll::Ready(r),
}
}

View File

@@ -17,7 +17,6 @@
#![feature(exclusive_wrapper)]
#![feature(let_chains)]
#![feature(if_let_guard)]
#![feature(trait_upcasting)]
use datafusion_expr::LogicalPlan;
use datatypes::schema::Schema;

View File

@@ -55,7 +55,7 @@ pub fn transform_statements(stmts: &mut Vec<Statement>) -> Result<()> {
}
}
visit_expressions_mut(stmts, |expr| {
let _ = visit_expressions_mut(stmts, |expr| {
for rule in RULES.iter() {
rule.visit_expr(expr)?;
}

View File

@@ -290,7 +290,7 @@ impl RegionMetadata {
pub fn project(&self, projection: &[ColumnId]) -> Result<RegionMetadata> {
// check time index
ensure!(
projection.iter().any(|id| *id == self.time_index),
projection.contains(&self.time_index),
TimeIndexNotFoundSnafu
);

View File

@@ -14,6 +14,7 @@
#![feature(assert_matches)]
#![feature(try_blocks)]
#![feature(let_chains)]
pub mod dist_table;
pub mod error;

View File

@@ -95,7 +95,7 @@ impl TableProvider for DfTableProviderAdapter {
filters: &[Expr],
limit: Option<usize>,
) -> DfResult<Arc<dyn ExecutionPlan>> {
let filters: Vec<Expr> = filters.iter().map(Clone::clone).map(Into::into).collect();
let filters: Vec<Expr> = filters.iter().map(Clone::clone).collect();
let request = {
let mut request = self.scan_req.lock().unwrap();
request.filters = filters;

View File

@@ -82,11 +82,17 @@ impl RegionScanExec {
if scanner.properties().is_logical_region() {
pk_names.sort_unstable();
}
let mut pk_columns: Vec<PhysicalSortExpr> = pk_names
.into_iter()
let pk_columns = pk_names
.iter()
.filter_map(
|col| Some(Arc::new(Column::new_with_schema(col, &arrow_schema).ok()?) as _),
)
.collect::<Vec<_>>();
let mut pk_sort_columns: Vec<PhysicalSortExpr> = pk_names
.iter()
.filter_map(|col| {
Some(PhysicalSortExpr::new(
Arc::new(Column::new_with_schema(&col, &arrow_schema).ok()?) as _,
Arc::new(Column::new_with_schema(col, &arrow_schema).ok()?) as _,
SortOptions {
descending: false,
nulls_first: true,
@@ -113,28 +119,37 @@ impl RegionScanExec {
let eq_props = match request.distribution {
Some(TimeSeriesDistribution::PerSeries) => {
if let Some(ts) = ts_col {
pk_columns.push(ts);
pk_sort_columns.push(ts);
}
EquivalenceProperties::new_with_orderings(
arrow_schema.clone(),
&[LexOrdering::new(pk_columns)],
&[LexOrdering::new(pk_sort_columns)],
)
}
Some(TimeSeriesDistribution::TimeWindowed) => {
if let Some(ts_col) = ts_col {
pk_columns.insert(0, ts_col);
pk_sort_columns.insert(0, ts_col);
}
EquivalenceProperties::new_with_orderings(
arrow_schema.clone(),
&[LexOrdering::new(pk_columns)],
&[LexOrdering::new(pk_sort_columns)],
)
}
None => EquivalenceProperties::new(arrow_schema.clone()),
};
let partitioning = match request.distribution {
Some(TimeSeriesDistribution::PerSeries) => {
Partitioning::Hash(pk_columns.clone(), num_output_partition)
}
Some(TimeSeriesDistribution::TimeWindowed) | None => {
Partitioning::UnknownPartitioning(num_output_partition)
}
};
let properties = PlanProperties::new(
eq_props,
Partitioning::UnknownPartitioning(num_output_partition),
partitioning,
EmissionType::Incremental,
Boundedness::Bounded,
);
@@ -188,9 +203,14 @@ impl RegionScanExec {
warn!("Setting partition ranges more than once for RegionScanExec");
}
let num_partitions = partitions.len();
let mut properties = self.properties.clone();
properties.partitioning = Partitioning::UnknownPartitioning(num_partitions);
let new_partitioning = match properties.partitioning {
Partitioning::Hash(ref columns, _) => {
Partitioning::Hash(columns.clone(), target_partitions)
}
_ => Partitioning::UnknownPartitioning(target_partitions),
};
properties.partitioning = new_partitioning;
{
let mut scanner = self.scanner.lock().unwrap();

View File
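An illustrative sketch of the partitioning decision above, with a local enum standing in for DataFusion's `Partitioning`: a `PerSeries` distribution hash-partitions on the primary-key columns (so one series maps to exactly one partition), while other distributions keep an unknown partitioning.

#[derive(Debug, PartialEq)]
enum Partitioning {
    Hash(Vec<String>, usize),
    Unknown(usize),
}

enum TimeSeriesDistribution {
    PerSeries,
    TimeWindowed,
}

fn choose_partitioning(
    distribution: Option<TimeSeriesDistribution>,
    pk_columns: Vec<String>,
    num_output_partition: usize,
) -> Partitioning {
    match distribution {
        Some(TimeSeriesDistribution::PerSeries) => {
            // Hash on the primary key so every time series stays within one partition.
            Partitioning::Hash(pk_columns, num_output_partition)
        }
        Some(TimeSeriesDistribution::TimeWindowed) | None => {
            Partitioning::Unknown(num_output_partition)
        }
    }
}

fn main() {
    let p = choose_partitioning(
        Some(TimeSeriesDistribution::PerSeries),
        vec!["host".to_string()],
        8,
    );
    assert_eq!(p, Partitioning::Hash(vec!["host".to_string()], 8));
    assert_eq!(
        choose_partitioning(Some(TimeSeriesDistribution::TimeWindowed), vec![], 8),
        Partitioning::Unknown(8)
    );
}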

@@ -85,11 +85,7 @@ pub struct UnstableTestVariables {
pub fn load_unstable_test_env_variables() -> UnstableTestVariables {
let _ = dotenv::dotenv();
let binary_path = env::var(GT_FUZZ_BINARY_PATH).expect("GT_FUZZ_BINARY_PATH not found");
let root_dir = if let Ok(root) = env::var(GT_FUZZ_INSTANCE_ROOT_DIR) {
Some(root)
} else {
None
};
let root_dir = env::var(GT_FUZZ_INSTANCE_ROOT_DIR).ok();
UnstableTestVariables {
binary_path,

View File

@@ -157,7 +157,7 @@ async fn execute_unstable_create_table(
}
Err(err) => {
// FIXME(weny): support to retry it later.
if matches!(err, sqlx::Error::PoolTimedOut { .. }) {
if matches!(err, sqlx::Error::PoolTimedOut) {
warn!("ignore pool timeout, sql: {sql}");
continue;
}

View File

@@ -489,10 +489,7 @@ async fn create_datanode_client(datanode: &Datanode) -> (String, Client) {
if let Some(client) = client {
Ok(TokioIo::new(client))
} else {
Err(std::io::Error::new(
std::io::ErrorKind::Other,
"Client already taken",
))
Err(std::io::Error::other("Client already taken"))
}
}
}),

View File

@@ -8,7 +8,7 @@ CREATE TABLE distinct_basic (
Affected Rows: 0
-- should fallback to streaming mode
-- should fail
-- SQLNESS REPLACE id=\d+ id=REDACTED
CREATE FLOW test_distinct_basic SINK TO out_distinct_basic AS
SELECT
@@ -16,151 +16,9 @@ SELECT
FROM
distinct_basic;
Affected Rows: 0
Error: 3001(EngineExecuteQuery), Unsupported: Source table `greptime.public.distinct_basic`(id=REDACTED) has instant TTL, Instant TTL is not supported under batching mode. Consider using a TTL longer than flush interval
-- flow_options should have a flow_type:streaming
-- since source table's ttl=instant
SELECT flow_name, options FROM INFORMATION_SCHEMA.FLOWS;
+---------------------+---------------------------+
| flow_name | options |
+---------------------+---------------------------+
| test_distinct_basic | {"flow_type":"streaming"} |
+---------------------+---------------------------+
SHOW CREATE TABLE distinct_basic;
+----------------+-----------------------------------------------------------+
| Table | Create Table |
+----------------+-----------------------------------------------------------+
| distinct_basic | CREATE TABLE IF NOT EXISTS "distinct_basic" ( |
| | "number" INT NULL, |
| | "ts" TIMESTAMP(3) NOT NULL DEFAULT current_timestamp(), |
| | TIME INDEX ("ts"), |
| | PRIMARY KEY ("number") |
| | ) |
| | |
| | ENGINE=mito |
| | WITH( |
| | ttl = 'instant' |
| | ) |
+----------------+-----------------------------------------------------------+
SHOW CREATE TABLE out_distinct_basic;
+--------------------+---------------------------------------------------+
| Table | Create Table |
+--------------------+---------------------------------------------------+
| out_distinct_basic | CREATE TABLE IF NOT EXISTS "out_distinct_basic" ( |
| | "dis" INT NULL, |
| | "update_at" TIMESTAMP(3) NULL, |
| | "__ts_placeholder" TIMESTAMP(3) NOT NULL, |
| | TIME INDEX ("__ts_placeholder"), |
| | PRIMARY KEY ("dis") |
| | ) |
| | |
| | ENGINE=mito |
| | |
+--------------------+---------------------------------------------------+
-- SQLNESS SLEEP 3s
INSERT INTO
distinct_basic
VALUES
(20, "2021-07-01 00:00:00.200"),
(20, "2021-07-01 00:00:00.200"),
(22, "2021-07-01 00:00:00.600");
Affected Rows: 0
-- SQLNESS REPLACE (ADMIN\sFLUSH_FLOW\('\w+'\)\s+\|\n\+-+\+\n\|\s+)[0-9]+\s+\| $1 FLOW_FLUSHED |
ADMIN FLUSH_FLOW('test_distinct_basic');
+-----------------------------------------+
| ADMIN FLUSH_FLOW('test_distinct_basic') |
+-----------------------------------------+
| FLOW_FLUSHED |
+-----------------------------------------+
SELECT
dis
FROM
out_distinct_basic;
+-----+
| dis |
+-----+
| 20 |
| 22 |
+-----+
SELECT number FROM distinct_basic;
++
++
-- SQLNESS SLEEP 6s
ADMIN FLUSH_TABLE('distinct_basic');
+-------------------------------------+
| ADMIN FLUSH_TABLE('distinct_basic') |
+-------------------------------------+
| 0 |
+-------------------------------------+
INSERT INTO
distinct_basic
VALUES
(23, "2021-07-01 00:00:01.600");
Affected Rows: 0
-- SQLNESS REPLACE (ADMIN\sFLUSH_FLOW\('\w+'\)\s+\|\n\+-+\+\n\|\s+)[0-9]+\s+\| $1 FLOW_FLUSHED |
ADMIN FLUSH_FLOW('test_distinct_basic');
+-----------------------------------------+
| ADMIN FLUSH_FLOW('test_distinct_basic') |
+-----------------------------------------+
| FLOW_FLUSHED |
+-----------------------------------------+
SELECT
dis
FROM
out_distinct_basic;
+-----+
| dis |
+-----+
| 20 |
| 22 |
| 23 |
+-----+
SELECT number FROM distinct_basic;
++
++
DROP FLOW test_distinct_basic;
Affected Rows: 0
DROP TABLE distinct_basic;
Affected Rows: 0
DROP TABLE out_distinct_basic;
Affected Rows: 0
-- test ttl = 5s
CREATE TABLE distinct_basic (
number INT,
ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
PRIMARY KEY(number),
TIME INDEX(ts)
)WITH ('ttl' = '5s');
ALTER TABLE distinct_basic SET 'ttl' = '5s';
Affected Rows: 0
@@ -172,26 +30,7 @@ FROM
Affected Rows: 0
-- flow_options should have a flow_type:batching
-- since source table's ttl=instant
SELECT flow_name, options FROM INFORMATION_SCHEMA.FLOWS;
+---------------------+--------------------------+
| flow_name | options |
+---------------------+--------------------------+
| test_distinct_basic | {"flow_type":"batching"} |
+---------------------+--------------------------+
-- SQLNESS ARG restart=true
SELECT 1;
+----------+
| Int64(1) |
+----------+
| 1 |
+----------+
-- SQLNESS SLEEP 3s
INSERT INTO
distinct_basic
VALUES
@@ -291,6 +130,41 @@ ADMIN FLUSH_FLOW('test_distinct_basic');
| FLOW_FLUSHED |
+-----------------------------------------+
SHOW CREATE TABLE distinct_basic;
+----------------+-----------------------------------------------------------+
| Table | Create Table |
+----------------+-----------------------------------------------------------+
| distinct_basic | CREATE TABLE IF NOT EXISTS "distinct_basic" ( |
| | "number" INT NULL, |
| | "ts" TIMESTAMP(3) NOT NULL DEFAULT current_timestamp(), |
| | TIME INDEX ("ts"), |
| | PRIMARY KEY ("number") |
| | ) |
| | |
| | ENGINE=mito |
| | WITH( |
| | ttl = '5s' |
| | ) |
+----------------+-----------------------------------------------------------+
SHOW CREATE TABLE out_distinct_basic;
+--------------------+---------------------------------------------------+
| Table | Create Table |
+--------------------+---------------------------------------------------+
| out_distinct_basic | CREATE TABLE IF NOT EXISTS "out_distinct_basic" ( |
| | "dis" INT NULL, |
| | "update_at" TIMESTAMP(3) NULL, |
| | "__ts_placeholder" TIMESTAMP(3) NOT NULL, |
| | TIME INDEX ("__ts_placeholder"), |
| | PRIMARY KEY ("dis") |
| | ) |
| | |
| | ENGINE=mito |
| | |
+--------------------+---------------------------------------------------+
SELECT
dis
FROM

View File

@@ -6,7 +6,7 @@ CREATE TABLE distinct_basic (
TIME INDEX(ts)
)WITH ('ttl' = 'instant');
-- should fallback to streaming mode
-- should fail
-- SQLNESS REPLACE id=\d+ id=REDACTED
CREATE FLOW test_distinct_basic SINK TO out_distinct_basic AS
SELECT
@@ -14,61 +14,7 @@ SELECT
FROM
distinct_basic;
-- flow_options should have a flow_type:streaming
-- since source table's ttl=instant
SELECT flow_name, options FROM INFORMATION_SCHEMA.FLOWS;
SHOW CREATE TABLE distinct_basic;
SHOW CREATE TABLE out_distinct_basic;
-- SQLNESS SLEEP 3s
INSERT INTO
distinct_basic
VALUES
(20, "2021-07-01 00:00:00.200"),
(20, "2021-07-01 00:00:00.200"),
(22, "2021-07-01 00:00:00.600");
-- SQLNESS REPLACE (ADMIN\sFLUSH_FLOW\('\w+'\)\s+\|\n\+-+\+\n\|\s+)[0-9]+\s+\| $1 FLOW_FLUSHED |
ADMIN FLUSH_FLOW('test_distinct_basic');
SELECT
dis
FROM
out_distinct_basic;
SELECT number FROM distinct_basic;
-- SQLNESS SLEEP 6s
ADMIN FLUSH_TABLE('distinct_basic');
INSERT INTO
distinct_basic
VALUES
(23, "2021-07-01 00:00:01.600");
-- SQLNESS REPLACE (ADMIN\sFLUSH_FLOW\('\w+'\)\s+\|\n\+-+\+\n\|\s+)[0-9]+\s+\| $1 FLOW_FLUSHED |
ADMIN FLUSH_FLOW('test_distinct_basic');
SELECT
dis
FROM
out_distinct_basic;
SELECT number FROM distinct_basic;
DROP FLOW test_distinct_basic;
DROP TABLE distinct_basic;
DROP TABLE out_distinct_basic;
-- test ttl = 5s
CREATE TABLE distinct_basic (
number INT,
ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
PRIMARY KEY(number),
TIME INDEX(ts)
)WITH ('ttl' = '5s');
ALTER TABLE distinct_basic SET 'ttl' = '5s';
CREATE FLOW test_distinct_basic SINK TO out_distinct_basic AS
SELECT
@@ -76,14 +22,7 @@ SELECT
FROM
distinct_basic;
-- flow_options should have a flow_type:batching
-- since source table's ttl=instant
SELECT flow_name, options FROM INFORMATION_SCHEMA.FLOWS;
-- SQLNESS ARG restart=true
SELECT 1;
-- SQLNESS SLEEP 3s
INSERT INTO
distinct_basic
VALUES
@@ -116,6 +55,10 @@ VALUES
-- SQLNESS REPLACE (ADMIN\sFLUSH_FLOW\('\w+'\)\s+\|\n\+-+\+\n\|\s+)[0-9]+\s+\| $1 FLOW_FLUSHED |
ADMIN FLUSH_FLOW('test_distinct_basic');
SHOW CREATE TABLE distinct_basic;
SHOW CREATE TABLE out_distinct_basic;
SELECT
dis
FROM

View File

@@ -44,15 +44,6 @@ ADMIN FLUSH_FLOW('test_numbers_basic');
+----------------------------------------+
-- SQLNESS ARG restart=true
SELECT 1;
+----------+
| Int64(1) |
+----------+
| 1 |
+----------+
-- SQLNESS SLEEP 3s
SHOW CREATE TABLE out_num_cnt_basic;
+-------------------+--------------------------------------------------+
@@ -110,16 +101,6 @@ GROUP BY
Affected Rows: 0
-- SQLNESS ARG restart=true
SELECT 1;
+----------+
| Int64(1) |
+----------+
| 1 |
+----------+
-- SQLNESS SLEEP 3s
SHOW CREATE TABLE out_num_cnt_basic;
+-------------------+--------------------------------------------------+
@@ -137,15 +118,6 @@ SHOW CREATE TABLE out_num_cnt_basic;
+-------------------+--------------------------------------------------+
-- SQLNESS ARG restart=true
SELECT 1;
+----------+
| Int64(1) |
+----------+
| 1 |
+----------+
-- SQLNESS SLEEP 3s
SHOW CREATE FLOW test_numbers_basic;
+--------------------+---------------------------------------------------------------------------------------+

View File

@@ -20,9 +20,6 @@ SHOW CREATE TABLE out_num_cnt_basic;
ADMIN FLUSH_FLOW('test_numbers_basic');
-- SQLNESS ARG restart=true
SELECT 1;
-- SQLNESS SLEEP 3s
SHOW CREATE TABLE out_num_cnt_basic;
SHOW CREATE FLOW test_numbers_basic;
@@ -47,16 +44,10 @@ FROM
numbers_input_basic
GROUP BY
ts;
-- SQLNESS ARG restart=true
SELECT 1;
-- SQLNESS SLEEP 3s
SHOW CREATE TABLE out_num_cnt_basic;
-- SQLNESS ARG restart=true
SELECT 1;
-- SQLNESS SLEEP 3s
SHOW CREATE FLOW test_numbers_basic;
SHOW CREATE TABLE out_num_cnt_basic;

View File

@@ -62,15 +62,6 @@ SHOW CREATE TABLE out_num_cnt_basic;
+-------------------+--------------------------------------------------+
-- SQLNESS ARG restart=true
SELECT 1;
+----------+
| Int64(1) |
+----------+
| 1 |
+----------+
-- SQLNESS SLEEP 3s
INSERT INTO
numbers_input_basic
VALUES
@@ -215,15 +206,6 @@ SHOW CREATE TABLE out_basic;
+-----------+---------------------------------------------+
-- SQLNESS ARG restart=true
SELECT 1;
+----------+
| Int64(1) |
+----------+
| 1 |
+----------+
-- SQLNESS SLEEP 3s
INSERT INTO
input_basic
VALUES
@@ -324,15 +306,6 @@ ADMIN FLUSH_FLOW('test_distinct_basic');
+-----------------------------------------+
-- SQLNESS ARG restart=true
SELECT 1;
+----------+
| Int64(1) |
+----------+
| 1 |
+----------+
-- SQLNESS SLEEP 3s
INSERT INTO
distinct_basic
VALUES
@@ -1692,15 +1665,6 @@ ADMIN FLUSH_FLOW('test_numbers_basic');
+----------------------------------------+
-- SQLNESS ARG restart=true
SELECT 1;
+----------+
| Int64(1) |
+----------+
| 1 |
+----------+
-- SQLNESS SLEEP 3s
INSERT INTO
numbers_input_basic
VALUES

View File

@@ -24,9 +24,6 @@ ADMIN FLUSH_FLOW('test_numbers_basic');
SHOW CREATE TABLE out_num_cnt_basic;
-- SQLNESS ARG restart=true
SELECT 1;
-- SQLNESS SLEEP 3s
INSERT INTO
numbers_input_basic
VALUES
@@ -94,9 +91,6 @@ FROM
SHOW CREATE TABLE out_basic;
-- SQLNESS ARG restart=true
SELECT 1;
-- SQLNESS SLEEP 3s
INSERT INTO
input_basic
VALUES
@@ -136,9 +130,6 @@ SHOW CREATE TABLE out_distinct_basic;
ADMIN FLUSH_FLOW('test_distinct_basic');
-- SQLNESS ARG restart=true
SELECT 1;
-- SQLNESS SLEEP 3s
INSERT INTO
distinct_basic
VALUES
@@ -797,9 +788,6 @@ SHOW CREATE TABLE out_num_cnt_basic;
ADMIN FLUSH_FLOW('test_numbers_basic');
-- SQLNESS ARG restart=true
SELECT 1;
-- SQLNESS SLEEP 3s
INSERT INTO
numbers_input_basic
VALUES

View File

@@ -730,21 +730,10 @@ SELECT key FROM api_stats;
+-----+
-- SQLNESS ARG restart=true
SELECT 1;
+----------+
| Int64(1) |
+----------+
| 1 |
+----------+
-- SQLNESS SLEEP 5s
INSERT INTO `api_log` (`time`, `key`, `status_code`, `method`, `path`, `raw_query`, `user_agent`, `client_ip`, `duration`, `count`) VALUES (now(), '2', 0, 'GET', '/lightning/v1/query', 'key=1&since=600', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36', '1', 21, 1);
Affected Rows: 1
-- wait more time so the flownode has time to recover flows
-- SQLNESS SLEEP 5s
-- SQLNESS REPLACE (ADMIN\sFLUSH_FLOW\('\w+'\)\s+\|\n\+-+\+\n\|\s+)[0-9]+\s+\| $1 FLOW_FLUSHED |
ADMIN FLUSH_FLOW('api_stats_flow');

View File

@@ -399,13 +399,8 @@ ADMIN FLUSH_FLOW('api_stats_flow');
SELECT key FROM api_stats;
-- SQLNESS ARG restart=true
SELECT 1;
-- SQLNESS SLEEP 5s
INSERT INTO `api_log` (`time`, `key`, `status_code`, `method`, `path`, `raw_query`, `user_agent`, `client_ip`, `duration`, `count`) VALUES (now(), '2', 0, 'GET', '/lightning/v1/query', 'key=1&since=600', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36', '1', 21, 1);
-- wait more time so the flownode has time to recover flows
-- SQLNESS SLEEP 5s
-- SQLNESS REPLACE (ADMIN\sFLUSH_FLOW\('\w+'\)\s+\|\n\+-+\+\n\|\s+)[0-9]+\s+\| $1 FLOW_FLUSHED |
ADMIN FLUSH_FLOW('api_stats_flow');

View File

@@ -50,7 +50,6 @@ ADMIN FLUSH_FLOW('calc_access_log_10s');
+-----------------------------------------+
-- query should return 3 rows
-- SQLNESS SORT_RESULT 3 1
SELECT "url", time_window FROM access_log_10s
ORDER BY
time_window;
@@ -64,7 +63,6 @@ ORDER BY
+------------+---------------------+
-- use hll_count to query the approximate data in access_log_10s
-- SQLNESS SORT_RESULT 3 1
SELECT "url", time_window, hll_count(state) FROM access_log_10s
ORDER BY
time_window;
@@ -78,7 +76,6 @@ ORDER BY
+------------+---------------------+---------------------------------+
-- further, we can aggregate 10 seconds of data to every minute, by using hll_merge to merge 10 seconds of hyperloglog state
-- SQLNESS SORT_RESULT 3 1
SELECT
"url",
date_bin('1 minute'::INTERVAL, time_window) AS time_window_1m,
@@ -94,8 +91,8 @@ ORDER BY
+------------+---------------------+------------+
| url | time_window_1m | uv_per_min |
+------------+---------------------+------------+
| /dashboard | 2025-03-04T00:00:00 | 3 |
| /not_found | 2025-03-04T00:00:00 | 1 |
| /dashboard | 2025-03-04T00:00:00 | 3 |
+------------+---------------------+------------+
DROP FLOW calc_access_log_10s;

View File

@@ -36,19 +36,16 @@ INSERT INTO access_log VALUES
ADMIN FLUSH_FLOW('calc_access_log_10s');
-- query should return 3 rows
-- SQLNESS SORT_RESULT 3 1
SELECT "url", time_window FROM access_log_10s
ORDER BY
time_window;
-- use hll_count to query the approximate data in access_log_10s
-- SQLNESS SORT_RESULT 3 1
SELECT "url", time_window, hll_count(state) FROM access_log_10s
ORDER BY
time_window;
-- further, we can aggregate 10 seconds of data to every minute, by using hll_merge to merge 10 seconds of hyperloglog state
-- SQLNESS SORT_RESULT 3 1
SELECT
"url",
date_bin('1 minute'::INTERVAL, time_window) AS time_window_1m,

View File

@@ -263,15 +263,6 @@ SELECT flow_name, table_catalog, flow_definition, source_table_names FROM INFORM
-- make sure it is the same after recovery
-- SQLNESS ARG restart=true
SELECT 1;
+----------+
| Int64(1) |
+----------+
| 1 |
+----------+
-- SQLNESS SLEEP 3s
SELECT flow_name, table_catalog, flow_definition, source_table_names FROM INFORMATION_SCHEMA.FLOWS WHERE flow_name='filter_numbers_show';
+---------------------+---------------+-------------------------------------------------------------+------------------------------------+

View File

@@ -108,9 +108,6 @@ SELECT flow_name, table_catalog, flow_definition, source_table_names FROM INFORM
-- make sure it is the same after recovery
-- SQLNESS ARG restart=true
SELECT 1;
-- SQLNESS SLEEP 3s
SELECT flow_name, table_catalog, flow_definition, source_table_names FROM INFORMATION_SCHEMA.FLOWS WHERE flow_name='filter_numbers_show';

View File

@@ -9,7 +9,7 @@ select GREATEST('1999-01-30', '2023-03-01');
+-------------------------------------------------+
| greatest(Utf8("1999-01-30"),Utf8("2023-03-01")) |
+-------------------------------------------------+
| 2023-03-01 |
| 2023-03-01T00:00:00 |
+-------------------------------------------------+
select GREATEST('2000-02-11'::Date, '2020-12-30'::Date);

View File

@@ -130,8 +130,7 @@ tql eval (3000, 3000, '1s') label_replace(histogram_quantile(0.8, histogram_buck
-- quantile with rate is covered in other cases
tql eval (3000, 3000, '1s') histogram_quantile(0.2, rate(histogram_bucket[5m]));
++
++
Error: 3001(EngineExecuteQuery), Unsupported arrow data type, type: Dictionary(Int64, Float64)
drop table histogram_bucket;

View File

@@ -1,158 +0,0 @@
CREATE TABLE IF NOT EXISTS `test_multi_pk_filter` ( `namespace` STRING NULL, `env` STRING NULL DEFAULT 'NULL', `flag` INT NULL, `total` BIGINT NULL, `greptime_timestamp` TIMESTAMP(9) NOT NULL, TIME INDEX (`greptime_timestamp`), PRIMARY KEY (`namespace`, `env`, `flag`) ) ENGINE=mito;
Affected Rows: 0
INSERT INTO test_multi_pk_filter
(namespace, env, flag, total, greptime_timestamp)
VALUES ('thermostat_v2', 'production', 1, 5289, '2023-05-15 10:00:00');
Affected Rows: 1
INSERT INTO test_multi_pk_filter
(namespace, env, flag, total, greptime_timestamp)
VALUES ('thermostat_v2', 'production', 0, 421, '2023-05-15 10:05:00');
Affected Rows: 1
INSERT INTO test_multi_pk_filter
(namespace, env, flag, total, greptime_timestamp)
VALUES ('thermostat_v2', 'dev', 1, 356, '2023-05-15 10:10:00');
Affected Rows: 1
ADMIN FLUSH_TABLE('test_multi_pk_filter');
+-------------------------------------------+
| ADMIN FLUSH_TABLE('test_multi_pk_filter') |
+-------------------------------------------+
| 0 |
+-------------------------------------------+
INSERT INTO test_multi_pk_filter
(namespace, env, flag, total, greptime_timestamp)
VALUES ('thermostat_v2', 'dev', 1, 412, '2023-05-15 10:15:00');
Affected Rows: 1
INSERT INTO test_multi_pk_filter
(namespace, env, flag, total, greptime_timestamp)
VALUES ('thermostat_v2', 'dev', 1, 298, '2023-05-15 10:20:00');
Affected Rows: 1
INSERT INTO test_multi_pk_filter
(namespace, env, flag, total, greptime_timestamp)
VALUES ('thermostat_v2', 'production', 1, 5289, '2023-05-15 10:25:00');
Affected Rows: 1
INSERT INTO test_multi_pk_filter
(namespace, env, flag, total, greptime_timestamp)
VALUES ('thermostat_v2', 'production', 1, 5874, '2023-05-15 10:30:00');
Affected Rows: 1
ADMIN FLUSH_TABLE('test_multi_pk_filter');
+-------------------------------------------+
| ADMIN FLUSH_TABLE('test_multi_pk_filter') |
+-------------------------------------------+
| 0 |
+-------------------------------------------+
INSERT INTO test_multi_pk_filter
(namespace, env, flag, total, greptime_timestamp)
VALUES ('thermostat_v2', 'production', 1, 6132, '2023-05-15 10:35:00');
Affected Rows: 1
INSERT INTO test_multi_pk_filter
(namespace, env, flag, total, greptime_timestamp)
VALUES ('thermostat_v2', 'testing', 1, 1287, '2023-05-15 10:40:00');
Affected Rows: 1
INSERT INTO test_multi_pk_filter
(namespace, env, flag, total, greptime_timestamp)
VALUES ('thermostat_v2', 'testing', 1, 1432, '2023-05-15 10:45:00');
Affected Rows: 1
INSERT INTO test_multi_pk_filter
(namespace, env, flag, total, greptime_timestamp)
VALUES ('thermostat_v2', 'testing', 1, 1056, '2023-05-15 10:50:00');
Affected Rows: 1
SELECT greptime_timestamp, namespace, env, total FROM test_multi_pk_filter WHERE
greptime_timestamp BETWEEN '2023-05-15 10:00:00' AND '2023-05-15 11:00:00' AND flag = 1 AND namespace = 'thermostat_v2'
ORDER BY greptime_timestamp;
+---------------------+---------------+------------+-------+
| greptime_timestamp | namespace | env | total |
+---------------------+---------------+------------+-------+
| 2023-05-15T10:00:00 | thermostat_v2 | production | 5289 |
| 2023-05-15T10:10:00 | thermostat_v2 | dev | 356 |
| 2023-05-15T10:15:00 | thermostat_v2 | dev | 412 |
| 2023-05-15T10:20:00 | thermostat_v2 | dev | 298 |
| 2023-05-15T10:25:00 | thermostat_v2 | production | 5289 |
| 2023-05-15T10:30:00 | thermostat_v2 | production | 5874 |
| 2023-05-15T10:35:00 | thermostat_v2 | production | 6132 |
| 2023-05-15T10:40:00 | thermostat_v2 | testing | 1287 |
| 2023-05-15T10:45:00 | thermostat_v2 | testing | 1432 |
| 2023-05-15T10:50:00 | thermostat_v2 | testing | 1056 |
+---------------------+---------------+------------+-------+
SELECT greptime_timestamp, namespace, env, total FROM test_multi_pk_filter WHERE
greptime_timestamp BETWEEN '2023-05-15 10:00:00' AND '2023-05-15 11:00:00' AND flag = 1 AND namespace = 'thermostat_v2' AND env='dev'
ORDER BY greptime_timestamp;
+---------------------+---------------+-----+-------+
| greptime_timestamp | namespace | env | total |
+---------------------+---------------+-----+-------+
| 2023-05-15T10:10:00 | thermostat_v2 | dev | 356 |
| 2023-05-15T10:15:00 | thermostat_v2 | dev | 412 |
| 2023-05-15T10:20:00 | thermostat_v2 | dev | 298 |
+---------------------+---------------+-----+-------+
DROP TABLE test_multi_pk_filter;
Affected Rows: 0
CREATE TABLE IF NOT EXISTS `test_multi_pk_null` ( `namespace` STRING NULL, `env` STRING NULL DEFAULT 'NULL', `total` BIGINT NULL, `greptime_timestamp` TIMESTAMP(9) NOT NULL, TIME INDEX (`greptime_timestamp`), PRIMARY KEY (`namespace`, `env`) ) ENGINE=mito;
Affected Rows: 0
INSERT INTO test_multi_pk_null
(namespace, env, total, greptime_timestamp)
VALUES ('thermostat_v2', 'production', 5289, '2023-05-15 10:00:00');
Affected Rows: 1
INSERT INTO test_multi_pk_null
(namespace, env, total, greptime_timestamp)
VALUES ('thermostat_v2', 'production', 421, '2023-05-15 10:05:00');
Affected Rows: 1
ADMIN FLUSH_TABLE('test_multi_pk_null');
+-----------------------------------------+
| ADMIN FLUSH_TABLE('test_multi_pk_null') |
+-----------------------------------------+
| 0 |
+-----------------------------------------+
SELECT * FROM test_multi_pk_null WHERE env IS NOT NULL;
+---------------+------------+-------+---------------------+
| namespace | env | total | greptime_timestamp |
+---------------+------------+-------+---------------------+
| thermostat_v2 | production | 5289 | 2023-05-15T10:00:00 |
| thermostat_v2 | production | 421 | 2023-05-15T10:05:00 |
+---------------+------------+-------+---------------------+
DROP TABLE test_multi_pk_null;
Affected Rows: 0

View File

@@ -1,66 +0,0 @@
CREATE TABLE IF NOT EXISTS `test_multi_pk_filter` ( `namespace` STRING NULL, `env` STRING NULL DEFAULT 'NULL', `flag` INT NULL, `total` BIGINT NULL, `greptime_timestamp` TIMESTAMP(9) NOT NULL, TIME INDEX (`greptime_timestamp`), PRIMARY KEY (`namespace`, `env`, `flag`) ) ENGINE=mito;
INSERT INTO test_multi_pk_filter
(namespace, env, flag, total, greptime_timestamp)
VALUES ('thermostat_v2', 'production', 1, 5289, '2023-05-15 10:00:00');
INSERT INTO test_multi_pk_filter
(namespace, env, flag, total, greptime_timestamp)
VALUES ('thermostat_v2', 'production', 0, 421, '2023-05-15 10:05:00');
INSERT INTO test_multi_pk_filter
(namespace, env, flag, total, greptime_timestamp)
VALUES ('thermostat_v2', 'dev', 1, 356, '2023-05-15 10:10:00');
ADMIN FLUSH_TABLE('test_multi_pk_filter');
INSERT INTO test_multi_pk_filter
(namespace, env, flag, total, greptime_timestamp)
VALUES ('thermostat_v2', 'dev', 1, 412, '2023-05-15 10:15:00');
INSERT INTO test_multi_pk_filter
(namespace, env, flag, total, greptime_timestamp)
VALUES ('thermostat_v2', 'dev', 1, 298, '2023-05-15 10:20:00');
INSERT INTO test_multi_pk_filter
(namespace, env, flag, total, greptime_timestamp)
VALUES ('thermostat_v2', 'production', 1, 5289, '2023-05-15 10:25:00');
INSERT INTO test_multi_pk_filter
(namespace, env, flag, total, greptime_timestamp)
VALUES ('thermostat_v2', 'production', 1, 5874, '2023-05-15 10:30:00');
ADMIN FLUSH_TABLE('test_multi_pk_filter');
INSERT INTO test_multi_pk_filter
(namespace, env, flag, total, greptime_timestamp)
VALUES ('thermostat_v2', 'production', 1, 6132, '2023-05-15 10:35:00');
INSERT INTO test_multi_pk_filter
(namespace, env, flag, total, greptime_timestamp)
VALUES ('thermostat_v2', 'testing', 1, 1287, '2023-05-15 10:40:00');
INSERT INTO test_multi_pk_filter
(namespace, env, flag, total, greptime_timestamp)
VALUES ('thermostat_v2', 'testing', 1, 1432, '2023-05-15 10:45:00');
INSERT INTO test_multi_pk_filter
(namespace, env, flag, total, greptime_timestamp)
VALUES ('thermostat_v2', 'testing', 1, 1056, '2023-05-15 10:50:00');
SELECT greptime_timestamp, namespace, env, total FROM test_multi_pk_filter WHERE
greptime_timestamp BETWEEN '2023-05-15 10:00:00' AND '2023-05-15 11:00:00' AND flag = 1 AND namespace = 'thermostat_v2'
ORDER BY greptime_timestamp;
SELECT greptime_timestamp, namespace, env, total FROM test_multi_pk_filter WHERE
greptime_timestamp BETWEEN '2023-05-15 10:00:00' AND '2023-05-15 11:00:00' AND flag = 1 AND namespace = 'thermostat_v2' AND env='dev'
ORDER BY greptime_timestamp;
DROP TABLE test_multi_pk_filter;
CREATE TABLE IF NOT EXISTS `test_multi_pk_null` ( `namespace` STRING NULL, `env` STRING NULL DEFAULT 'NULL', `total` BIGINT NULL, `greptime_timestamp` TIMESTAMP(9) NOT NULL, TIME INDEX (`greptime_timestamp`), PRIMARY KEY (`namespace`, `env`) ) ENGINE=mito;
INSERT INTO test_multi_pk_null
(namespace, env, total, greptime_timestamp)
VALUES ('thermostat_v2', 'production', 5289, '2023-05-15 10:00:00');
INSERT INTO test_multi_pk_null
(namespace, env, total, greptime_timestamp)
VALUES ('thermostat_v2', 'production', 421, '2023-05-15 10:05:00');
ADMIN FLUSH_TABLE('test_multi_pk_null');
SELECT * FROM test_multi_pk_null WHERE env IS NOT NULL;
DROP TABLE test_multi_pk_null;

View File

@@ -17,11 +17,14 @@ tql analyze (1, 3, '1s') t1{ a = "a" };
+-+-+-+
| stage | node | plan_|
+-+-+-+
| 0_| 0_|_PromInstantManipulateExec: range=[1000..3000], lookback=[300000], interval=[1000], time index=[b] REDACTED
|_|_|_PromSeriesDivideExec: tags=["a"] REDACTED
|_|_|_MergeScanExec: REDACTED
| 0_| 0_|_MergeScanExec: REDACTED
|_|_|_|
| 1_| 0_|_SeqScan: region=REDACTED, partition_count=1 (1 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED
| 1_| 0_|_PromInstantManipulateExec: range=[1000..3000], lookback=[300000], interval=[1000], time index=[b] REDACTED
|_|_|_PromSeriesDivideExec: tags=["a"] REDACTED
|_|_|_SortExec: expr=[a@0 ASC], preserve_partitioning=[true] REDACTED
|_|_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED
|_|_|_RepartitionExec: partitioning=Hash([a@0], 32), input_partitions=1 REDACTED
|_|_|_SeqScan: region=REDACTED, partition_count=1 (1 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED
|_|_|_|
|_|_| Total rows: 3_|
+-+-+-+
@@ -37,11 +40,14 @@ tql analyze (1, 3, '1s') t1{ a =~ ".*" };
+-+-+-+
| stage | node | plan_|
+-+-+-+
| 0_| 0_|_PromInstantManipulateExec: range=[1000..3000], lookback=[300000], interval=[1000], time index=[b] REDACTED
|_|_|_PromSeriesDivideExec: tags=["a"] REDACTED
|_|_|_MergeScanExec: REDACTED
| 0_| 0_|_MergeScanExec: REDACTED
|_|_|_|
| 1_| 0_|_SeqScan: region=REDACTED, partition_count=1 (1 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED
| 1_| 0_|_PromInstantManipulateExec: range=[1000..3000], lookback=[300000], interval=[1000], time index=[b] REDACTED
|_|_|_PromSeriesDivideExec: tags=["a"] REDACTED
|_|_|_SortExec: expr=[a@0 ASC], preserve_partitioning=[true] REDACTED
|_|_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED
|_|_|_RepartitionExec: partitioning=Hash([a@0], 32), input_partitions=1 REDACTED
|_|_|_SeqScan: region=REDACTED, partition_count=1 (1 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED
|_|_|_|
|_|_| Total rows: 6_|
+-+-+-+
@@ -57,11 +63,14 @@ tql analyze (1, 3, '1s') t1{ a =~ "a.*" };
+-+-+-+
| stage | node | plan_|
+-+-+-+
| 0_| 0_|_PromInstantManipulateExec: range=[1000..3000], lookback=[300000], interval=[1000], time index=[b] REDACTED
|_|_|_PromSeriesDivideExec: tags=["a"] REDACTED
|_|_|_MergeScanExec: REDACTED
| 0_| 0_|_MergeScanExec: REDACTED
|_|_|_|
| 1_| 0_|_SeqScan: region=REDACTED, partition_count=1 (1 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED
| 1_| 0_|_PromInstantManipulateExec: range=[1000..3000], lookback=[300000], interval=[1000], time index=[b] REDACTED
|_|_|_PromSeriesDivideExec: tags=["a"] REDACTED
|_|_|_SortExec: expr=[a@0 ASC], preserve_partitioning=[true] REDACTED
|_|_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED
|_|_|_RepartitionExec: partitioning=Hash([a@0], 32), input_partitions=1 REDACTED
|_|_|_SeqScan: region=REDACTED, partition_count=1 (1 memtable ranges, 0 file 0 ranges), distribution=PerSeries REDACTED
|_|_|_|
|_|_| Total rows: 3_|
+-+-+-+

View File

@@ -3,7 +3,7 @@ create database pg_catalog;
Error: 1004(InvalidArguments), Schema pg_catalog already exists
-- session_user is based on the current user, so the test only checks that it is not null
-- SQLNESS PROTOCOL POSTGRES
SELECT session_user is not null;
@@ -107,12 +107,13 @@ select * from pg_catalog.pg_type order by oid;
+-----+-----------+--------+
-- SQLNESS PROTOCOL POSTGRES
-- SQLNESS REPLACE (\d+\s*) OID
select * from pg_catalog.pg_database where datname = 'public';
+------------+---------+
| oid | datname |
+------------+---------+
| 3927743705 | public |
| OID| public |
+------------+---------+
-- \d
@@ -159,15 +160,16 @@ ORDER BY 1,2;
-- make sure the oid of each namespace stays stable
-- SQLNESS PROTOCOL POSTGRES
SELECT * FROM pg_namespace ORDER BY oid;
-- SQLNESS REPLACE (\d+\s*) OID
SELECT * FROM pg_namespace ORDER BY nspname;
+------------+--------------------+
| oid | nspname |
+------------+--------------------+
| 667359454 | pg_catalog |
| 3174397350 | information_schema |
| 3338153620 | greptime_private |
| 3927743705 | public |
| OID| greptime_private |
| OID| information_schema |
| OID| pg_catalog |
| OID| public |
+------------+--------------------+
-- SQLNESS PROTOCOL POSTGRES
@@ -260,6 +262,7 @@ where relnamespace in (
+---------+
-- SQLNESS PROTOCOL POSTGRES
-- SQLNESS REPLACE (\d+\s*) OID
select relnamespace, relname, relkind
from pg_catalog.pg_class
where relnamespace in (
@@ -274,7 +277,7 @@ order by relnamespace, relname;
+--------------+---------+---------+
| relnamespace | relname | relkind |
+--------------+---------+---------+
| 434869349 | foo | r |
| OID| foo | r |
+--------------+---------+---------+
-- SQLNESS PROTOCOL POSTGRES

Some files were not shown because too many files have changed in this diff.