Mirror of https://github.com/GreptimeTeam/greptimedb.git, synced 2025-12-24 23:19:57 +00:00
Compare commits
32 Commits
| Author | SHA1 | Date |
|---|---|---|
| | d57b144b2f | |
| | 46e106bcc3 | |
| | a7507a2b12 | |
| | 5b8e5066a0 | |
| | dcd481e6a4 | |
| | 3217b56cc1 | |
| | eccad647d0 | |
| | 829db8c5c1 | |
| | 9056c3a6aa | |
| | d9e7b898a3 | |
| | 59d4081f7a | |
| | 6e87ac0a0e | |
| | d89cfd0d4d | |
| | 8a0054aa89 | |
| | f859932745 | |
| | 9a8fc08e6a | |
| | 825e4beead | |
| | 0a23b40321 | |
| | cf6ef0a30d | |
| | 65a659d136 | |
| | 62bcb45787 | |
| | 94f3542a4f | |
| | fc3bc5327d | |
| | 9e33ddceea | |
| | c9bdf4ff9f | |
| | 0a9972aa9a | |
| | 76d5b710c8 | |
| | fe02366ce6 | |
| | d7aeb369a6 | |
| | 9284bb7a2b | |
| | e23dd5a44f | |
| | c60b59adc8 | |
103 .github/actions/build-dev-builder-image/action.yml vendored
@@ -1,103 +0,0 @@
name: Build and push dev-builder image
description: Build and push dev-builder image to DockerHub and ACR
inputs:
dockerhub-image-registry:
description: The dockerhub image registry to store the images
required: false
default: docker.io
dockerhub-image-registry-username:
description: The dockerhub username to login to the image registry
required: true
dockerhub-image-registry-token:
description: The dockerhub token to login to the image registry
required: true
dockerhub-image-namespace:
description: The dockerhub namespace of the image registry to store the images
required: false
default: greptime
acr-image-registry:
description: The ACR image registry to store the images
required: true
acr-image-registry-username:
description: The ACR username to login to the image registry
required: true
acr-image-registry-password:
description: The ACR password to login to the image registry
required: true
acr-image-namespace:
description: The ACR namespace of the image registry to store the images
required: false
default: greptime
version:
description: Version of the dev-builder
required: false
default: latest
runs:
using: composite
steps:
- name: Login to Dockerhub
uses: docker/login-action@v2
with:
registry: ${{ inputs.dockerhub-image-registry }}
username: ${{ inputs.dockerhub-image-registry-username }}
password: ${{ inputs.dockerhub-image-registry-token }}

- name: Build and push ubuntu dev builder image to dockerhub
shell: bash
run:
make dev-builder \
BASE_IMAGE=ubuntu \
BUILDX_MULTI_PLATFORM_BUILD=true \
IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
IMAGE_TAG=${{ inputs.version }}

- name: Build and push centos dev builder image to dockerhub
shell: bash
run:
make dev-builder \
BASE_IMAGE=centos \
BUILDX_MULTI_PLATFORM_BUILD=true \
IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
IMAGE_TAG=${{ inputs.version }}

- name: Build and push android dev builder image to dockerhub
shell: bash
run:
make dev-builder \
BASE_IMAGE=android \
BUILDX_MULTI_PLATFORM_BUILD=true \
IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
IMAGE_TAG=${{ inputs.version }}

- name: Login to ACR
uses: docker/login-action@v2
continue-on-error: true
with:
registry: ${{ inputs.acr-image-registry }}
username: ${{ inputs.acr-image-registry-username }}
password: ${{ inputs.acr-image-registry-password }}

- name: Build and push ubuntu dev builder image to ACR
shell: bash
continue-on-error: true
run: # buildx caches images that were already built, so rebuilding them here will not take long.
make dev-builder \
BASE_IMAGE=ubuntu \
BUILDX_MULTI_PLATFORM_BUILD=true \
IMAGE_REGISTRY=${{ inputs.acr-image-registry }} \
IMAGE_NAMESPACE=${{ inputs.acr-image-namespace }} \
IMAGE_TAG=${{ inputs.version }}

- name: Build and push centos dev builder image to ACR
shell: bash
continue-on-error: true
run: # buildx caches images that were already built, so rebuilding them here will not take long.
make dev-builder \
BASE_IMAGE=centos \
BUILDX_MULTI_PLATFORM_BUILD=true \
IMAGE_REGISTRY=${{ inputs.acr-image-registry }} \
IMAGE_NAMESPACE=${{ inputs.acr-image-namespace }} \
IMAGE_TAG=${{ inputs.version }}
76 .github/actions/build-dev-builder-images/action.yml vendored Normal file
@@ -0,0 +1,76 @@
name: Build and push dev-builder images
description: Build and push dev-builder images to DockerHub and ACR
inputs:
dockerhub-image-registry:
description: The dockerhub image registry to store the images
required: false
default: docker.io
dockerhub-image-registry-username:
description: The dockerhub username to login to the image registry
required: true
dockerhub-image-registry-token:
description: The dockerhub token to login to the image registry
required: true
dockerhub-image-namespace:
description: The dockerhub namespace of the image registry to store the images
required: false
default: greptime
version:
description: Version of the dev-builder
required: false
default: latest
build-dev-builder-ubuntu:
description: Build dev-builder-ubuntu image
required: false
default: 'true'
build-dev-builder-centos:
description: Build dev-builder-centos image
required: false
default: 'true'
build-dev-builder-android:
description: Build dev-builder-android image
required: false
default: 'true'
runs:
using: composite
steps:
- name: Login to Dockerhub
uses: docker/login-action@v2
with:
registry: ${{ inputs.dockerhub-image-registry }}
username: ${{ inputs.dockerhub-image-registry-username }}
password: ${{ inputs.dockerhub-image-registry-token }}

- name: Build and push dev-builder-ubuntu image
shell: bash
if: ${{ inputs.build-dev-builder-ubuntu == 'true' }}
run: |
make dev-builder \
BASE_IMAGE=ubuntu \
BUILDX_MULTI_PLATFORM_BUILD=true \
IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
IMAGE_TAG=${{ inputs.version }}

- name: Build and push dev-builder-android image # Only build image for amd64 platform.
shell: bash
if: ${{ inputs.build-dev-builder-centos == 'true' }}
run: |
make dev-builder \
BASE_IMAGE=centos \
BUILDX_MULTI_PLATFORM_BUILD=true \
IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
IMAGE_TAG=${{ inputs.version }}

- name: Build and push dev-builder-android image # Only build image for amd64 platform.
shell: bash
if: ${{ inputs.build-dev-builder-android == 'true' }}
run: |
make dev-builder \
BASE_IMAGE=android \
IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
IMAGE_TAG=${{ inputs.version }} && \

docker push ${{ inputs.dockerhub-image-registry }}/${{ inputs.dockerhub-image-namespace }}/dev-builder-android:${{ inputs.version }}
48 .github/actions/build-greptime-binary/action.yml vendored
@@ -16,35 +16,20 @@ inputs:
version:
description: Version of the artifact
required: true
release-to-s3-bucket:
description: S3 bucket to store released artifacts
required: true
aws-access-key-id:
description: AWS access key id
required: true
aws-secret-access-key:
description: AWS secret access key
required: true
aws-region:
description: AWS region
required: true
upload-to-s3:
description: Upload to S3
required: false
default: 'true'
upload-latest-artifacts:
description: Upload the latest artifacts to S3
required: false
default: 'true'
working-dir:
description: Working directory to build the artifacts
required: false
default: .
build-android-artifacts:
description: Build android artifacts
required: false
default: 'false'
runs:
using: composite
steps:
- name: Build greptime binary
shell: bash
if: ${{ inputs.build-android-artifacts == 'false' }}
run: |
cd ${{ inputs.working-dir }} && \
make build-by-dev-builder \
@@ -54,14 +39,25 @@ runs:

- name: Upload artifacts
uses: ./.github/actions/upload-artifacts
if: ${{ inputs.build-android-artifacts == 'false' }}
with:
artifacts-dir: ${{ inputs.artifacts-dir }}
target-file: ./target/${{ inputs.cargo-profile }}/greptime
version: ${{ inputs.version }}
release-to-s3-bucket: ${{ inputs.release-to-s3-bucket }}
aws-access-key-id: ${{ inputs.aws-access-key-id }}
aws-secret-access-key: ${{ inputs.aws-secret-access-key }}
aws-region: ${{ inputs.aws-region }}
upload-to-s3: ${{ inputs.upload-to-s3 }}
upload-latest-artifacts: ${{ inputs.upload-latest-artifacts }}
working-dir: ${{ inputs.working-dir }}

# TODO(zyy17): We can remove build-android-artifacts flag in the future.
- name: Build greptime binary
shell: bash
if: ${{ inputs.build-android-artifacts == 'true' }}
run: |
cd ${{ inputs.working-dir }} && make strip-android-bin

- name: Upload android artifacts
uses: ./.github/actions/upload-artifacts
if: ${{ inputs.build-android-artifacts == 'true' }}
with:
artifacts-dir: ${{ inputs.artifacts-dir }}
target-file: ./target/aarch64-linux-android/release/greptime
version: ${{ inputs.version }}
working-dir: ${{ inputs.working-dir }}
48 .github/actions/build-linux-artifacts/action.yml vendored
@@ -13,30 +13,10 @@ inputs:
disable-run-tests:
description: Disable running integration tests
required: true
release-to-s3-bucket:
description: S3 bucket to store released artifacts
required: true
aws-access-key-id:
description: AWS access key id
required: true
aws-secret-access-key:
description: AWS secret access key
required: true
aws-region:
description: AWS region
required: true
dev-mode:
description: Enable dev mode, only build standard greptime
required: false
default: 'false'
upload-to-s3:
description: Upload to S3
required: false
default: 'true'
upload-latest-artifacts:
description: Upload the latest artifacts to S3
required: false
default: 'true'
working-dir:
description: Working directory to build the artifacts
required: false
@@ -68,12 +48,6 @@ runs:
cargo-profile: ${{ inputs.cargo-profile }}
artifacts-dir: greptime-linux-${{ inputs.arch }}-pyo3-${{ inputs.version }}
version: ${{ inputs.version }}
release-to-s3-bucket: ${{ inputs.release-to-s3-bucket }}
aws-access-key-id: ${{ inputs.aws-access-key-id }}
aws-secret-access-key: ${{ inputs.aws-secret-access-key }}
aws-region: ${{ inputs.aws-region }}
upload-to-s3: ${{ inputs.upload-to-s3 }}
upload-latest-artifacts: ${{ inputs.upload-latest-artifacts }}
working-dir: ${{ inputs.working-dir }}

- name: Build greptime without pyo3
@@ -85,12 +59,6 @@ runs:
cargo-profile: ${{ inputs.cargo-profile }}
artifacts-dir: greptime-linux-${{ inputs.arch }}-${{ inputs.version }}
version: ${{ inputs.version }}
release-to-s3-bucket: ${{ inputs.release-to-s3-bucket }}
aws-access-key-id: ${{ inputs.aws-access-key-id }}
aws-secret-access-key: ${{ inputs.aws-secret-access-key }}
aws-region: ${{ inputs.aws-region }}
upload-to-s3: ${{ inputs.upload-to-s3 }}
upload-latest-artifacts: ${{ inputs.upload-latest-artifacts }}
working-dir: ${{ inputs.working-dir }}

- name: Clean up the target directory # Clean up the target directory for the centos7 base image, otherwise it will still use the objects from the last build.
@@ -107,10 +75,14 @@ runs:
cargo-profile: ${{ inputs.cargo-profile }}
artifacts-dir: greptime-linux-${{ inputs.arch }}-centos-${{ inputs.version }}
version: ${{ inputs.version }}
release-to-s3-bucket: ${{ inputs.release-to-s3-bucket }}
aws-access-key-id: ${{ inputs.aws-access-key-id }}
aws-secret-access-key: ${{ inputs.aws-secret-access-key }}
aws-region: ${{ inputs.aws-region }}
upload-to-s3: ${{ inputs.upload-to-s3 }}
upload-latest-artifacts: ${{ inputs.upload-latest-artifacts }}
working-dir: ${{ inputs.working-dir }}

- name: Build greptime on android base image
uses: ./.github/actions/build-greptime-binary
if: ${{ inputs.arch == 'amd64' && inputs.dev-mode == 'false' }} # Only build android base image on amd64.
with:
base-image: android
artifacts-dir: greptime-android-arm64-${{ inputs.version }}
version: ${{ inputs.version }}
working-dir: ${{ inputs.working-dir }}
build-android-artifacts: true
21 .github/actions/build-macos-artifacts/action.yml vendored
@@ -19,25 +19,9 @@ inputs:
disable-run-tests:
description: Disable running integration tests
required: true
release-to-s3-bucket:
description: S3 bucket to store released artifacts
required: true
artifacts-dir:
description: Directory to store artifacts
required: true
aws-access-key-id:
description: AWS access key id
required: true
aws-secret-access-key:
description: AWS secret access key
required: true
aws-region:
description: AWS region
required: true
upload-to-s3:
description: Upload to S3
required: false
default: 'true'
runs:
using: composite
steps:
@@ -103,8 +87,3 @@ runs:
artifacts-dir: ${{ inputs.artifacts-dir }}
target-file: target/${{ inputs.arch }}/${{ inputs.cargo-profile }}/greptime
version: ${{ inputs.version }}
release-to-s3-bucket: ${{ inputs.release-to-s3-bucket }}
aws-access-key-id: ${{ inputs.aws-access-key-id }}
aws-secret-access-key: ${{ inputs.aws-secret-access-key }}
aws-region: ${{ inputs.aws-region }}
upload-to-s3: ${{ inputs.upload-to-s3 }}
80 .github/actions/build-windows-artifacts/action.yml vendored Normal file
@@ -0,0 +1,80 @@
name: Build Windows artifacts
description: Build Windows artifacts
inputs:
arch:
description: Architecture to build
required: true
rust-toolchain:
description: Rust toolchain to use
required: true
cargo-profile:
description: Cargo profile to build
required: true
features:
description: Cargo features to build
required: true
version:
description: Version of the artifact
required: true
disable-run-tests:
description: Disable running integration tests
required: true
artifacts-dir:
description: Directory to store artifacts
required: true
runs:
using: composite
steps:
- uses: arduino/setup-protoc@v1

- name: Install rust toolchain
uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ inputs.rust-toolchain }}
targets: ${{ inputs.arch }}
components: llvm-tools-preview

- name: Rust Cache
uses: Swatinem/rust-cache@v2

- name: Install Python
uses: actions/setup-python@v4
with:
python-version: '3.10'

- name: Install PyArrow Package
shell: pwsh
run: pip install pyarrow

- name: Install WSL distribution
uses: Vampire/setup-wsl@v2
with:
distribution: Ubuntu-22.04

- name: Install latest nextest release # For integration tests.
if: ${{ inputs.disable-run-tests == 'false' }}
uses: taiki-e/install-action@nextest

- name: Run integration tests
if: ${{ inputs.disable-run-tests == 'false' }}
shell: pwsh
run: make test sqlness-test

- name: Upload sqlness logs
if: ${{ failure() }} # Only upload logs when the integration tests failed.
uses: actions/upload-artifact@v3
with:
name: sqlness-logs
path: ${{ runner.temp }}/greptime-*.log
retention-days: 3

- name: Build greptime binary
shell: pwsh
run: cargo build --profile ${{ inputs.cargo-profile }} --features ${{ inputs.features }} --target ${{ inputs.arch }}

- name: Upload artifacts
uses: ./.github/actions/upload-artifacts
with:
artifacts-dir: ${{ inputs.artifacts-dir }}
target-file: target/${{ inputs.arch }}/${{ inputs.cargo-profile }}/greptime
version: ${{ inputs.version }}
@@ -1,5 +1,5 @@
name: Release artifacts
description: Release artifacts
name: Publish GitHub release
description: Publish GitHub release
inputs:
version:
description: Version to release
138 .github/actions/release-cn-artifacts/action.yaml vendored Normal file
@@ -0,0 +1,138 @@
name: Release CN artifacts
description: Release artifacts to CN region
inputs:
src-image-registry:
description: The source image registry to store the images
required: true
default: docker.io
src-image-namespace:
description: The namespace of the source image registry to store the images
required: true
default: greptime
src-image-name:
description: The name of the source image
required: false
default: greptimedb
dst-image-registry:
description: The destination image registry to store the images
required: true
dst-image-namespace:
description: The namespace of the destination image registry to store the images
required: true
default: greptime
dst-image-registry-username:
description: The username to login to the image registry
required: true
dst-image-registry-password:
description: The password to login to the image registry
required: true
version:
description: Version of the artifact
required: true
dev-mode:
description: Enable dev mode, only push standard greptime
required: false
default: 'false'
push-latest-tag:
description: Whether to push the latest tag of the image
required: false
default: 'true'
aws-cn-s3-bucket:
description: S3 bucket to store released artifacts in CN region
required: true
aws-cn-access-key-id:
description: AWS access key id in CN region
required: true
aws-cn-secret-access-key:
description: AWS secret access key in CN region
required: true
aws-cn-region:
description: AWS region in CN
required: true
upload-to-s3:
description: Upload to S3
required: false
default: 'true'
artifacts-dir:
description: Directory to store artifacts
required: false
default: 'artifacts'
update-version-info:
description: Update the version info in S3
required: false
default: 'true'
upload-max-retry-times:
description: Max retry times for uploading artifacts to S3
required: false
default: "20"
upload-retry-timeout:
description: Timeout for uploading artifacts to S3
required: false
default: "30" # minutes
runs:
using: composite
steps:
- name: Download artifacts
uses: actions/download-artifact@v3
with:
path: ${{ inputs.artifacts-dir }}

- name: Release artifacts to cn region
uses: nick-invision/retry@v2
if: ${{ inputs.upload-to-s3 == 'true' }}
env:
AWS_ACCESS_KEY_ID: ${{ inputs.aws-cn-access-key-id }}
AWS_SECRET_ACCESS_KEY: ${{ inputs.aws-cn-secret-access-key }}
AWS_DEFAULT_REGION: ${{ inputs.aws-cn-region }}
UPDATE_VERSION_INFO: ${{ inputs.update-version-info }}
with:
max_attempts: ${{ inputs.upload-max-retry-times }}
timeout_minutes: ${{ inputs.upload-retry-timeout }}
command: |
./.github/scripts/upload-artifacts-to-s3.sh \
${{ inputs.artifacts-dir }} \
${{ inputs.version }} \
${{ inputs.aws-cn-s3-bucket }}

- name: Push greptimedb image from Dockerhub to ACR
shell: bash
env:
DST_REGISTRY_USERNAME: ${{ inputs.dst-image-registry-username }}
DST_REGISTRY_PASSWORD: ${{ inputs.dst-image-registry-password }}
run: |
./.github/scripts/copy-image.sh \
${{ inputs.src-image-registry }}/${{ inputs.src-image-namespace }}/${{ inputs.src-image-name }}:${{ inputs.version }} \
${{ inputs.dst-image-registry }}/${{ inputs.dst-image-namespace }}

- name: Push latest greptimedb image from Dockerhub to ACR
shell: bash
if: ${{ inputs.push-latest-tag == 'true' }}
env:
DST_REGISTRY_USERNAME: ${{ inputs.dst-image-registry-username }}
DST_REGISTRY_PASSWORD: ${{ inputs.dst-image-registry-password }}
run: |
./.github/scripts/copy-image.sh \
${{ inputs.src-image-registry }}/${{ inputs.src-image-namespace }}/${{ inputs.src-image-name }}:latest \
${{ inputs.dst-image-registry }}/${{ inputs.dst-image-namespace }}

- name: Push greptimedb-centos image from DockerHub to ACR
shell: bash
if: ${{ inputs.dev-mode == 'false' }}
env:
DST_REGISTRY_USERNAME: ${{ inputs.dst-image-registry-username }}
DST_REGISTRY_PASSWORD: ${{ inputs.dst-image-registry-password }}
run: |
./.github/scripts/copy-image.sh \
${{ inputs.src-image-registry }}/${{ inputs.src-image-namespace }}/${{ inputs.src-image-name }}-centos:latest \
${{ inputs.dst-image-registry }}/${{ inputs.dst-image-namespace }}

- name: Push greptimedb-centos image from DockerHub to ACR
shell: bash
if: ${{ inputs.dev-mode == 'false' && inputs.push-latest-tag == 'true' }}
env:
DST_REGISTRY_USERNAME: ${{ inputs.dst-image-registry-username }}
DST_REGISTRY_PASSWORD: ${{ inputs.dst-image-registry-password }}
run: |
./.github/scripts/copy-image.sh \
${{ inputs.src-image-registry }}/${{ inputs.src-image-namespace }}/${{ inputs.src-image-name }}-centos:latest \
${{ inputs.dst-image-registry }}/${{ inputs.dst-image-namespace }}
88 .github/actions/upload-artifacts/action.yml vendored
@@ -10,34 +10,6 @@ inputs:
version:
description: Version of the artifact
required: true
release-to-s3-bucket:
description: S3 bucket to store released artifacts
required: true
aws-access-key-id:
description: AWS access key id
required: true
aws-secret-access-key:
description: AWS secret access key
required: true
aws-region:
description: AWS region
required: true
upload-to-s3:
description: Upload to S3
required: false
default: 'true'
upload-latest-artifacts:
description: Upload the latest artifacts to S3
required: false
default: 'true'
upload-max-retry-times:
description: Max retry times for uploading artifacts to S3
required: false
default: "20"
upload-retry-timeout:
description: Timeout for uploading artifacts to S3
required: false
default: "30" # minutes
working-dir:
description: Working directory to upload the artifacts
required: false
@@ -61,9 +33,21 @@ runs:
working-directory: ${{ inputs.working-dir }}
shell: bash
run: |
tar -zcvf ${{ inputs.artifacts-dir }}.tar.gz ${{ inputs.artifacts-dir }} && \
tar -zcvf ${{ inputs.artifacts-dir }}.tar.gz ${{ inputs.artifacts-dir }}

- name: Calculate checksum
if: runner.os != 'Windows'
working-directory: ${{ inputs.working-dir }}
shell: bash
run: |
echo $(shasum -a 256 ${{ inputs.artifacts-dir }}.tar.gz | cut -f1 -d' ') > ${{ inputs.artifacts-dir }}.sha256sum

- name: Calculate checksum on Windows
if: runner.os == 'Windows'
working-directory: ${{ inputs.working-dir }}
shell: pwsh
run: Get-FileHash ${{ inputs.artifacts-dir }}.tar.gz -Algorithm SHA256 | select -ExpandProperty Hash > ${{ inputs.artifacts-dir }}.sha256sum

# Note: The artifacts will be double zip compressed (related issue: https://github.com/actions/upload-artifact/issues/39).
# However, when we use 'actions/download-artifact@v3' to download the artifacts, they will be automatically unzipped.
- name: Upload artifacts
@@ -77,49 +61,3 @@ runs:
with:
name: ${{ inputs.artifacts-dir }}.sha256sum
path: ${{ inputs.working-dir }}/${{ inputs.artifacts-dir }}.sha256sum

- name: Upload artifacts to S3
if: ${{ inputs.upload-to-s3 == 'true' }}
uses: nick-invision/retry@v2
env:
AWS_ACCESS_KEY_ID: ${{ inputs.aws-access-key-id }}
AWS_SECRET_ACCESS_KEY: ${{ inputs.aws-secret-access-key }}
AWS_DEFAULT_REGION: ${{ inputs.aws-region }}
with:
max_attempts: ${{ inputs.upload-max-retry-times }}
timeout_minutes: ${{ inputs.upload-retry-timeout }}
# The bucket layout will be:
# releases/greptimedb
# ├── v0.1.0
# │   ├── greptime-darwin-amd64-pyo3-v0.1.0.sha256sum
# │   └── greptime-darwin-amd64-pyo3-v0.1.0.tar.gz
# └── v0.2.0
#     ├── greptime-darwin-amd64-pyo3-v0.2.0.sha256sum
#     └── greptime-darwin-amd64-pyo3-v0.2.0.tar.gz
command: |
cd ${{ inputs.working-dir }} && \
aws s3 cp \
${{ inputs.artifacts-dir }}.tar.gz \
s3://${{ inputs.release-to-s3-bucket }}/releases/greptimedb/${{ inputs.version }}/${{ inputs.artifacts-dir }}.tar.gz && \
aws s3 cp \
${{ inputs.artifacts-dir }}.sha256sum \
s3://${{ inputs.release-to-s3-bucket }}/releases/greptimedb/${{ inputs.version }}/${{ inputs.artifacts-dir }}.sha256sum

- name: Upload latest artifacts to S3
if: ${{ inputs.upload-to-s3 == 'true' && inputs.upload-latest-artifacts == 'true' }} # We'll also upload the latest artifacts to S3 in the scheduled and formal release.
uses: nick-invision/retry@v2
env:
AWS_ACCESS_KEY_ID: ${{ inputs.aws-access-key-id }}
AWS_SECRET_ACCESS_KEY: ${{ inputs.aws-secret-access-key }}
AWS_DEFAULT_REGION: ${{ inputs.aws-region }}
with:
max_attempts: ${{ inputs.upload-max-retry-times }}
timeout_minutes: ${{ inputs.upload-retry-timeout }}
command: |
cd ${{ inputs.working-dir }} && \
aws s3 cp \
${{ inputs.artifacts-dir }}.tar.gz \
s3://${{ inputs.release-to-s3-bucket }}/releases/greptimedb/latest/${{ inputs.artifacts-dir }}.tar.gz && \
aws s3 cp \
${{ inputs.artifacts-dir }}.sha256sum \
s3://${{ inputs.release-to-s3-bucket }}/releases/greptimedb/latest/${{ inputs.artifacts-dir }}.sha256sum
47 .github/scripts/copy-image.sh vendored Executable file
@@ -0,0 +1,47 @@
#!/usr/bin/env bash

set -e
set -o pipefail

SRC_IMAGE=$1
DST_REGISTRY=$2
SKOPEO_STABLE_IMAGE="quay.io/skopeo/stable:latest"

# Check if necessary variables are set.
function check_vars() {
for var in DST_REGISTRY_USERNAME DST_REGISTRY_PASSWORD DST_REGISTRY SRC_IMAGE; do
if [ -z "${!var}" ]; then
echo "$var is not set or empty."
echo "Usage: DST_REGISTRY_USERNAME=<your-dst-registry-username> DST_REGISTRY_PASSWORD=<your-dst-registry-password> $0 <src-image> <dst-registry>"
exit 1
fi
done
}

# Copies images from DockerHub to the destination registry.
function copy_images_from_dockerhub() {
# Check if docker is installed.
if ! command -v docker &> /dev/null; then
echo "docker is not installed. Please install docker to continue."
exit 1
fi

# Extract the name and tag of the source image.
IMAGE_NAME=$(echo "$SRC_IMAGE" | sed "s/.*\///")

echo "Copying $SRC_IMAGE to $DST_REGISTRY/$IMAGE_NAME"

docker run "$SKOPEO_STABLE_IMAGE" copy -a docker://"$SRC_IMAGE" \
--dest-creds "$DST_REGISTRY_USERNAME":"$DST_REGISTRY_PASSWORD" \
docker://"$DST_REGISTRY/$IMAGE_NAME"
}

function main() {
check_vars
copy_images_from_dockerhub
}

# Usage example:
# DST_REGISTRY_USERNAME=123 DST_REGISTRY_PASSWORD=456 \
#   ./copy-image.sh greptime/greptimedb:v0.4.0 greptime-registry.cn-hangzhou.cr.aliyuncs.com
main
102 .github/scripts/upload-artifacts-to-s3.sh vendored Executable file
@@ -0,0 +1,102 @@
#!/usr/bin/env bash

set -e
set -o pipefail

ARTIFACTS_DIR=$1
VERSION=$2
AWS_S3_BUCKET=$3
RELEASE_DIRS="releases/greptimedb"
GREPTIMEDB_REPO="GreptimeTeam/greptimedb"

# Check if necessary variables are set.
function check_vars() {
for var in AWS_S3_BUCKET VERSION ARTIFACTS_DIR; do
if [ -z "${!var}" ]; then
echo "$var is not set or empty."
echo "Usage: $0 <artifacts-dir> <version> <aws-s3-bucket>"
exit 1
fi
done
}

# Uploads artifacts to AWS S3 bucket.
function upload_artifacts() {
# The bucket layout will be:
# releases/greptimedb
# ├── latest-version.txt
# ├── latest-nightly-version.txt
# ├── v0.1.0
# │   ├── greptime-darwin-amd64-pyo3-v0.1.0.sha256sum
# │   └── greptime-darwin-amd64-pyo3-v0.1.0.tar.gz
# └── v0.2.0
#     ├── greptime-darwin-amd64-pyo3-v0.2.0.sha256sum
#     └── greptime-darwin-amd64-pyo3-v0.2.0.tar.gz
find "$ARTIFACTS_DIR" -type f \( -name "*.tar.gz" -o -name "*.sha256sum" \) | while IFS= read -r file; do
aws s3 cp \
"$file" "s3://$AWS_S3_BUCKET/$RELEASE_DIRS/$VERSION/$(basename "$file")"
done
}

# Updates the latest version information in AWS S3 if UPDATE_VERSION_INFO is true.
function update_version_info() {
if [ "$UPDATE_VERSION_INFO" == "true" ]; then
# If it's an official release (like v1.0.0, v1.0.1, v1.0.2, etc.), update latest-version.txt.
if [[ "$VERSION" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
echo "Updating latest-version.txt"
echo "$VERSION" > latest-version.txt
aws s3 cp \
latest-version.txt "s3://$AWS_S3_BUCKET/$RELEASE_DIRS/latest-version.txt"
fi

# If it's a nightly release, update latest-nightly-version.txt.
if [[ "$VERSION" == *"nightly"* ]]; then
echo "Updating latest-nightly-version.txt"
echo "$VERSION" > latest-nightly-version.txt
aws s3 cp \
latest-nightly-version.txt "s3://$AWS_S3_BUCKET/$RELEASE_DIRS/latest-nightly-version.txt"
fi
fi
}

# Downloads artifacts from Github if DOWNLOAD_ARTIFACTS_FROM_GITHUB is true.
function download_artifacts_from_github() {
if [ "$DOWNLOAD_ARTIFACTS_FROM_GITHUB" == "true" ]; then
# Check if jq is installed.
if ! command -v jq &> /dev/null; then
echo "jq is not installed. Please install jq to continue."
exit 1
fi

# Get the latest release API response.
RELEASES_API_RESPONSE=$(curl -s -H "Accept: application/vnd.github.v3+json" "https://api.github.com/repos/$GREPTIMEDB_REPO/releases/latest")

# Extract download URLs for the artifacts.
# Exclude source code archives which are typically named as 'greptimedb-<version>.zip' or 'greptimedb-<version>.tar.gz'.
ASSET_URLS=$(echo "$RELEASES_API_RESPONSE" | jq -r '.assets[] | select(.name | test("greptimedb-.*\\.(zip|tar\\.gz)$") | not) | .browser_download_url')

# Download each asset.
while IFS= read -r url; do
if [ -n "$url" ]; then
curl -LJO "$url"
echo "Downloaded: $url"
fi
done <<< "$ASSET_URLS"
fi
}

function main() {
check_vars
download_artifacts_from_github
upload_artifacts
update_version_info
}

# Usage example:
# AWS_ACCESS_KEY_ID=<your_access_key_id> \
# AWS_SECRET_ACCESS_KEY=<your_secret_access_key> \
# AWS_DEFAULT_REGION=<your_region> \
# UPDATE_VERSION_INFO=true \
# DOWNLOAD_ARTIFACTS_FROM_GITHUB=false \
# ./upload-artifacts-to-s3.sh <artifacts-dir> <version> <aws-s3-bucket>
main
2 .github/workflows/apidoc.yml vendored
@@ -17,7 +17,7 @@ env:

jobs:
apidoc:
runs-on: ubuntu-latest
runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@v3
- uses: arduino/setup-protoc@v1
67 .github/workflows/dev-build.yml vendored
@@ -16,11 +16,11 @@ on:
description: The runner used to build linux-amd64 artifacts
default: ec2-c6i.4xlarge-amd64
options:
- ubuntu-latest
- ubuntu-latest-8-cores
- ubuntu-latest-16-cores
- ubuntu-latest-32-cores
- ubuntu-latest-64-cores
- ubuntu-20.04
- ubuntu-20.04-8-cores
- ubuntu-20.04-16-cores
- ubuntu-20.04-32-cores
- ubuntu-20.04-64-cores
- ec2-c6i.xlarge-amd64 # 4C8G
- ec2-c6i.2xlarge-amd64 # 8C16G
- ec2-c6i.4xlarge-amd64 # 16C32G
@@ -78,7 +78,7 @@ jobs:
allocate-runners:
name: Allocate runners
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
runs-on: ubuntu-latest
runs-on: ubuntu-20.04
outputs:
linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }}
linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }}
@@ -164,12 +164,7 @@ jobs:
cargo-profile: ${{ env.CARGO_PROFILE }}
version: ${{ needs.allocate-runners.outputs.version }}
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
release-to-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
aws-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
aws-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
dev-mode: true # Only build the standard greptime binary.
upload-to-s3: false # No need to upload to S3.
working-dir: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}

build-linux-arm64-artifacts:
@@ -198,12 +193,7 @@ jobs:
cargo-profile: ${{ env.CARGO_PROFILE }}
version: ${{ needs.allocate-runners.outputs.version }}
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
release-to-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
aws-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
aws-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
dev-mode: true # Only build the standard greptime binary.
upload-to-s3: false # No need to upload to S3.
working-dir: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}

release-images-to-dockerhub:
@@ -214,7 +204,7 @@ jobs:
build-linux-amd64-artifacts,
build-linux-arm64-artifacts,
]
runs-on: ubuntu-latest
runs-on: ubuntu-20.04
outputs:
build-result: ${{ steps.set-build-result.outputs.build-result }}
steps:
@@ -239,41 +229,44 @@ jobs:
run: |
echo "build-result=success" >> $GITHUB_OUTPUT

release-images-to-acr:
name: Build and push images to ACR
release-cn-artifacts:
name: Release artifacts to CN region
if: ${{ inputs.release_images || github.event_name == 'schedule' }}
needs: [
allocate-runners,
build-linux-amd64-artifacts,
build-linux-arm64-artifacts,
release-images-to-dockerhub,
]
runs-on: ubuntu-latest
# When we push to ACR, it's easy to fail due to some unknown network issues.
# However, we don't want to fail the whole workflow because of this.
# ACR has a daily sync with DockerHub, so don't worry about the image not being updated.
runs-on: ubuntu-20.04
continue-on-error: true
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 0

- name: Build and push images to ACR
uses: ./.github/actions/build-images
- name: Release artifacts to CN region
uses: ./.github/actions/release-cn-artifacts
with:
image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
image-namespace: ${{ vars.IMAGE_NAMESPACE }}
image-name: ${{ env.IMAGE_NAME }}
image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
src-image-registry: docker.io
src-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
src-image-name: ${{ env.IMAGE_NAME }}
dst-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
dst-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
dst-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
dst-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
version: ${{ needs.allocate-runners.outputs.version }}
push-latest-tag: false # Don't push the latest tag to the registry.
dev-mode: true # Only build the standard images.
aws-cn-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
aws-cn-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
aws-cn-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
dev-mode: true # Only build the standard images (excluding centos images).
push-latest-tag: false # Don't push the latest tag to the registry.
update-version-info: false # Don't update the version info in S3.

stop-linux-amd64-runner: # It's always run as the last job in the workflow to make sure that the runner is released.
name: Stop linux-amd64 runner
# Only run this job when the runner is allocated.
if: ${{ always() }}
runs-on: ubuntu-latest
runs-on: ubuntu-20.04
needs: [
allocate-runners,
build-linux-amd64-artifacts,
@@ -298,7 +291,7 @@ jobs:
name: Stop linux-arm64 runner
# Only run this job when the runner is allocated.
if: ${{ always() }}
runs-on: ubuntu-latest
runs-on: ubuntu-20.04
needs: [
allocate-runners,
build-linux-arm64-artifacts,
@@ -325,7 +318,7 @@ jobs:
needs: [
release-images-to-dockerhub
]
runs-on: ubuntu-latest
runs-on: ubuntu-20.04
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
steps:
54 .github/workflows/develop.yml vendored
@@ -34,7 +34,7 @@ env:
jobs:
typos:
name: Spell Check with Typos
runs-on: ubuntu-latest
runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@v3
- uses: crate-ci/typos@v1.13.10
@@ -42,7 +42,7 @@ jobs:
check:
name: Check
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
runs-on: ubuntu-20.04
timeout-minutes: 60
steps:
- uses: actions/checkout@v3
@@ -60,7 +60,7 @@ jobs:
toml:
name: Toml Check
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
runs-on: ubuntu-20.04
timeout-minutes: 60
steps:
- uses: actions/checkout@v3
@@ -80,7 +80,7 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ ubuntu-latest-8-cores, windows-latest-8-cores ]
os: [ ubuntu-20.04-8-cores ]
timeout-minutes: 60
steps:
- uses: actions/checkout@v3
@@ -105,7 +105,7 @@ jobs:
fmt:
name: Rustfmt
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
runs-on: ubuntu-20.04
timeout-minutes: 60
steps:
- uses: actions/checkout@v3
@@ -124,7 +124,7 @@ jobs:
clippy:
name: Clippy
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
runs-on: ubuntu-20.04
timeout-minutes: 60
steps:
- uses: actions/checkout@v3
@@ -142,7 +142,7 @@ jobs:

coverage:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest-8-cores
runs-on: ubuntu-20.04-8-cores
timeout-minutes: 60
steps:
- uses: actions/checkout@v3
@@ -188,43 +188,3 @@ jobs:
flags: rust
fail_ci_if_error: false
verbose: true

test-on-windows:
if: github.event.pull_request.draft == false
runs-on: windows-latest-8-cores
timeout-minutes: 60
steps:
- run: git config --global core.autocrlf false
- uses: actions/checkout@v3
- uses: arduino/setup-protoc@v1
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- name: Install Rust toolchain
uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
components: llvm-tools-preview
- name: Rust Cache
uses: Swatinem/rust-cache@v2
- name: Install Cargo Nextest
uses: taiki-e/install-action@nextest
- name: Install Python
uses: actions/setup-python@v4
with:
python-version: '3.10'
- name: Install PyArrow Package
run: pip install pyarrow
- name: Install WSL distribution
uses: Vampire/setup-wsl@v2
with:
distribution: Ubuntu-22.04
- name: Running tests
run: cargo nextest run -F pyo3_backend,dashboard
env:
RUST_BACKTRACE: 1
CARGO_INCREMENTAL: 0
GT_S3_BUCKET: ${{ secrets.S3_BUCKET }}
GT_S3_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
GT_S3_ACCESS_KEY: ${{ secrets.S3_ACCESS_KEY }}
GT_S3_REGION: ${{ secrets.S3_REGION }}
UNITTEST_LOG_DIR: "__unittest_logs"
4 .github/workflows/doc-issue.yml vendored
@@ -11,7 +11,7 @@ on:
jobs:
doc_issue:
if: github.event.label.name == 'doc update required'
runs-on: ubuntu-latest
runs-on: ubuntu-20.04
steps:
- name: create an issue in doc repo
uses: dacbd/create-issue-action@main
@@ -25,7 +25,7 @@ jobs:
${{ github.event.issue.html_url || github.event.pull_request.html_url }}
cloud_issue:
if: github.event.label.name == 'cloud followup required'
runs-on: ubuntu-latest
runs-on: ubuntu-20.04
steps:
- name: create an issue in cloud repo
uses: dacbd/create-issue-action@main
12 .github/workflows/docs.yml vendored
@@ -30,7 +30,7 @@ name: CI
jobs:
typos:
name: Spell Check with Typos
runs-on: ubuntu-latest
runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@v3
- uses: crate-ci/typos@v1.13.10
@@ -38,33 +38,33 @@ jobs:
check:
name: Check
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
runs-on: ubuntu-20.04
steps:
- run: 'echo "No action required"'

fmt:
name: Rustfmt
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
runs-on: ubuntu-20.04
steps:
- run: 'echo "No action required"'

clippy:
name: Clippy
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
runs-on: ubuntu-20.04
steps:
- run: 'echo "No action required"'

coverage:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
runs-on: ubuntu-20.04
steps:
- run: 'echo "No action required"'

sqlness:
name: Sqlness Test
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
runs-on: ubuntu-20.04
steps:
- run: 'echo "No action required"'
2 .github/workflows/license.yaml vendored
@@ -8,7 +8,7 @@ on:
types: [opened, synchronize, reopened, ready_for_review]
jobs:
license-header-check:
runs-on: ubuntu-latest
runs-on: ubuntu-20.04
name: license-header-check
steps:
- uses: actions/checkout@v2
62 .github/workflows/nightly-build.yml vendored
@@ -14,11 +14,11 @@ on:
description: The runner used to build linux-amd64 artifacts
default: ec2-c6i.2xlarge-amd64
options:
- ubuntu-latest
- ubuntu-latest-8-cores
- ubuntu-latest-16-cores
- ubuntu-latest-32-cores
- ubuntu-latest-64-cores
- ubuntu-20.04
- ubuntu-20.04-8-cores
- ubuntu-20.04-16-cores
- ubuntu-20.04-32-cores
- ubuntu-20.04-64-cores
- ec2-c6i.xlarge-amd64 # 4C8G
- ec2-c6i.2xlarge-amd64 # 8C16G
- ec2-c6i.4xlarge-amd64 # 16C32G
@@ -70,7 +70,7 @@ jobs:
allocate-runners:
name: Allocate runners
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
runs-on: ubuntu-latest
runs-on: ubuntu-20.04
outputs:
linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }}
linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }}
@@ -147,11 +147,6 @@ jobs:
cargo-profile: ${{ env.CARGO_PROFILE }}
version: ${{ needs.allocate-runners.outputs.version }}
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
release-to-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
aws-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
aws-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
upload-latest-artifacts: false

build-linux-arm64-artifacts:
name: Build linux-arm64 artifacts
@@ -171,11 +166,6 @@ jobs:
cargo-profile: ${{ env.CARGO_PROFILE }}
version: ${{ needs.allocate-runners.outputs.version }}
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
release-to-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
aws-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
aws-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
upload-latest-artifacts: false

release-images-to-dockerhub:
name: Build and push images to DockerHub
@@ -185,7 +175,7 @@ jobs:
build-linux-amd64-artifacts,
build-linux-arm64-artifacts,
]
runs-on: ubuntu-latest
runs-on: ubuntu-20.04
outputs:
nightly-build-result: ${{ steps.set-nightly-build-result.outputs.nightly-build-result }}
steps:
@@ -208,15 +198,14 @@ jobs:
run: |
echo "nightly-build-result=success" >> $GITHUB_OUTPUT

release-images-to-acr:
name: Build and push images to ACR
release-cn-artifacts:
name: Release artifacts to CN region
if: ${{ inputs.release_images || github.event_name == 'schedule' }}
needs: [
allocate-runners,
build-linux-amd64-artifacts,
build-linux-arm64-artifacts,
release-images-to-dockerhub,
]
runs-on: ubuntu-latest
runs-on: ubuntu-20.04
# When we push to ACR, it's easy to fail due to some unknown network issues.
# However, we don't want to fail the whole workflow because of this.
# ACR has a daily sync with DockerHub, so don't worry about the image not being updated.
@@ -226,21 +215,30 @@ jobs:
with:
fetch-depth: 0

- name: Build and push images to ACR
uses: ./.github/actions/build-images
- name: Release artifacts to CN region
uses: ./.github/actions/release-cn-artifacts
with:
image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
image-namespace: ${{ vars.IMAGE_NAMESPACE }}
image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
src-image-registry: docker.io
src-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
src-image-name: greptimedb
dst-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
dst-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
dst-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
dst-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
version: ${{ needs.allocate-runners.outputs.version }}
push-latest-tag: false # Don't push the latest tag to the registry.
aws-cn-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
aws-cn-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
aws-cn-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
dev-mode: false
update-version-info: false # Don't update version info in S3.
push-latest-tag: false # Don't push the latest tag to the registry.

stop-linux-amd64-runner: # It's always run as the last job in the workflow to make sure that the runner is released.
name: Stop linux-amd64 runner
# Only run this job when the runner is allocated.
if: ${{ always() }}
runs-on: ubuntu-latest
runs-on: ubuntu-20.04
needs: [
allocate-runners,
build-linux-amd64-artifacts,
@@ -265,7 +263,7 @@ jobs:
name: Stop linux-arm64 runner
# Only run this job when the runner is allocated.
if: ${{ always() }}
runs-on: ubuntu-latest
runs-on: ubuntu-20.04
needs: [
allocate-runners,
build-linux-arm64-artifacts,
@@ -292,7 +290,7 @@ jobs:
needs: [
release-images-to-dockerhub
]
runs-on: ubuntu-latest
runs-on: ubuntu-20.04
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
steps:
82 .github/workflows/nightly-ci.yml vendored Normal file
@@ -0,0 +1,82 @@
# Nightly CI: runs tests every night for our second-tier platforms (Windows)

on:
schedule:
- cron: '0 23 * * 1-5'
workflow_dispatch:

name: Nightly CI

concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true

env:
RUST_TOOLCHAIN: nightly-2023-08-07

jobs:
sqlness:
name: Sqlness Test
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ windows-latest-8-cores ]
timeout-minutes: 60
steps:
- uses: actions/checkout@v4.1.0
- uses: arduino/setup-protoc@v1
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- name: Rust Cache
uses: Swatinem/rust-cache@v2
- name: Run sqlness
run: cargo sqlness
- name: Upload sqlness logs
if: always()
uses: actions/upload-artifact@v3
with:
name: sqlness-logs
path: ${{ runner.temp }}/greptime-*.log
retention-days: 3

test-on-windows:
runs-on: windows-latest-8-cores
timeout-minutes: 60
steps:
- run: git config --global core.autocrlf false
- uses: actions/checkout@v4.1.0
- uses: arduino/setup-protoc@v1
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- name: Install Rust toolchain
uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
components: llvm-tools-preview
- name: Rust Cache
uses: Swatinem/rust-cache@v2
- name: Install Cargo Nextest
uses: taiki-e/install-action@nextest
- name: Install Python
uses: actions/setup-python@v4
with:
python-version: '3.10'
- name: Install PyArrow Package
run: pip install pyarrow
- name: Install WSL distribution
uses: Vampire/setup-wsl@v2
with:
distribution: Ubuntu-22.04
- name: Running tests
run: cargo nextest run -F pyo3_backend,dashboard
env:
RUST_BACKTRACE: 1
CARGO_INCREMENTAL: 0
GT_S3_BUCKET: ${{ secrets.S3_BUCKET }}
GT_S3_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
GT_S3_ACCESS_KEY: ${{ secrets.S3_ACCESS_KEY }}
GT_S3_REGION: ${{ secrets.S3_REGION }}
UNITTEST_LOG_DIR: "__unittest_logs"
4 .github/workflows/pr-title-checker.yml vendored
@@ -10,7 +10,7 @@ on:

jobs:
check:
runs-on: ubuntu-latest
runs-on: ubuntu-20.04
timeout-minutes: 10
steps:
- uses: thehanimo/pr-title-checker@v1.3.4
@@ -19,7 +19,7 @@ jobs:
pass_on_octokit_error: false
configuration_path: ".github/pr-title-checker-config.json"
breaking:
runs-on: ubuntu-latest
runs-on: ubuntu-20.04
timeout-minutes: 10
steps:
- uses: thehanimo/pr-title-checker@v1.3.4
85 .github/workflows/release-dev-builder-images.yaml vendored Normal file
@@ -0,0 +1,85 @@
name: Release dev-builder images

on:
workflow_dispatch: # Allows you to run this workflow manually.
inputs:
version:
description: Version of the dev-builder
required: false
default: latest
release_dev_builder_ubuntu_image:
type: boolean
description: Release dev-builder-ubuntu image
required: false
default: false
release_dev_builder_centos_image:
type: boolean
description: Release dev-builder-centos image
required: false
default: false
release_dev_builder_android_image:
type: boolean
description: Release dev-builder-android image
required: false
default: false

jobs:
release-dev-builder-images:
name: Release dev builder images
if: ${{ inputs.release_dev_builder_ubuntu_image || inputs.release_dev_builder_centos_image || inputs.release_dev_builder_android_image }} # Only manually trigger this job.
runs-on: ubuntu-20.04-16-cores
steps:
- name: Checkout
uses: actions/checkout@v3
with:
fetch-depth: 0

- name: Build and push dev builder images
uses: ./.github/actions/build-dev-builder-images
with:
version: ${{ inputs.version }}
dockerhub-image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
dockerhub-image-registry-token: ${{ secrets.DOCKERHUB_TOKEN }}
build-dev-builder-ubuntu: ${{ inputs.release_dev_builder_ubuntu_image }}
build-dev-builder-centos: ${{ inputs.release_dev_builder_centos_image }}
build-dev-builder-android: ${{ inputs.release_dev_builder_android_image }}

release-dev-builder-images-cn: # Note: Be careful of issue https://github.com/containers/skopeo/issues/1874; we decided to use the latest stable skopeo container.
name: Release dev builder images to CN region
runs-on: ubuntu-20.04
needs: [
release-dev-builder-images
]
steps:
- name: Push dev-builder-ubuntu image
shell: bash
if: ${{ inputs.release_dev_builder_ubuntu_image }}
env:
DST_REGISTRY_USERNAME: ${{ secrets.ALICLOUD_USERNAME }}
DST_REGISTRY_PASSWORD: ${{ secrets.ALICLOUD_PASSWORD }}
run: |
docker run quay.io/skopeo/stable:latest copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ inputs.version }} \
--dest-creds "$DST_REGISTRY_USERNAME":"$DST_REGISTRY_PASSWORD" \
docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ inputs.version }}

- name: Push dev-builder-centos image
shell: bash
if: ${{ inputs.release_dev_builder_centos_image }}
env:
DST_REGISTRY_USERNAME: ${{ secrets.ALICLOUD_USERNAME }}
DST_REGISTRY_PASSWORD: ${{ secrets.ALICLOUD_PASSWORD }}
run: |
docker run quay.io/skopeo/stable:latest copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:${{ inputs.version }} \
--dest-creds "$DST_REGISTRY_USERNAME":"$DST_REGISTRY_PASSWORD" \
docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:${{ inputs.version }}

- name: Push dev-builder-android image
shell: bash
if: ${{ inputs.release_dev_builder_android_image }}
env:
DST_REGISTRY_USERNAME: ${{ secrets.ALICLOUD_USERNAME }}
DST_REGISTRY_PASSWORD: ${{ secrets.ALICLOUD_PASSWORD }}
run: |
docker run quay.io/skopeo/stable:latest copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:${{ inputs.version }} \
--dest-creds "$DST_REGISTRY_USERNAME":"$DST_REGISTRY_PASSWORD" \
docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:${{ inputs.version }}
.github/workflows/release.yml (vendored, 140 changed lines)
@@ -18,11 +18,11 @@ on:
|
||||
description: The runner uses to build linux-amd64 artifacts
|
||||
default: ec2-c6i.4xlarge-amd64
|
||||
options:
|
||||
- ubuntu-latest
|
||||
- ubuntu-latest-8-cores
|
||||
- ubuntu-latest-16-cores
|
||||
- ubuntu-latest-32-cores
|
||||
- ubuntu-latest-64-cores
|
||||
- ubuntu-20.04
|
||||
- ubuntu-20.04-8-cores
|
||||
- ubuntu-20.04-16-cores
|
||||
- ubuntu-20.04-32-cores
|
||||
- ubuntu-20.04-64-cores
|
||||
- ec2-c6i.xlarge-amd64 # 4C8G
|
||||
- ec2-c6i.2xlarge-amd64 # 8C16G
|
||||
- ec2-c6i.4xlarge-amd64 # 16C32G
|
||||
@@ -63,7 +63,12 @@ on:
|
||||
description: Build macos artifacts
|
||||
required: false
|
||||
default: false
|
||||
release_artifacts:
|
||||
build_windows_artifacts:
|
||||
type: boolean
|
||||
description: Build Windows artifacts
|
||||
required: false
|
||||
default: false
|
||||
publish_github_release:
|
||||
type: boolean
|
||||
description: Create GitHub release and upload artifacts
|
||||
required: false
|
||||
@@ -73,11 +78,6 @@ on:
|
||||
description: Build and push images to DockerHub and ACR
|
||||
required: false
|
||||
default: false
|
||||
release_dev_builder_image:
|
||||
type: boolean
|
||||
description: Release dev-builder image
|
||||
required: false
|
||||
default: false
|
||||
|
||||
# Use env variables to control all the release process.
|
||||
env:
|
||||
@@ -97,11 +97,12 @@ jobs:
|
||||
allocate-runners:
|
||||
name: Allocate runners
|
||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-20.04
|
||||
outputs:
|
||||
linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }}
|
||||
linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }}
|
||||
macos-runner: ${{ inputs.macos_runner || vars.DEFAULT_MACOS_RUNNER }}
|
||||
windows-runner: windows-latest-8-cores
|
||||
|
||||
# The following EC2 resource id will be used for resource releasing.
|
||||
linux-amd64-ec2-runner-label: ${{ steps.start-linux-amd64-runner.outputs.label }}
|
||||
@@ -177,11 +178,6 @@ jobs:
|
||||
cargo-profile: ${{ env.CARGO_PROFILE }}
|
||||
version: ${{ needs.allocate-runners.outputs.version }}
|
||||
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
|
||||
release-to-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
|
||||
aws-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
|
||||
aws-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
|
||||
aws-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
|
||||
upload-to-s3: ${{ vars.UPLOAD_TO_S3 }}
|
||||
|
||||
build-linux-arm64-artifacts:
|
||||
name: Build linux-arm64 artifacts
|
||||
@@ -201,11 +197,6 @@ jobs:
|
||||
cargo-profile: ${{ env.CARGO_PROFILE }}
|
||||
version: ${{ needs.allocate-runners.outputs.version }}
|
||||
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
|
||||
release-to-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
|
||||
aws-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
|
||||
aws-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
|
||||
aws-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
|
||||
upload-to-s3: ${{ vars.UPLOAD_TO_S3 }}
|
||||
|
||||
build-macos-artifacts:
|
||||
name: Build macOS artifacts
|
||||
@@ -247,12 +238,43 @@ jobs:
|
||||
features: ${{ matrix.features }}
|
||||
version: ${{ needs.allocate-runners.outputs.version }}
|
||||
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
|
||||
release-to-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
|
||||
artifacts-dir: ${{ matrix.artifacts-dir-prefix }}-${{ needs.allocate-runners.outputs.version }}
|
||||
aws-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
|
||||
aws-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
|
||||
aws-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
|
||||
upload-to-s3: ${{ vars.UPLOAD_TO_S3 }}
|
||||
|
||||
build-windows-artifacts:
|
||||
name: Build Windows artifacts
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
include:
|
||||
- os: ${{ needs.allocate-runners.outputs.windows-runner }}
|
||||
arch: x86_64-pc-windows-msvc
|
||||
features: servers/dashboard
|
||||
artifacts-dir-prefix: greptime-windows-amd64
|
||||
- os: ${{ needs.allocate-runners.outputs.windows-runner }}
|
||||
arch: x86_64-pc-windows-msvc
|
||||
features: pyo3_backend,servers/dashboard
|
||||
artifacts-dir-prefix: greptime-windows-amd64-pyo3
|
||||
runs-on: ${{ matrix.os }}
|
||||
needs: [
|
||||
allocate-runners,
|
||||
]
|
||||
if: ${{ inputs.build_windows_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
|
||||
steps:
|
||||
- run: git config --global core.autocrlf false
|
||||
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- uses: ./.github/actions/build-windows-artifacts
|
||||
with:
|
||||
arch: ${{ matrix.arch }}
|
||||
rust-toolchain: ${{ env.RUST_TOOLCHAIN }}
|
||||
cargo-profile: ${{ env.CARGO_PROFILE }}
|
||||
features: ${{ matrix.features }}
|
||||
version: ${{ needs.allocate-runners.outputs.version }}
|
||||
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
|
||||
artifacts-dir: ${{ matrix.artifacts-dir-prefix }}-${{ needs.allocate-runners.outputs.version }}
|
||||
|
||||
release-images-to-dockerhub:
|
||||
name: Build and push images to DockerHub
|
||||
@@ -277,15 +299,14 @@ jobs:
|
||||
image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
version: ${{ needs.allocate-runners.outputs.version }}
|
||||
|
||||
release-images-to-acr:
|
||||
name: Build and push images to ACR
|
||||
release-cn-artifacts:
|
||||
name: Release artifacts to CN region
|
||||
if: ${{ inputs.release_images || github.event_name == 'push' || github.event_name == 'schedule' }}
|
||||
needs: [
|
||||
allocate-runners,
|
||||
build-linux-amd64-artifacts,
|
||||
build-linux-arm64-artifacts,
|
||||
release-images-to-dockerhub,
|
||||
]
|
||||
runs-on: ubuntu-2004-16-cores
|
||||
runs-on: ubuntu-20.04
|
||||
# When we push to ACR, it's easy to fail due to some unknown network issues.
|
||||
# However, we don't want to fail the whole workflow because of this.
|
||||
# The ACR have daily sync with DockerHub, so don't worry about the image not being updated.
|
||||
@@ -295,18 +316,28 @@ jobs:
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Build and push images to ACR
|
||||
uses: ./.github/actions/build-images
|
||||
- name: Release artifacts to CN region
|
||||
uses: ./.github/actions/release-cn-artifacts
|
||||
with:
|
||||
image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
|
||||
image-namespace: ${{ vars.IMAGE_NAMESPACE }}
|
||||
image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
|
||||
image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
|
||||
src-image-registry: docker.io
|
||||
src-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
|
||||
src-image-name: greptimedb
|
||||
dst-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
|
||||
dst-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
|
||||
dst-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
|
||||
dst-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
|
||||
version: ${{ needs.allocate-runners.outputs.version }}
|
||||
aws-cn-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
|
||||
aws-cn-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
|
||||
aws-cn-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
|
||||
aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
|
||||
dev-mode: false
|
||||
update-version-info: true
|
||||
push-latest-tag: true
|
||||
|
||||
release-artifacts:
|
||||
publish-github-release:
|
||||
name: Create GitHub release and upload artifacts
|
||||
if: ${{ inputs.release_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
|
||||
if: ${{ inputs.publish_github_release || github.event_name == 'push' || github.event_name == 'schedule' }}
|
||||
needs: [
|
||||
allocate-runners,
|
||||
build-linux-amd64-artifacts,
|
||||
@@ -314,36 +345,17 @@ jobs:
|
||||
build-macos-artifacts,
|
||||
release-images-to-dockerhub,
|
||||
]
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Release artifacts
|
||||
uses: ./.github/actions/release-artifacts
|
||||
- name: Publish GitHub release
|
||||
uses: ./.github/actions/publish-github-release
|
||||
with:
|
||||
version: ${{ needs.allocate-runners.outputs.version }}
|
||||
|
||||
release-dev-builder-image:
|
||||
name: Release dev builder image
|
||||
if: ${{ inputs.release_dev_builder_image }} # Only manually trigger this job.
|
||||
runs-on: ubuntu-latest-16-cores
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Build and push dev builder image
|
||||
uses: ./.github/actions/build-dev-builder-image
|
||||
with:
|
||||
dockerhub-image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
dockerhub-image-registry-token: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
acr-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
|
||||
acr-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
|
||||
acr-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
|
||||
|
||||
### Stop runners ###
|
||||
# It's very necessary to split the job of releasing runners into 'stop-linux-amd64-runner' and 'stop-linux-arm64-runner'.
|
||||
# Because we can terminate the specified EC2 instance immediately after the job is finished without uncessary waiting.
|
||||
@@ -351,7 +363,7 @@ jobs:
|
||||
name: Stop linux-amd64 runner
|
||||
# Only run this job when the runner is allocated.
|
||||
if: ${{ always() }}
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-20.04
|
||||
needs: [
|
||||
allocate-runners,
|
||||
build-linux-amd64-artifacts,
|
||||
@@ -376,7 +388,7 @@ jobs:
|
||||
name: Stop linux-arm64 runner
|
||||
# Only run this job when the runner is allocated.
|
||||
if: ${{ always() }}
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-20.04
|
||||
needs: [
|
||||
allocate-runners,
|
||||
build-linux-arm64-artifacts,
|
||||
|
||||
Cargo.lock (generated, 125 changed lines)
@@ -204,7 +204,7 @@ checksum = "8f1f8f5a6f3d50d89e3797d7593a50f96bb2aaa20ca0cc7be1fb673232c91d72"
|
||||
|
||||
[[package]]
|
||||
name = "api"
|
||||
version = "0.4.0"
|
||||
version = "0.4.1"
|
||||
dependencies = [
|
||||
"common-base",
|
||||
"common-error",
|
||||
@@ -666,7 +666,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "auth"
|
||||
version = "0.4.0"
|
||||
version = "0.4.1"
|
||||
dependencies = [
|
||||
"api",
|
||||
"async-trait",
|
||||
@@ -839,11 +839,13 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "benchmarks"
|
||||
version = "0.4.0"
|
||||
version = "0.4.1"
|
||||
dependencies = [
|
||||
"arrow",
|
||||
"chrono",
|
||||
"clap 4.4.1",
|
||||
"client",
|
||||
"futures-util",
|
||||
"indicatif",
|
||||
"itertools 0.10.5",
|
||||
"parquet",
|
||||
@@ -1220,7 +1222,7 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"
|
||||
|
||||
[[package]]
|
||||
name = "catalog"
|
||||
version = "0.4.0"
|
||||
version = "0.4.1"
|
||||
dependencies = [
|
||||
"api",
|
||||
"arc-swap",
|
||||
@@ -1504,7 +1506,7 @@ checksum = "cd7cc57abe963c6d3b9d8be5b06ba7c8957a930305ca90304f24ef040aa6f961"
|
||||
|
||||
[[package]]
|
||||
name = "client"
|
||||
version = "0.4.0"
|
||||
version = "0.4.1"
|
||||
dependencies = [
|
||||
"api",
|
||||
"arrow-flight",
|
||||
@@ -1534,7 +1536,7 @@ dependencies = [
|
||||
"rand",
|
||||
"session",
|
||||
"snafu",
|
||||
"substrait 0.4.0",
|
||||
"substrait 0.4.1",
|
||||
"substrait 0.7.5",
|
||||
"tokio",
|
||||
"tokio-stream",
|
||||
@@ -1571,7 +1573,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "cmd"
|
||||
version = "0.4.0"
|
||||
version = "0.4.1"
|
||||
dependencies = [
|
||||
"anymap",
|
||||
"async-trait",
|
||||
@@ -1619,7 +1621,7 @@ dependencies = [
|
||||
"servers",
|
||||
"session",
|
||||
"snafu",
|
||||
"substrait 0.4.0",
|
||||
"substrait 0.4.1",
|
||||
"table",
|
||||
"temp-env",
|
||||
"tikv-jemallocator",
|
||||
@@ -1652,7 +1654,7 @@ checksum = "55b672471b4e9f9e95499ea597ff64941a309b2cdbffcc46f2cc5e2d971fd335"
|
||||
|
||||
[[package]]
|
||||
name = "common-base"
|
||||
version = "0.4.0"
|
||||
version = "0.4.1"
|
||||
dependencies = [
|
||||
"anymap",
|
||||
"bitvec",
|
||||
@@ -1667,7 +1669,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-catalog"
|
||||
version = "0.4.0"
|
||||
version = "0.4.1"
|
||||
dependencies = [
|
||||
"chrono",
|
||||
"common-error",
|
||||
@@ -1680,7 +1682,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-config"
|
||||
version = "0.4.0"
|
||||
version = "0.4.1"
|
||||
dependencies = [
|
||||
"common-base",
|
||||
"humantime-serde",
|
||||
@@ -1689,7 +1691,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-datasource"
|
||||
version = "0.4.0"
|
||||
version = "0.4.1"
|
||||
dependencies = [
|
||||
"arrow",
|
||||
"arrow-schema",
|
||||
@@ -1718,7 +1720,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-error"
|
||||
version = "0.4.0"
|
||||
version = "0.4.1"
|
||||
dependencies = [
|
||||
"snafu",
|
||||
"strum 0.25.0",
|
||||
@@ -1726,7 +1728,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-function"
|
||||
version = "0.4.0"
|
||||
version = "0.4.1"
|
||||
dependencies = [
|
||||
"arc-swap",
|
||||
"chrono-tz 0.6.3",
|
||||
@@ -1749,7 +1751,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-greptimedb-telemetry"
|
||||
version = "0.4.0"
|
||||
version = "0.4.1"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"common-error",
|
||||
@@ -1768,7 +1770,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-grpc"
|
||||
version = "0.4.0"
|
||||
version = "0.4.1"
|
||||
dependencies = [
|
||||
"api",
|
||||
"arrow-flight",
|
||||
@@ -1798,7 +1800,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-grpc-expr"
|
||||
version = "0.4.0"
|
||||
version = "0.4.1"
|
||||
dependencies = [
|
||||
"api",
|
||||
"async-trait",
|
||||
@@ -1817,7 +1819,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-macro"
|
||||
version = "0.4.0"
|
||||
version = "0.4.1"
|
||||
dependencies = [
|
||||
"arc-swap",
|
||||
"backtrace",
|
||||
@@ -1834,7 +1836,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-mem-prof"
|
||||
version = "0.4.0"
|
||||
version = "0.4.1"
|
||||
dependencies = [
|
||||
"common-error",
|
||||
"common-macro",
|
||||
@@ -1847,7 +1849,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-meta"
|
||||
version = "0.4.0"
|
||||
version = "0.4.1"
|
||||
dependencies = [
|
||||
"api",
|
||||
"arrow-flight",
|
||||
@@ -1885,7 +1887,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-procedure"
|
||||
version = "0.4.0"
|
||||
version = "0.4.1"
|
||||
dependencies = [
|
||||
"async-stream",
|
||||
"async-trait",
|
||||
@@ -1909,7 +1911,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-procedure-test"
|
||||
version = "0.4.0"
|
||||
version = "0.4.1"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"common-procedure",
|
||||
@@ -1917,7 +1919,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-query"
|
||||
version = "0.4.0"
|
||||
version = "0.4.1"
|
||||
dependencies = [
|
||||
"api",
|
||||
"async-trait",
|
||||
@@ -1940,7 +1942,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-recordbatch"
|
||||
version = "0.4.0"
|
||||
version = "0.4.1"
|
||||
dependencies = [
|
||||
"common-error",
|
||||
"common-macro",
|
||||
@@ -1957,7 +1959,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-runtime"
|
||||
version = "0.4.0"
|
||||
version = "0.4.1"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"common-error",
|
||||
@@ -1974,7 +1976,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-telemetry"
|
||||
version = "0.4.0"
|
||||
version = "0.4.1"
|
||||
dependencies = [
|
||||
"backtrace",
|
||||
"common-error",
|
||||
@@ -2001,7 +2003,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-test-util"
|
||||
version = "0.4.0"
|
||||
version = "0.4.1"
|
||||
dependencies = [
|
||||
"once_cell",
|
||||
"rand",
|
||||
@@ -2010,7 +2012,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-time"
|
||||
version = "0.4.0"
|
||||
version = "0.4.1"
|
||||
dependencies = [
|
||||
"arrow",
|
||||
"chrono",
|
||||
@@ -2025,7 +2027,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-version"
|
||||
version = "0.4.0"
|
||||
version = "0.4.1"
|
||||
dependencies = [
|
||||
"build-data",
|
||||
]
|
||||
@@ -2663,7 +2665,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "datanode"
|
||||
version = "0.4.0"
|
||||
version = "0.4.1"
|
||||
dependencies = [
|
||||
"api",
|
||||
"arrow-flight",
|
||||
@@ -2722,7 +2724,7 @@ dependencies = [
|
||||
"sql",
|
||||
"storage",
|
||||
"store-api",
|
||||
"substrait 0.4.0",
|
||||
"substrait 0.4.1",
|
||||
"table",
|
||||
"tokio",
|
||||
"tokio-stream",
|
||||
@@ -2736,7 +2738,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "datatypes"
|
||||
version = "0.4.0"
|
||||
version = "0.4.1"
|
||||
dependencies = [
|
||||
"arrow",
|
||||
"arrow-array",
|
||||
@@ -3199,7 +3201,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "file-engine"
|
||||
version = "0.4.0"
|
||||
version = "0.4.1"
|
||||
dependencies = [
|
||||
"api",
|
||||
"async-trait",
|
||||
@@ -3309,7 +3311,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "frontend"
|
||||
version = "0.4.0"
|
||||
version = "0.4.1"
|
||||
dependencies = [
|
||||
"api",
|
||||
"arc-swap",
|
||||
@@ -3373,7 +3375,7 @@ dependencies = [
|
||||
"storage",
|
||||
"store-api",
|
||||
"strfmt",
|
||||
"substrait 0.4.0",
|
||||
"substrait 0.4.1",
|
||||
"table",
|
||||
"tokio",
|
||||
"toml 0.7.6",
|
||||
@@ -5004,7 +5006,7 @@ checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f"
|
||||
|
||||
[[package]]
|
||||
name = "log-store"
|
||||
version = "0.4.0"
|
||||
version = "0.4.1"
|
||||
dependencies = [
|
||||
"async-stream",
|
||||
"async-trait",
|
||||
@@ -5274,7 +5276,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "meta-client"
|
||||
version = "0.4.0"
|
||||
version = "0.4.1"
|
||||
dependencies = [
|
||||
"api",
|
||||
"async-trait",
|
||||
@@ -5287,6 +5289,7 @@ dependencies = [
|
||||
"datatypes",
|
||||
"etcd-client",
|
||||
"futures",
|
||||
"humantime-serde",
|
||||
"meta-srv",
|
||||
"rand",
|
||||
"serde",
|
||||
@@ -5303,7 +5306,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "meta-srv"
|
||||
version = "0.4.0"
|
||||
version = "0.4.1"
|
||||
dependencies = [
|
||||
"anymap",
|
||||
"api",
|
||||
@@ -5495,7 +5498,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "mito2"
|
||||
version = "0.4.0"
|
||||
version = "0.4.1"
|
||||
dependencies = [
|
||||
"anymap",
|
||||
"api",
|
||||
@@ -5957,7 +5960,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "object-store"
|
||||
version = "0.4.0"
|
||||
version = "0.4.1"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"async-trait",
|
||||
@@ -6181,7 +6184,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "operator"
|
||||
version = "0.4.0"
|
||||
version = "0.4.1"
|
||||
dependencies = [
|
||||
"api",
|
||||
"async-compat",
|
||||
@@ -6226,7 +6229,7 @@ dependencies = [
|
||||
"sqlparser 0.34.0",
|
||||
"storage",
|
||||
"store-api",
|
||||
"substrait 0.4.0",
|
||||
"substrait 0.4.1",
|
||||
"table",
|
||||
"tokio",
|
||||
"tonic 0.9.2",
|
||||
@@ -6446,7 +6449,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "partition"
|
||||
version = "0.4.0"
|
||||
version = "0.4.1"
|
||||
dependencies = [
|
||||
"api",
|
||||
"async-trait",
|
||||
@@ -6772,7 +6775,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "plugins"
|
||||
version = "0.4.0"
|
||||
version = "0.4.1"
|
||||
dependencies = [
|
||||
"auth",
|
||||
"common-base",
|
||||
@@ -7022,7 +7025,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "promql"
|
||||
version = "0.4.0"
|
||||
version = "0.4.1"
|
||||
dependencies = [
|
||||
"async-recursion",
|
||||
"async-trait",
|
||||
@@ -7284,7 +7287,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "query"
|
||||
version = "0.4.0"
|
||||
version = "0.4.1"
|
||||
dependencies = [
|
||||
"ahash 0.8.3",
|
||||
"api",
|
||||
@@ -7341,7 +7344,7 @@ dependencies = [
|
||||
"stats-cli",
|
||||
"store-api",
|
||||
"streaming-stats",
|
||||
"substrait 0.4.0",
|
||||
"substrait 0.4.1",
|
||||
"table",
|
||||
"tokio",
|
||||
"tokio-stream",
|
||||
@@ -8540,7 +8543,7 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"
|
||||
|
||||
[[package]]
|
||||
name = "script"
|
||||
version = "0.4.0"
|
||||
version = "0.4.1"
|
||||
dependencies = [
|
||||
"api",
|
||||
"arc-swap",
|
||||
@@ -8820,7 +8823,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "servers"
|
||||
version = "0.4.0"
|
||||
version = "0.4.1"
|
||||
dependencies = [
|
||||
"aide",
|
||||
"api",
|
||||
@@ -8914,7 +8917,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "session"
|
||||
version = "0.4.0"
|
||||
version = "0.4.1"
|
||||
dependencies = [
|
||||
"api",
|
||||
"arc-swap",
|
||||
@@ -9192,7 +9195,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "sql"
|
||||
version = "0.4.0"
|
||||
version = "0.4.1"
|
||||
dependencies = [
|
||||
"api",
|
||||
"common-base",
|
||||
@@ -9243,7 +9246,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "sqlness-runner"
|
||||
version = "0.4.0"
|
||||
version = "0.4.1"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"clap 4.4.1",
|
||||
@@ -9263,13 +9266,13 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "sqlparser"
|
||||
version = "0.34.0"
|
||||
source = "git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=296a4f6c73b129d6f565a42a2e5e53c6bc2b9da4#296a4f6c73b129d6f565a42a2e5e53c6bc2b9da4"
|
||||
source = "git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=6cf9d23d5b8fbecd65efc1d9afb7e80ad7a424da#6cf9d23d5b8fbecd65efc1d9afb7e80ad7a424da"
|
||||
dependencies = [
|
||||
"lazy_static",
|
||||
"log",
|
||||
"regex",
|
||||
"sqlparser 0.35.0",
|
||||
"sqlparser_derive 0.1.1 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=296a4f6c73b129d6f565a42a2e5e53c6bc2b9da4)",
|
||||
"sqlparser_derive 0.1.1 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=6cf9d23d5b8fbecd65efc1d9afb7e80ad7a424da)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -9296,7 +9299,7 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "sqlparser_derive"
|
||||
version = "0.1.1"
|
||||
source = "git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=296a4f6c73b129d6f565a42a2e5e53c6bc2b9da4#296a4f6c73b129d6f565a42a2e5e53c6bc2b9da4"
|
||||
source = "git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=6cf9d23d5b8fbecd65efc1d9afb7e80ad7a424da#6cf9d23d5b8fbecd65efc1d9afb7e80ad7a424da"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
@@ -9449,7 +9452,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "storage"
|
||||
version = "0.4.0"
|
||||
version = "0.4.1"
|
||||
dependencies = [
|
||||
"api",
|
||||
"arc-swap",
|
||||
@@ -9503,7 +9506,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "store-api"
|
||||
version = "0.4.0"
|
||||
version = "0.4.1"
|
||||
dependencies = [
|
||||
"api",
|
||||
"aquamarine",
|
||||
@@ -9641,7 +9644,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "substrait"
|
||||
version = "0.4.0"
|
||||
version = "0.4.1"
|
||||
dependencies = [
|
||||
"async-recursion",
|
||||
"async-trait",
|
||||
@@ -9799,7 +9802,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "table"
|
||||
version = "0.4.0"
|
||||
version = "0.4.1"
|
||||
dependencies = [
|
||||
"anymap",
|
||||
"async-trait",
|
||||
@@ -9905,7 +9908,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "tests-integration"
|
||||
version = "0.4.0"
|
||||
version = "0.4.1"
|
||||
dependencies = [
|
||||
"api",
|
||||
"async-trait",
|
||||
@@ -9958,7 +9961,7 @@ dependencies = [
|
||||
"sql",
|
||||
"sqlx",
|
||||
"store-api",
|
||||
"substrait 0.4.0",
|
||||
"substrait 0.4.1",
|
||||
"table",
|
||||
"tempfile",
|
||||
"tokio",
|
||||
@@ -55,7 +55,7 @@ members = [
resolver = "2"

[workspace.package]
-version = "0.4.0"
+version = "0.4.1"
edition = "2021"
license = "Apache-2.0"

@@ -103,7 +103,7 @@ serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
smallvec = "1"
snafu = { version = "0.7", features = ["backtraces"] }
-sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "296a4f6c73b129d6f565a42a2e5e53c6bc2b9da4", features = [
+sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "6cf9d23d5b8fbecd65efc1d9afb7e80ad7a424da", features = [
    "visitor",
] }
strum = { version = "0.25", features = ["derive"] }
Makefile (2 changed lines)
@@ -94,7 +94,7 @@ build-android-bin: ## Build greptime binary for android.
	CARGO_BUILD_EXTRA_OPTS="--bin greptime --no-default-features"

.PHONY: strip-android-bin
-strip-android-bin: ## Strip greptime binary for android.
+strip-android-bin: build-android-bin ## Strip greptime binary for android.
	docker run --network=host \
	-v ${PWD}:/greptimedb \
	-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-android:latest \

@@ -104,11 +104,11 @@ Or if you built from docker:
docker run -p 4002:4002 -v "$(pwd):/tmp/greptimedb" greptime/greptimedb standalone start
```

-Please see [the online document site](https://docs.greptime.com/getting-started/overview#install-greptimedb) for more installation options and [operations info](https://docs.greptime.com/user-guide/operations/overview).
+Please see the online document site for more installation options and [operations info](https://docs.greptime.com/user-guide/operations/overview).

### Get started

-Read the [complete getting started guide](https://docs.greptime.com/getting-started/overview#connect) on our [official document site](https://docs.greptime.com/).
+Read the [complete getting started guide](https://docs.greptime.com/getting-started/try-out-greptimedb) on our [official document site](https://docs.greptime.com/).

To write and query data, GreptimeDB is compatible with multiple [protocols and clients](https://docs.greptime.com/user-guide/clients/overview).
@@ -6,8 +6,10 @@ license.workspace = true

[dependencies]
arrow.workspace = true
+chrono.workspace = true
clap = { version = "4.0", features = ["derive"] }
client = { workspace = true }
+futures-util.workspace = true
indicatif = "0.17.1"
itertools.workspace = true
parquet.workspace = true
@@ -29,14 +29,14 @@ use client::api::v1::column::Values;
|
||||
use client::api::v1::{
|
||||
Column, ColumnDataType, ColumnDef, CreateTableExpr, InsertRequest, InsertRequests, SemanticType,
|
||||
};
|
||||
use client::{Client, Database, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use client::{Client, Database, Output, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use futures_util::TryStreamExt;
|
||||
use indicatif::{MultiProgress, ProgressBar, ProgressStyle};
|
||||
use parquet::arrow::arrow_reader::ParquetRecordBatchReaderBuilder;
|
||||
use tokio::task::JoinSet;
|
||||
|
||||
const CATALOG_NAME: &str = "greptime";
|
||||
const SCHEMA_NAME: &str = "public";
|
||||
const TABLE_NAME: &str = "nyc_taxi";
|
||||
|
||||
#[derive(Parser)]
|
||||
#[command(name = "NYC benchmark runner")]
|
||||
@@ -74,7 +74,12 @@ fn get_file_list<P: AsRef<Path>>(path: P) -> Vec<PathBuf> {
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn new_table_name() -> String {
|
||||
format!("nyc_taxi_{}", chrono::Utc::now().timestamp())
|
||||
}
|
||||
|
||||
async fn write_data(
|
||||
table_name: &str,
|
||||
batch_size: usize,
|
||||
db: &Database,
|
||||
path: PathBuf,
|
||||
@@ -104,7 +109,7 @@ async fn write_data(
|
||||
}
|
||||
let (columns, row_count) = convert_record_batch(record_batch);
|
||||
let request = InsertRequest {
|
||||
table_name: TABLE_NAME.to_string(),
|
||||
table_name: table_name.to_string(),
|
||||
columns,
|
||||
row_count,
|
||||
};
|
||||
@@ -113,7 +118,7 @@ async fn write_data(
|
||||
};
|
||||
|
||||
let now = Instant::now();
|
||||
let _ = db.insert(requests).await.unwrap();
|
||||
db.insert(requests).await.unwrap();
|
||||
let elapsed = now.elapsed();
|
||||
total_rpc_elapsed_ms += elapsed.as_millis();
|
||||
progress_bar.inc(row_count as _);
|
||||
@@ -131,6 +136,11 @@ fn convert_record_batch(record_batch: RecordBatch) -> (Vec<Column>, u32) {
|
||||
|
||||
for (array, field) in record_batch.columns().iter().zip(fields.iter()) {
|
||||
let (values, datatype) = build_values(array);
|
||||
let semantic_type = match field.name().as_str() {
|
||||
"VendorID" => SemanticType::Tag,
|
||||
"tpep_pickup_datetime" => SemanticType::Timestamp,
|
||||
_ => SemanticType::Field,
|
||||
};
|
||||
|
||||
let column = Column {
|
||||
column_name: field.name().clone(),
|
||||
@@ -141,8 +151,7 @@ fn convert_record_batch(record_batch: RecordBatch) -> (Vec<Column>, u32) {
|
||||
.map(|bitmap| bitmap.buffer().as_slice().to_vec())
|
||||
.unwrap_or_default(),
|
||||
datatype: datatype.into(),
|
||||
// datatype and semantic_type are set to default
|
||||
..Default::default()
|
||||
semantic_type: semantic_type as i32,
|
||||
};
|
||||
columns.push(column);
|
||||
}
|
||||
@@ -243,11 +252,11 @@ fn is_record_batch_full(batch: &RecordBatch) -> bool {
|
||||
batch.columns().iter().all(|col| col.null_count() == 0)
|
||||
}
|
||||
|
||||
fn create_table_expr() -> CreateTableExpr {
|
||||
fn create_table_expr(table_name: &str) -> CreateTableExpr {
|
||||
CreateTableExpr {
|
||||
catalog_name: CATALOG_NAME.to_string(),
|
||||
schema_name: SCHEMA_NAME.to_string(),
|
||||
table_name: TABLE_NAME.to_string(),
|
||||
table_name: table_name.to_string(),
|
||||
desc: "".to_string(),
|
||||
column_defs: vec![
|
||||
ColumnDef {
|
||||
@@ -261,7 +270,7 @@ fn create_table_expr() -> CreateTableExpr {
|
||||
ColumnDef {
|
||||
name: "tpep_pickup_datetime".to_string(),
|
||||
data_type: ColumnDataType::TimestampMicrosecond as i32,
|
||||
is_nullable: true,
|
||||
is_nullable: false,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Timestamp as i32,
|
||||
comment: String::new(),
|
||||
@@ -405,31 +414,31 @@ fn create_table_expr() -> CreateTableExpr {
|
||||
],
|
||||
time_index: "tpep_pickup_datetime".to_string(),
|
||||
primary_keys: vec!["VendorID".to_string()],
|
||||
create_if_not_exists: false,
|
||||
create_if_not_exists: true,
|
||||
table_options: Default::default(),
|
||||
table_id: None,
|
||||
engine: "mito".to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
fn query_set() -> HashMap<String, String> {
|
||||
fn query_set(table_name: &str) -> HashMap<String, String> {
|
||||
HashMap::from([
|
||||
(
|
||||
"count_all".to_string(),
|
||||
format!("SELECT COUNT(*) FROM {TABLE_NAME};"),
|
||||
format!("SELECT COUNT(*) FROM {table_name};"),
|
||||
),
|
||||
(
|
||||
"fare_amt_by_passenger".to_string(),
|
||||
format!("SELECT passenger_count, MIN(fare_amount), MAX(fare_amount), SUM(fare_amount) FROM {TABLE_NAME} GROUP BY passenger_count"),
|
||||
format!("SELECT passenger_count, MIN(fare_amount), MAX(fare_amount), SUM(fare_amount) FROM {table_name} GROUP BY passenger_count"),
|
||||
)
|
||||
])
|
||||
}
|
||||
|
||||
async fn do_write(args: &Args, db: &Database) {
|
||||
async fn do_write(args: &Args, db: &Database, table_name: &str) {
|
||||
let mut file_list = get_file_list(args.path.clone().expect("Specify data path in argument"));
|
||||
let mut write_jobs = JoinSet::new();
|
||||
|
||||
let create_table_result = db.create(create_table_expr()).await;
|
||||
let create_table_result = db.create(create_table_expr(table_name)).await;
|
||||
println!("Create table result: {create_table_result:?}");
|
||||
|
||||
let progress_bar_style = ProgressStyle::with_template(
|
||||
@@ -447,8 +456,10 @@ async fn do_write(args: &Args, db: &Database) {
|
||||
let db = db.clone();
|
||||
let mpb = multi_progress_bar.clone();
|
||||
let pb_style = progress_bar_style.clone();
|
||||
let _ = write_jobs
|
||||
.spawn(async move { write_data(batch_size, &db, path, mpb, pb_style).await });
|
||||
let table_name = table_name.to_string();
|
||||
let _ = write_jobs.spawn(async move {
|
||||
write_data(&table_name, batch_size, &db, path, mpb, pb_style).await
|
||||
});
|
||||
}
|
||||
}
|
||||
while write_jobs.join_next().await.is_some() {
|
||||
@@ -457,24 +468,32 @@ async fn do_write(args: &Args, db: &Database) {
|
||||
let db = db.clone();
|
||||
let mpb = multi_progress_bar.clone();
|
||||
let pb_style = progress_bar_style.clone();
|
||||
let _ = write_jobs
|
||||
.spawn(async move { write_data(batch_size, &db, path, mpb, pb_style).await });
|
||||
let table_name = table_name.to_string();
|
||||
let _ = write_jobs.spawn(async move {
|
||||
write_data(&table_name, batch_size, &db, path, mpb, pb_style).await
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn do_query(num_iter: usize, db: &Database) {
|
||||
for (query_name, query) in query_set() {
|
||||
async fn do_query(num_iter: usize, db: &Database, table_name: &str) {
|
||||
for (query_name, query) in query_set(table_name) {
|
||||
println!("Running query: {query}");
|
||||
for i in 0..num_iter {
|
||||
let now = Instant::now();
|
||||
let _res = db.sql(&query).await.unwrap();
|
||||
let res = db.sql(&query).await.unwrap();
|
||||
match res {
|
||||
Output::AffectedRows(_) | Output::RecordBatches(_) => (),
|
||||
Output::Stream(stream) => {
|
||||
stream.try_collect::<Vec<_>>().await.unwrap();
|
||||
}
|
||||
}
|
||||
let elapsed = now.elapsed();
|
||||
println!(
|
||||
"query {}, iteration {}: {}ms",
|
||||
query_name,
|
||||
i,
|
||||
elapsed.as_millis()
|
||||
elapsed.as_millis(),
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -491,13 +510,14 @@ fn main() {
|
||||
.block_on(async {
|
||||
let client = Client::with_urls(vec![&args.endpoint]);
|
||||
let db = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client);
|
||||
let table_name = new_table_name();
|
||||
|
||||
if !args.skip_write {
|
||||
do_write(&args, &db).await;
|
||||
do_write(&args, &db, &table_name).await;
|
||||
}
|
||||
|
||||
if !args.skip_read {
|
||||
do_query(args.iter_num, &db).await;
|
||||
do_query(args.iter_num, &db, &table_name).await;
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
@@ -13,19 +13,19 @@ rpc_runtime_size = 8
require_lease_before_startup = false

[heartbeat]
-# Interval for sending heartbeat messages to the Metasrv in milliseconds, 3000 by default.
-interval_millis = 3000
+# Interval for sending heartbeat messages to the Metasrv, 3 seconds by default.
+interval = "3s"

# Metasrv client options.
[meta_client]
# Metasrv address list.
metasrv_addrs = ["127.0.0.1:3002"]
-# Heartbeat timeout in milliseconds, 500 by default.
-heartbeat_timeout_millis = 500
-# Operation timeout in milliseconds, 3000 by default.
-timeout_millis = 3000
-# Connect server timeout in milliseconds, 5000 by default.
-connect_timeout_millis = 1000
+# Heartbeat timeout, 500 milliseconds by default.
+heartbeat_timeout = "500ms"
+# Operation timeout, 3 seconds by default.
+timeout = "3s"
+# Connect server timeout, 1 second by default.
+connect_timeout = "1s"
# `TCP_NODELAY` option for accepted connections, true by default.
tcp_nodelay = true

@@ -51,7 +51,7 @@ type = "File"
# The local file cache directory
# cache_path = "/path/local_cache"
# The local file cache capacity in bytes.
-# cache_capacity = "256Mib"
+# cache_capacity = "256MB"

# Compaction options, see `standalone.example.toml`.
[storage.compaction]

@@ -2,10 +2,10 @@
mode = "distributed"

[heartbeat]
-# Interval for sending heartbeat task to the Metasrv in milliseconds, 5000 by default.
-interval_millis = 5000
-# Interval for retry sending heartbeat task in milliseconds, 5000 by default.
-retry_interval_millis = 5000
+# Interval for sending heartbeat task to the Metasrv, 5 seconds by default.
+interval = "5s"
+# Interval for retry sending heartbeat task, 5 seconds by default.
+retry_interval = "5s"

# HTTP server options, see `standalone.example.toml`.
[http]
@@ -59,10 +59,10 @@ enable = true
# Metasrv client options, see `datanode.example.toml`.
[meta_client]
metasrv_addrs = ["127.0.0.1:3002"]
-timeout_millis = 3000
+timeout = "3s"
# DDL timeouts options.
-ddl_timeout_millis = 10000
-connect_timeout_millis = 1000
+ddl_timeout = "10s"
+connect_timeout = "1s"
tcp_nodelay = true

# Log options, see `standalone.example.toml`

@@ -32,6 +32,6 @@ retry_delay = "500ms"
# [datanode]
# # Datanode client options.
# [datanode.client_options]
-# timeout_millis = 10000
-# connect_timeout_millis = 10000
+# timeout = "10s"
+# connect_timeout = "10s"
# tcp_nodelay = true

@@ -82,6 +82,8 @@ enable = true

# WAL options.
[wal]
+# WAL data directory
+# dir = "/tmp/greptimedb/wal"
# WAL file size in bytes.
file_size = "256MB"
# WAL purge threshold.
@@ -93,8 +95,8 @@ read_batch_size = 128
# Whether to sync log file after every write.
sync_write = false

-# Kv options.
-[kv_store]
+# Metadata storage options.
+[metadata_store]
# Kv file size in bytes.
file_size = "256MB"
# Kv purge threshold.
@@ -118,7 +120,7 @@ type = "File"
# Cache configuration for object storage such as 'S3' etc.
# cache_path = "/path/local_cache"
# The local file cache capacity in bytes.
-# cache_capacity = "256Mib"
+# cache_capacity = "256MB"

# Compaction options.
[storage.compaction]
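The config hunks above replace integer millisecond options such as `timeout_millis = 3000` with human-readable duration strings such as `timeout = "3s"`. As a rough illustration of what parsing such values looks like on the Rust side, here is a minimal sketch using serde plus the humantime-serde crate (which appears in the dependency list above); the struct and field names are illustrative, not the actual GreptimeDB option types, and `serde` (with the derive feature) and `toml` are assumed dependencies.

```rust
// Minimal sketch: deserialize humantime-style duration strings into Duration.
use std::time::Duration;

use serde::Deserialize;

#[derive(Debug, Deserialize)]
struct MetaClientOptions {
    // humantime_serde understands values like "3s", "500ms", "1m 30s".
    #[serde(with = "humantime_serde")]
    timeout: Duration,
    #[serde(with = "humantime_serde")]
    connect_timeout: Duration,
}

fn main() {
    let config = r#"
        timeout = "3s"
        connect_timeout = "1s"
    "#;
    let opts: MetaClientOptions = toml::from_str(config).unwrap();
    assert_eq!(opts.timeout, Duration::from_secs(3));
    assert_eq!(opts.connect_timeout, Duration::from_secs(1));
    println!("{opts:?}");
}
```

Compared with raw millisecond integers, the string form keeps units explicit in the config file and removes the need for a `_millis` suffix on every key.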
@@ -1,4 +1,4 @@
-FROM ubuntu:22.04 as builder
+FROM ubuntu:20.04 as builder

ARG CARGO_PROFILE
ARG FEATURES
@@ -7,6 +7,11 @@ ARG OUTPUT_DIR
ENV LANG en_US.utf8
WORKDIR /greptimedb

+# Add PPA for Python 3.10.
+RUN apt-get update && \
+    DEBIAN_FRONTEND=noninteractive apt-get install -y software-properties-common && \
+    add-apt-repository ppa:deadsnakes/ppa -y
+
# Install dependencies.
RUN --mount=type=cache,target=/var/cache/apt \
    apt-get update && apt-get install -y \

@@ -1,8 +1,13 @@
-FROM ubuntu:22.04
+FROM ubuntu:20.04

ENV LANG en_US.utf8
WORKDIR /greptimedb

+# Add PPA for Python 3.10.
+RUN apt-get update && \
+    DEBIAN_FRONTEND=noninteractive apt-get install -y software-properties-common && \
+    add-apt-repository ppa:deadsnakes/ppa -y
+
# Install dependencies.
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
    libssl-dev \
docs/benchmarks/tsbs/v0.4.0.md (new file, 61 lines)
@@ -0,0 +1,61 @@
# TSBS benchmark - v0.4.0

## Environment

### Local

| | |
| ------ | ---------------------------------- |
| CPU | AMD Ryzen 7 7735HS (8 core 3.2GHz) |
| Memory | 32GB |
| Disk | SOLIDIGM SSDPFKNU010TZ |
| OS | Ubuntu 22.04.2 LTS |

### Aliyun amd64

| | |
| ------- | -------------- |
| Machine | ecs.g7.4xlarge |
| CPU | 16 core |
| Memory | 64GB |
| Disk | 100G |
| OS | Ubuntu 22.04 |

### Aliyun arm64

| | |
| ------- | ----------------- |
| Machine | ecs.g8y.4xlarge |
| CPU | 16 core |
| Memory | 64GB |
| Disk | 100G |
| OS | Ubuntu 22.04 ARM |

## Write performance

| Environment | Ingest rate(rows/s) |
| ------------------ | ------------------- |
| Local | 365280.60 |
| Aliyun g7.4xlarge | 341368.72 |
| Aliyun g8y.4xlarge | 320907.29 |

## Query performance

| Query type | Local (ms) | Aliyun g7.4xlarge (ms) | Aliyun g8y.4xlarge (ms) |
| --------------------- | ---------- | ---------------------- | ----------------------- |
| cpu-max-all-1 | 50.70 | 31.46 | 47.61 |
| cpu-max-all-8 | 262.16 | 129.26 | 152.43 |
| double-groupby-1 | 2512.71 | 1408.19 | 1586.10 |
| double-groupby-5 | 3896.15 | 2304.29 | 2585.29 |
| double-groupby-all | 5404.67 | 3337.61 | 3773.91 |
| groupby-orderby-limit | 3786.98 | 2065.72 | 2312.57 |
| high-cpu-1 | 71.96 | 37.29 | 54.01 |
| high-cpu-all | 9468.75 | 7595.69 | 8467.46 |
| lastpoint | 13379.43 | 11253.76 | 12949.40 |
| single-groupby-1-1-1 | 20.72 | 12.16 | 13.35 |
| single-groupby-1-1-12 | 28.53 | 15.67 | 21.62 |
| single-groupby-1-8-1 | 72.23 | 37.90 | 43.52 |
| single-groupby-5-1-1 | 26.75 | 15.59 | 17.48 |
| single-groupby-5-1-12 | 45.41 | 22.90 | 31.96 |
| single-groupby-5-8-1 | 107.96 | 59.76 | 69.58 |
@@ -139,11 +139,19 @@ impl Client {
    }

    fn max_grpc_recv_message_size(&self) -> usize {
-       self.inner.channel_manager.config().max_recv_message_size
+       self.inner
+           .channel_manager
+           .config()
+           .max_recv_message_size
+           .as_bytes() as usize
    }

    fn max_grpc_send_message_size(&self) -> usize {
-       self.inner.channel_manager.config().max_send_message_size
+       self.inner
+           .channel_manager
+           .config()
+           .max_send_message_size
+           .as_bytes() as usize
    }

    pub(crate) fn make_flight_client(&self) -> Result<FlightClient> {
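The client change above switches the gRPC message-size limits from plain integers to a human-readable size type, hence the new `.as_bytes() as usize` conversion before the value is handed to the transport layer. A minimal sketch of the idea follows; the `ReadableSize` wrapper here is illustrative only, not the real type from the GreptimeDB common crates.

```rust
// Illustrative ReadableSize-like wrapper: configs carry a human-readable size,
// call sites convert it to a raw byte count when an integer is required.
#[derive(Clone, Copy, Debug)]
struct ReadableSize(u64); // size in bytes

impl ReadableSize {
    const fn mb(n: u64) -> Self {
        ReadableSize(n * 1024 * 1024)
    }
    fn as_bytes(&self) -> u64 {
        self.0
    }
}

struct ChannelConfig {
    max_recv_message_size: ReadableSize,
}

fn max_grpc_recv_message_size(config: &ChannelConfig) -> usize {
    // Mirrors the shape of the updated client code: readable size -> bytes.
    config.max_recv_message_size.as_bytes() as usize
}

fn main() {
    let config = ChannelConfig {
        max_recv_message_size: ReadableSize::mb(512),
    };
    assert_eq!(max_grpc_recv_message_size(&config), 512 * 1024 * 1024);
}
```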
@@ -167,11 +167,14 @@ impl Database {
        }
    }

-   pub async fn sql(&self, sql: &str) -> Result<Output> {
+   pub async fn sql<S>(&self, sql: S) -> Result<Output>
+   where
+       S: AsRef<str>,
+   {
        let _timer = timer!(metrics::METRIC_GRPC_SQL);
        self.do_get(
            Request::Query(QueryRequest {
-               query: Some(Query::Sql(sql.to_string())),
+               query: Some(Query::Sql(sql.as_ref().to_string())),
            }),
            0,
        )
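Making `sql` generic over `S: AsRef<str>` lets callers pass `&str`, `String`, or `&String` without converting at the call site; the new export tool further below builds owned `COPY DATABASE` statements and passes them directly. A standalone sketch of the pattern, with an illustrative function rather than the real client API:

```rust
// Sketch of the calling convention enabled by `S: AsRef<str>`; the function
// name and body are illustrative, not the actual Database::sql implementation.
fn sql<S: AsRef<str>>(query: S) -> String {
    // Internally only a &str view of the query is needed.
    format!("executing: {}", query.as_ref())
}

fn main() {
    let owned = String::from("copy database public to '/tmp/export/' with (format='parquet');");
    println!("{}", sql("show databases")); // &str literal
    println!("{}", sql(owned.clone()));    // owned String
    println!("{}", sql(&owned));           // &String
}
```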
@@ -26,6 +26,8 @@ use api::v1::greptime_response::Response;
use api::v1::{AffectedRows, GreptimeResponse};
pub use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_error::status_code::StatusCode;
+pub use common_query::Output;
+pub use common_recordbatch::{RecordBatches, SendableRecordBatchStream};
use snafu::OptionExt;

pub use self::client::Client;
@@ -14,6 +14,7 @@
|
||||
|
||||
mod bench;
|
||||
mod cmd;
|
||||
mod export;
|
||||
mod helper;
|
||||
mod repl;
|
||||
// TODO(weny): Removes it
|
||||
@@ -27,6 +28,7 @@ use common_telemetry::logging::LoggingOptions;
|
||||
pub use repl::Repl;
|
||||
use upgrade::UpgradeCommand;
|
||||
|
||||
use self::export::ExportCommand;
|
||||
use crate::error::Result;
|
||||
use crate::options::{Options, TopLevelOptions};
|
||||
|
||||
@@ -78,17 +80,19 @@ impl Command {
|
||||
|
||||
#[derive(Parser)]
|
||||
enum SubCommand {
|
||||
Attach(AttachCommand),
|
||||
// Attach(AttachCommand),
|
||||
Upgrade(UpgradeCommand),
|
||||
Bench(BenchTableMetadataCommand),
|
||||
Export(ExportCommand),
|
||||
}
|
||||
|
||||
impl SubCommand {
|
||||
async fn build(self) -> Result<Instance> {
|
||||
match self {
|
||||
SubCommand::Attach(cmd) => cmd.build().await,
|
||||
// SubCommand::Attach(cmd) => cmd.build().await,
|
||||
SubCommand::Upgrade(cmd) => cmd.build().await,
|
||||
SubCommand::Bench(cmd) => cmd.build().await,
|
||||
SubCommand::Export(cmd) => cmd.build().await,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -104,51 +108,9 @@ pub(crate) struct AttachCommand {
|
||||
}
|
||||
|
||||
impl AttachCommand {
|
||||
#[allow(dead_code)]
|
||||
async fn build(self) -> Result<Instance> {
|
||||
let repl = Repl::try_new(&self).await?;
|
||||
Ok(Instance::Repl(repl))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_load_options() {
|
||||
let cmd = Command {
|
||||
cmd: SubCommand::Attach(AttachCommand {
|
||||
grpc_addr: String::from(""),
|
||||
meta_addr: None,
|
||||
disable_helper: false,
|
||||
}),
|
||||
};
|
||||
|
||||
let opts = cmd.load_options(TopLevelOptions::default()).unwrap();
|
||||
let logging_opts = opts.logging_options();
|
||||
assert_eq!("/tmp/greptimedb/logs", logging_opts.dir);
|
||||
assert!(logging_opts.level.is_none());
|
||||
assert!(!logging_opts.enable_jaeger_tracing);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_top_level_options() {
|
||||
let cmd = Command {
|
||||
cmd: SubCommand::Attach(AttachCommand {
|
||||
grpc_addr: String::from(""),
|
||||
meta_addr: None,
|
||||
disable_helper: false,
|
||||
}),
|
||||
};
|
||||
|
||||
let opts = cmd
|
||||
.load_options(TopLevelOptions {
|
||||
log_dir: Some("/tmp/greptimedb/test/logs".to_string()),
|
||||
log_level: Some("debug".to_string()),
|
||||
})
|
||||
.unwrap();
|
||||
let logging_opts = opts.logging_options();
|
||||
assert_eq!("/tmp/greptimedb/test/logs", logging_opts.dir);
|
||||
assert_eq!("debug", logging_opts.level.as_ref().unwrap());
|
||||
}
|
||||
}
|
||||
|
||||
src/cmd/src/cli/export.rs (new file, 395 lines)
@@ -0,0 +1,395 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::path::Path;
|
||||
use std::sync::Arc;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use clap::{Parser, ValueEnum};
|
||||
use client::{Client, Database, DEFAULT_SCHEMA_NAME};
|
||||
use common_query::Output;
|
||||
use common_recordbatch::util::collect;
|
||||
use common_telemetry::{debug, error, info, warn};
|
||||
use datatypes::scalars::ScalarVector;
|
||||
use datatypes::vectors::{StringVector, Vector};
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use tokio::fs::File;
|
||||
use tokio::io::AsyncWriteExt;
|
||||
use tokio::sync::Semaphore;
|
||||
|
||||
use crate::cli::{Instance, Tool};
|
||||
use crate::error::{
|
||||
CollectRecordBatchesSnafu, ConnectServerSnafu, EmptyResultSnafu, Error, FileIoSnafu,
|
||||
InvalidDatabaseNameSnafu, NotDataFromOutputSnafu, RequestDatabaseSnafu, Result,
|
||||
};
|
||||
|
||||
type TableReference = (String, String, String);
|
||||
|
||||
#[derive(Debug, Default, Clone, ValueEnum)]
|
||||
enum ExportTarget {
|
||||
/// Corresponding to `SHOW CREATE TABLE`
|
||||
#[default]
|
||||
CreateTable,
|
||||
/// Corresponding to `EXPORT TABLE`
|
||||
TableData,
|
||||
}
|
||||
|
||||
#[derive(Debug, Default, Parser)]
|
||||
pub struct ExportCommand {
|
||||
/// Server address to connect
|
||||
#[clap(long)]
|
||||
addr: String,
|
||||
|
||||
/// Directory to put the exported data. E.g.: /tmp/greptimedb-export
|
||||
#[clap(long)]
|
||||
output_dir: String,
|
||||
|
||||
/// The name of the catalog to export. Default to "greptime-*"".
|
||||
#[clap(long, default_value = "")]
|
||||
database: String,
|
||||
|
||||
/// Parallelism of the export.
|
||||
#[clap(long, short = 'j', default_value = "1")]
|
||||
export_jobs: usize,
|
||||
|
||||
/// Max retry times for each job.
|
||||
#[clap(long, default_value = "3")]
|
||||
max_retry: usize,
|
||||
|
||||
/// Things to export
|
||||
#[clap(long, short = 't', value_enum)]
|
||||
target: ExportTarget,
|
||||
}
|
||||
|
||||
impl ExportCommand {
|
||||
pub async fn build(&self) -> Result<Instance> {
|
||||
let client = Client::with_urls([self.addr.clone()]);
|
||||
client
|
||||
.health_check()
|
||||
.await
|
||||
.with_context(|_| ConnectServerSnafu {
|
||||
addr: self.addr.clone(),
|
||||
})?;
|
||||
let (catalog, schema) = split_database(&self.database)?;
|
||||
let database_client = Database::new(
|
||||
catalog.clone(),
|
||||
schema.clone().unwrap_or(DEFAULT_SCHEMA_NAME.to_string()),
|
||||
client,
|
||||
);
|
||||
|
||||
Ok(Instance::Tool(Box::new(Export {
|
||||
client: database_client,
|
||||
catalog,
|
||||
schema,
|
||||
output_dir: self.output_dir.clone(),
|
||||
parallelism: self.export_jobs,
|
||||
target: self.target.clone(),
|
||||
})))
|
||||
}
|
||||
}
|
||||
|
||||
pub struct Export {
|
||||
client: Database,
|
||||
catalog: String,
|
||||
schema: Option<String>,
|
||||
output_dir: String,
|
||||
parallelism: usize,
|
||||
target: ExportTarget,
|
||||
}
|
||||
|
||||
impl Export {
|
||||
/// Iterate over all db names.
|
||||
///
|
||||
/// Newbie: `db_name` is catalog + schema.
|
||||
async fn iter_db_names(&self) -> Result<Vec<(String, String)>> {
|
||||
if let Some(schema) = &self.schema {
|
||||
Ok(vec![(self.catalog.clone(), schema.clone())])
|
||||
} else {
|
||||
let mut client = self.client.clone();
client.set_catalog(self.catalog.clone());
let result =
client
.sql("show databases")
.await
.with_context(|_| RequestDatabaseSnafu {
sql: "show databases".to_string(),
})?;
let Output::Stream(stream) = result else {
NotDataFromOutputSnafu.fail()?
};
let record_batch = collect(stream)
.await
.context(CollectRecordBatchesSnafu)?
.pop()
.context(EmptyResultSnafu)?;
let schemas = record_batch
.column(0)
.as_any()
.downcast_ref::<StringVector>()
.unwrap();
let mut result = Vec::with_capacity(schemas.len());
for i in 0..schemas.len() {
let schema = schemas.get_data(i).unwrap().to_owned();
result.push((self.catalog.clone(), schema));
}
Ok(result)
}
}

/// Return a list of [`TableReference`] to be exported.
/// Includes all tables under the given `catalog` and `schema`
async fn get_table_list(&self, catalog: &str, schema: &str) -> Result<Vec<TableReference>> {
// TODO: SQL injection hurts
let sql = format!(
"select table_catalog, table_schema, table_name from \
information_schema.tables where table_type = \'BASE TABLE\'\
and table_catalog = \'{catalog}\' and table_schema = \'{schema}\'",
);
let mut client = self.client.clone();
client.set_catalog(catalog);
client.set_schema(schema);
let result = client
.sql(&sql)
.await
.with_context(|_| RequestDatabaseSnafu { sql })?;
let Output::Stream(stream) = result else {
NotDataFromOutputSnafu.fail()?
};
let Some(record_batch) = collect(stream)
.await
.context(CollectRecordBatchesSnafu)?
.pop()
else {
return Ok(vec![]);
};

debug!("Fetched table list: {}", record_batch.pretty_print());

if record_batch.num_rows() == 0 {
return Ok(vec![]);
}

let mut result = Vec::with_capacity(record_batch.num_rows());
let catalog_column = record_batch
.column(0)
.as_any()
.downcast_ref::<StringVector>()
.unwrap();
let schema_column = record_batch
.column(1)
.as_any()
.downcast_ref::<StringVector>()
.unwrap();
let table_column = record_batch
.column(2)
.as_any()
.downcast_ref::<StringVector>()
.unwrap();
for i in 0..record_batch.num_rows() {
let catalog = catalog_column.get_data(i).unwrap().to_owned();
let schema = schema_column.get_data(i).unwrap().to_owned();
let table = table_column.get_data(i).unwrap().to_owned();
result.push((catalog, schema, table));
}

Ok(result)
}

async fn show_create_table(&self, catalog: &str, schema: &str, table: &str) -> Result<String> {
let sql = format!("show create table {}.{}.{}", catalog, schema, table);
let mut client = self.client.clone();
client.set_catalog(catalog);
client.set_schema(schema);
let result = client
.sql(&sql)
.await
.with_context(|_| RequestDatabaseSnafu { sql })?;
let Output::Stream(stream) = result else {
NotDataFromOutputSnafu.fail()?
};
let record_batch = collect(stream)
.await
.context(CollectRecordBatchesSnafu)?
.pop()
.context(EmptyResultSnafu)?;
let create_table = record_batch
.column(1)
.as_any()
.downcast_ref::<StringVector>()
.unwrap()
.get_data(0)
.unwrap();

Ok(format!("{create_table};\n"))
}

async fn export_create_table(&self) -> Result<()> {
let semaphore = Arc::new(Semaphore::new(self.parallelism));
let db_names = self.iter_db_names().await?;
let db_count = db_names.len();
let mut tasks = Vec::with_capacity(db_names.len());
for (catalog, schema) in db_names {
let semaphore_moved = semaphore.clone();
tasks.push(async move {
let _permit = semaphore_moved.acquire().await.unwrap();
let table_list = self.get_table_list(&catalog, &schema).await?;
let table_count = table_list.len();
tokio::fs::create_dir_all(&self.output_dir)
.await
.context(FileIoSnafu)?;
let output_file =
Path::new(&self.output_dir).join(format!("{catalog}-{schema}.sql"));
let mut file = File::create(output_file).await.context(FileIoSnafu)?;
for (c, s, t) in table_list {
match self.show_create_table(&c, &s, &t).await {
Err(e) => {
error!(e; "Failed to export table {}.{}.{}", c, s, t)
}
Ok(create_table) => {
file.write_all(create_table.as_bytes())
.await
.context(FileIoSnafu)?;
}
}
}
info!("finished exporting {catalog}.{schema} with {table_count} tables",);
Ok::<(), Error>(())
});
}

let success = futures::future::join_all(tasks)
.await
.into_iter()
.filter(|r| match r {
Ok(_) => true,
Err(e) => {
error!(e; "export job failed");
false
}
})
.count();

info!("success {success}/{db_count} jobs");

Ok(())
}

async fn export_table_data(&self) -> Result<()> {
let semaphore = Arc::new(Semaphore::new(self.parallelism));
let db_names = self.iter_db_names().await?;
let db_count = db_names.len();
let mut tasks = Vec::with_capacity(db_names.len());
for (catalog, schema) in db_names {
let semaphore_moved = semaphore.clone();
tasks.push(async move {
let _permit = semaphore_moved.acquire().await.unwrap();
tokio::fs::create_dir_all(&self.output_dir)
.await
.context(FileIoSnafu)?;
let output_dir = Path::new(&self.output_dir).join(format!("{catalog}-{schema}/"));

let mut client = self.client.clone();
client.set_catalog(catalog.clone());
client.set_schema(schema.clone());

// copy database to
let sql = format!(
"copy database {} to '{}' with (format='parquet');",
schema,
output_dir.to_str().unwrap()
);
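// Illustrative example (not part of the original diff; paths are assumed): for
// catalog `greptime`, schema `public`, and an output_dir of `/tmp/export/greptime-public/`,
// the format! above renders roughly as:
//   copy database public to '/tmp/export/greptime-public/' with (format='parquet');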
client
.sql(sql.clone())
.await
.context(RequestDatabaseSnafu { sql })?;
info!("finished exporting {catalog}.{schema} data");

// export copy from sql
let dir_filenames = match output_dir.read_dir() {
Ok(dir) => dir,
Err(_) => {
warn!("empty database {catalog}.{schema}");
return Ok(());
}
};

let copy_from_file =
Path::new(&self.output_dir).join(format!("{catalog}-{schema}_copy_from.sql"));
let mut file = File::create(copy_from_file).await.context(FileIoSnafu)?;

let copy_from_sql = dir_filenames
.into_iter()
.map(|file| {
let file = file.unwrap();
let filename = file.file_name().into_string().unwrap();

format!(
"copy {} from '{}' with (format='parquet');\n",
filename.replace(".parquet", ""),
file.path().to_str().unwrap()
)
})
.collect::<Vec<_>>()
.join("");
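// Illustrative example (assumed file name, not in the original diff): an exported
// `my_table.parquet` in the directory above would contribute a line such as:
//   copy my_table from '/tmp/export/greptime-public/my_table.parquet' with (format='parquet');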
file.write_all(copy_from_sql.as_bytes())
.await
.context(FileIoSnafu)?;

info!("finished exporting {catalog}.{schema} copy_from.sql");

Ok::<(), Error>(())
});
}

let success = futures::future::join_all(tasks)
.await
.into_iter()
.filter(|r| match r {
Ok(_) => true,
Err(e) => {
error!(e; "export job failed");
false
}
})
.count();

info!("success {success}/{db_count} jobs");

Ok(())
}
}

#[async_trait]
impl Tool for Export {
async fn do_work(&self) -> Result<()> {
match self.target {
ExportTarget::CreateTable => self.export_create_table().await,
ExportTarget::TableData => self.export_table_data().await,
}
}
}

/// Split at `-`.
fn split_database(database: &str) -> Result<(String, Option<String>)> {
let (catalog, schema) = database
.split_once('-')
.with_context(|| InvalidDatabaseNameSnafu {
database: database.to_string(),
})?;
if schema == "*" {
Ok((catalog.to_string(), None))
} else {
Ok((catalog.to_string(), Some(schema.to_string())))
}
}
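// Illustrative usage (not in the original diff): split_database("greptime-public")
// yields ("greptime", Some("public")), while split_database("greptime-*") yields
// ("greptime", None), i.e. no single schema was selected.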
@@ -96,6 +96,8 @@ struct StartCommand {
#[clap(long)]
data_home: Option<String>,
#[clap(long)]
wal_dir: Option<String>,
#[clap(long)]
http_addr: Option<String>,
#[clap(long)]
http_timeout: Option<u64>,
@@ -149,6 +151,10 @@ impl StartCommand {
opts.storage.data_home = data_home.clone();
}

if let Some(wal_dir) = &self.wal_dir {
opts.wal.dir = Some(wal_dir.clone());
}

if let Some(http_addr) = &self.http_addr {
opts.http.addr = http_addr.clone();
}
@@ -188,6 +194,7 @@ mod tests {
use common_base::readable_size::ReadableSize;
use common_test_util::temp_dir::create_named_temp_file;
use datanode::config::{CompactionConfig, FileConfig, ObjectStoreConfig, RegionManifestConfig};
use servers::heartbeat_options::HeartbeatOptions;
use servers::Mode;

use super::*;
@@ -204,11 +211,14 @@ mod tests {
rpc_hostname = "127.0.0.1"
rpc_runtime_size = 8

[heartbeat]
interval = "300ms"

[meta_client]
metasrv_addrs = ["127.0.0.1:3002"]
timeout_millis = 3000
connect_timeout_millis = 5000
ddl_timeout_millis= 10000
timeout = "3s"
connect_timeout = "5s"
ddl_timeout = "10s"
tcp_nodelay = true

[wal]
@@ -251,25 +261,33 @@ mod tests {

assert_eq!("127.0.0.1:3001".to_string(), options.rpc_addr);
assert_eq!(Some(42), options.node_id);
assert_eq!("/other/wal", options.wal.dir.unwrap());

assert_eq!(Duration::from_secs(600), options.wal.purge_interval);
assert_eq!(1024 * 1024 * 1024, options.wal.file_size.0);
assert_eq!(1024 * 1024 * 1024 * 50, options.wal.purge_threshold.0);
assert!(!options.wal.sync_write);

let HeartbeatOptions {
interval: heart_beat_interval,
..
} = options.heartbeat;

assert_eq!(300, heart_beat_interval.as_millis());

let MetaClientOptions {
metasrv_addrs: metasrv_addr,
timeout_millis,
connect_timeout_millis,
timeout,
connect_timeout,
ddl_timeout,
tcp_nodelay,
ddl_timeout_millis,
..
} = options.meta_client.unwrap();

assert_eq!(vec!["127.0.0.1:3002".to_string()], metasrv_addr);
assert_eq!(5000, connect_timeout_millis);
assert_eq!(10000, ddl_timeout_millis);
assert_eq!(3000, timeout_millis);
assert_eq!(5000, connect_timeout.as_millis());
assert_eq!(10000, ddl_timeout.as_millis());
assert_eq!(3000, timeout.as_millis());
assert!(tcp_nodelay);
assert_eq!("/tmp/greptimedb/", options.storage.data_home);
assert!(matches!(
@@ -363,8 +381,8 @@ mod tests {
rpc_runtime_size = 8

[meta_client]
timeout_millis = 3000
connect_timeout_millis = 5000
timeout = "3s"
connect_timeout = "5s"
tcp_nodelay = true

[wal]
@@ -428,6 +446,7 @@ mod tests {
|| {
let command = StartCommand {
config_file: Some(file.path().to_str().unwrap().to_string()),
wal_dir: Some("/other/wal/dir".to_string()),
env_prefix: env_prefix.to_string(),
..Default::default()
};
@@ -455,6 +474,9 @@ mod tests {
// Should be read from config file, config file > env > default values.
assert_eq!(opts.storage.compaction.max_purge_tasks, 32);

// Should be read from cli, cli > config file > env > default values.
assert_eq!(opts.wal.dir.unwrap(), "/other/wal/dir");

// Should be default value.
assert_eq!(
opts.storage.manifest.checkpoint_margin,
@@ -37,6 +37,18 @@ pub enum Error {
source: common_meta::error::Error,
},

#[snafu(display("Failed to start procedure manager"))]
StartProcedureManager {
location: Location,
source: common_procedure::error::Error,
},

#[snafu(display("Failed to stop procedure manager"))]
StopProcedureManager {
location: Location,
source: common_procedure::error::Error,
},

#[snafu(display("Failed to start datanode"))]
StartDatanode {
location: Location,
@@ -174,12 +186,45 @@ pub enum Error {
location: Location,
},

#[snafu(display("Failed to connect server at {addr}"))]
ConnectServer {
addr: String,
source: client::error::Error,
location: Location,
},

#[snafu(display("Failed to serde json"))]
SerdeJson {
#[snafu(source)]
error: serde_json::error::Error,
location: Location,
},

#[snafu(display("Expect data from output, but got another thing"))]
NotDataFromOutput { location: Location },

#[snafu(display("Empty result from output"))]
EmptyResult { location: Location },

#[snafu(display("Failed to manipulate file"))]
FileIo {
location: Location,
#[snafu(source)]
error: std::io::Error,
},

#[snafu(display("Invalid database name: {}", database))]
InvalidDatabaseName {
location: Location,
database: String,
},

#[snafu(display("Failed to create directory {}", dir))]
CreateDir {
dir: String,
#[snafu(source)]
error: std::io::Error,
},
}

pub type Result<T> = std::result::Result<T, Error>;
@@ -198,12 +243,18 @@ impl ErrorExt for Error {
Error::IterStream { source, .. } | Error::InitMetadata { source, .. } => {
source.status_code()
}
Error::ConnectServer { source, .. } => source.status_code(),
Error::MissingConfig { .. }
| Error::LoadLayeredConfig { .. }
| Error::IllegalConfig { .. }
| Error::InvalidReplCommand { .. }
| Error::ConnectEtcd { .. } => StatusCode::InvalidArguments,

| Error::ConnectEtcd { .. }
| Error::NotDataFromOutput { .. }
| Error::CreateDir { .. }
| Error::EmptyResult { .. }
| Error::InvalidDatabaseName { .. } => StatusCode::InvalidArguments,
Error::StartProcedureManager { source, .. }
| Error::StopProcedureManager { source, .. } => source.status_code(),
Error::ReplCreation { .. } | Error::Readline { .. } => StatusCode::Internal,
Error::RequestDatabase { source, .. } => source.status_code(),
Error::CollectRecordBatches { source, .. }
@@ -215,7 +266,7 @@ impl ErrorExt for Error {
Error::SubstraitEncodeLogicalPlan { source, .. } => source.status_code(),
Error::StartCatalogManager { source, .. } => source.status_code(),

Error::SerdeJson { .. } => StatusCode::Unexpected,
Error::SerdeJson { .. } | Error::FileIo { .. } => StatusCode::Unexpected,
}
}
@@ -89,7 +89,7 @@ pub struct StartCommand {
#[clap(long)]
http_timeout: Option<u64>,
#[clap(long)]
grpc_addr: Option<String>,
rpc_addr: Option<String>,
#[clap(long)]
mysql_addr: Option<String>,
#[clap(long)]
@@ -150,7 +150,7 @@ impl StartCommand {
opts.http.disable_dashboard = disable_dashboard;
}

if let Some(addr) = &self.grpc_addr {
if let Some(addr) = &self.rpc_addr {
opts.grpc.addr = addr.clone()
}

@@ -353,8 +353,8 @@ mod tests {
addr = "127.0.0.1:4000"

[meta_client]
timeout_millis = 3000
connect_timeout_millis = 5000
timeout = "3s"
connect_timeout = "5s"
tcp_nodelay = true

[mysql]

@@ -30,7 +30,7 @@ pub const ENV_LIST_SEP: &str = ",";
pub struct MixOptions {
pub data_home: String,
pub procedure: ProcedureConfig,
pub kv_store: KvStoreConfig,
pub metadata_store: KvStoreConfig,
pub frontend: FrontendOptions,
pub datanode: DatanodeOptions,
pub logging: LoggingOptions,
@@ -144,8 +144,8 @@ mod tests {
mysql_runtime_size = 2

[meta_client]
timeout_millis = 3000
connect_timeout_millis = 5000
timeout = "3s"
connect_timeout = "5s"
tcp_nodelay = true

[wal]
@@ -263,6 +263,9 @@ mod tests {
]
);

// Should be the values from config file, not environment variables.
assert_eq!(opts.wal.dir.unwrap(), "/tmp/greptimedb/wal");

// Should be default values.
assert_eq!(opts.node_id, None);
},
@@ -13,6 +13,7 @@
// limitations under the License.

use std::sync::Arc;
use std::{fs, path};

use catalog::kvbackend::KvBackendCatalogManager;
use catalog::CatalogManagerRef;
@@ -41,8 +42,9 @@ use servers::Mode;
use snafu::ResultExt;

use crate::error::{
IllegalConfigSnafu, InitMetadataSnafu, Result, ShutdownDatanodeSnafu, ShutdownFrontendSnafu,
StartDatanodeSnafu, StartFrontendSnafu,
CreateDirSnafu, IllegalConfigSnafu, InitMetadataSnafu, Result, ShutdownDatanodeSnafu,
ShutdownFrontendSnafu, StartDatanodeSnafu, StartFrontendSnafu, StartProcedureManagerSnafu,
StopProcedureManagerSnafu,
};
use crate::options::{MixOptions, Options, TopLevelOptions};

@@ -95,9 +97,10 @@ pub struct StandaloneOptions {
pub prom_store: PromStoreOptions,
pub wal: WalConfig,
pub storage: StorageConfig,
pub kv_store: KvStoreConfig,
pub metadata_store: KvStoreConfig,
pub procedure: ProcedureConfig,
pub logging: LoggingOptions,
pub user_provider: Option<String>,
/// Options for different store engines.
pub region_engine: Vec<RegionEngineConfig>,
}
@@ -116,9 +119,10 @@ impl Default for StandaloneOptions {
prom_store: PromStoreOptions::default(),
wal: WalConfig::default(),
storage: StorageConfig::default(),
kv_store: KvStoreConfig::default(),
metadata_store: KvStoreConfig::default(),
procedure: ProcedureConfig::default(),
logging: LoggingOptions::default(),
user_provider: None,
region_engine: vec![
RegionEngineConfig::Mito(MitoConfig::default()),
RegionEngineConfig::File(FileEngineConfig::default()),
@@ -140,6 +144,7 @@ impl StandaloneOptions {
prom_store: self.prom_store,
meta_client: None,
logging: self.logging,
user_provider: self.user_provider,
..Default::default()
}
}
@@ -159,6 +164,7 @@ impl StandaloneOptions {
pub struct Instance {
datanode: Datanode,
frontend: FeInstance,
procedure_manager: ProcedureManagerRef,
}

impl Instance {
@@ -167,6 +173,11 @@ impl Instance {
self.datanode.start().await.context(StartDatanodeSnafu)?;
info!("Datanode instance started");

self.procedure_manager
.start()
.await
.context(StartProcedureManagerSnafu)?;

self.frontend.start().await.context(StartFrontendSnafu)?;
Ok(())
}
@@ -177,6 +188,11 @@ impl Instance {
.await
.context(ShutdownFrontendSnafu)?;

self.procedure_manager
.stop()
.await
.context(StopProcedureManagerSnafu)?;

self.datanode
.shutdown()
.await
@@ -277,7 +293,10 @@ impl StartCommand {
if self.influxdb_enable {
opts.influxdb.enable = self.influxdb_enable;
}
let kv_store = opts.kv_store.clone();

opts.user_provider = self.user_provider.clone();

let metadata_store = opts.metadata_store.clone();
let procedure = opts.procedure.clone();
let frontend = opts.clone().frontend_options();
let logging = opts.logging.clone();
@@ -285,7 +304,7 @@ impl StartCommand {

Ok(Options::Standalone(Box::new(MixOptions {
procedure,
kv_store,
metadata_store,
data_home: datanode.storage.data_home.to_string(),
frontend,
datanode,
@@ -310,10 +329,15 @@ impl StartCommand {
fe_opts, dn_opts
);

// Ensure the data_home directory exists.
fs::create_dir_all(path::Path::new(&opts.data_home)).context(CreateDirSnafu {
dir: &opts.data_home,
})?;

let metadata_dir = metadata_store_dir(&opts.data_home);
let (kv_store, procedure_manager) = FeInstance::try_build_standalone_components(
metadata_dir,
opts.kv_store,
opts.metadata_store,
opts.procedure,
)
.await
@@ -342,7 +366,7 @@ impl StartCommand {
let mut frontend = build_frontend(
fe_plugins,
kv_store,
procedure_manager,
procedure_manager.clone(),
catalog_manager,
region_server,
)
@@ -353,7 +377,11 @@ impl StartCommand {
.await
.context(StartFrontendSnafu)?;

Ok(Instance { datanode, frontend })
Ok(Instance {
datanode,
frontend,
procedure_manager,
})
}
}

@@ -481,6 +509,8 @@ mod tests {
assert_eq!(None, fe_opts.mysql.reject_no_database);
assert!(fe_opts.influxdb.enable);

assert_eq!("/tmp/greptimedb/test/wal", dn_opts.wal.dir.unwrap());

match &dn_opts.storage.store {
datanode::config::ObjectStoreConfig::S3(s3_config) => {
assert_eq!(
@@ -614,7 +644,7 @@ mod tests {
assert_eq!(options.influxdb, default_options.influxdb);
assert_eq!(options.prom_store, default_options.prom_store);
assert_eq!(options.wal, default_options.wal);
assert_eq!(options.kv_store, default_options.kv_store);
assert_eq!(options.metadata_store, default_options.metadata_store);
assert_eq!(options.procedure, default_options.procedure);
assert_eq!(options.logging, default_options.logging);
assert_eq!(options.region_engine, default_options.region_engine);
@@ -14,10 +14,9 @@
// See the License for the specific language governing permissions and
// limitations under the License.

// This file is copied from https://github.com/tikv/raft-engine/blob/8dd2a39f359ff16f5295f35343f626e0c10132fa/src/util.rs without any modification.
// This file is copied from https://github.com/tikv/raft-engine/blob/8dd2a39f359ff16f5295f35343f626e0c10132fa/src/util.rs

use std::fmt;
use std::fmt::{Display, Write};
use std::fmt::{self, Debug, Display, Write};
use std::ops::{Div, Mul};
use std::str::FromStr;

@@ -34,7 +33,7 @@ pub const GIB: u64 = MIB * BINARY_DATA_MAGNITUDE;
pub const TIB: u64 = GIB * BINARY_DATA_MAGNITUDE;
pub const PIB: u64 = TIB * BINARY_DATA_MAGNITUDE;

#[derive(Clone, Debug, Copy, PartialEq, Eq, PartialOrd)]
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd)]
pub struct ReadableSize(pub u64);

impl ReadableSize {
@@ -155,6 +154,12 @@ impl FromStr for ReadableSize {
}
}

impl Debug for ReadableSize {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self)
}
}

impl Display for ReadableSize {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
if self.0 >= PIB {

@@ -20,6 +20,8 @@ use serde::{Deserialize, Serialize};
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
#[serde(default)]
pub struct WalConfig {
// wal directory
pub dir: Option<String>,
// wal file size in bytes
pub file_size: ReadableSize,
// wal purge threshold in bytes
@@ -36,7 +38,8 @@ pub struct WalConfig {
impl Default for WalConfig {
fn default() -> Self {
Self {
file_size: ReadableSize::mb(256), // log file size 256MB
dir: None,
file_size: ReadableSize::mb(256), // log file size 256MB
purge_threshold: ReadableSize::gb(4), // purge threshold 4GB
purge_interval: Duration::from_secs(600),
read_batch_size: 128,
@@ -58,9 +58,15 @@ impl Function for RangeFunction {
"range_fn"
}

// range_fn will never been used, return_type could be arbitrary value, is not important
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
Ok(ConcreteDataType::float64_datatype())
// The first argument to range_fn is the expression to be evaluated
fn return_type(&self, input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
input_types
.first()
.cloned()
.ok_or(DataFusionError::Internal(
"No expr found in range_fn".into(),
))
.context(GeneralDataFusionSnafu)
}

/// `range_fn` will never been used. As long as a legal signature is returned, the specific content of the signature does not matter.
@@ -15,6 +15,8 @@
use std::env;
use std::io::ErrorKind;
use std::path::{Path, PathBuf};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::time::Duration;

use common_runtime::error::{Error, Result};
@@ -24,7 +26,7 @@ use reqwest::{Client, Response};
use serde::{Deserialize, Serialize};

/// The URL to report telemetry data.
pub const TELEMETRY_URL: &str = "https://api.greptime.cloud/db/otel/statistics";
pub const TELEMETRY_URL: &str = "https://telemetry.greptimestats.com/db/otel/statistics";
/// The local installation uuid cache file
const UUID_FILE_NAME: &str = ".greptimedb-telemetry-uuid";

@@ -36,13 +38,26 @@ const GREPTIMEDB_TELEMETRY_CLIENT_CONNECT_TIMEOUT: Duration = Duration::from_sec
const GREPTIMEDB_TELEMETRY_CLIENT_REQUEST_TIMEOUT: Duration = Duration::from_secs(10);

pub enum GreptimeDBTelemetryTask {
Enable(RepeatedTask<Error>),
Enable((RepeatedTask<Error>, Arc<AtomicBool>)),
Disable,
}

impl GreptimeDBTelemetryTask {
pub fn enable(interval: Duration, task_fn: BoxedTaskFunction<Error>) -> Self {
GreptimeDBTelemetryTask::Enable(RepeatedTask::new(interval, task_fn))
pub fn should_report(&self, value: bool) {
match self {
GreptimeDBTelemetryTask::Enable((_, should_report)) => {
should_report.store(value, Ordering::Relaxed);
}
GreptimeDBTelemetryTask::Disable => {}
}
}
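// Illustrative usage (not part of the original diff): a caller can pause reporting
// without stopping the underlying RepeatedTask, e.g. task.should_report(false);
// the next call() then skips report_telemetry_info() because it checks this flag.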

pub fn enable(
interval: Duration,
task_fn: BoxedTaskFunction<Error>,
should_report: Arc<AtomicBool>,
) -> Self {
GreptimeDBTelemetryTask::Enable((RepeatedTask::new(interval, task_fn), should_report))
}

pub fn disable() -> Self {
@@ -51,7 +66,7 @@ impl GreptimeDBTelemetryTask {

pub fn start(&self) -> Result<()> {
match self {
GreptimeDBTelemetryTask::Enable(task) => {
GreptimeDBTelemetryTask::Enable((task, _)) => {
print_anonymous_usage_data_disclaimer();
task.start(common_runtime::bg_runtime())
}
@@ -61,7 +76,7 @@ impl GreptimeDBTelemetryTask {

pub async fn stop(&self) -> Result<()> {
match self {
GreptimeDBTelemetryTask::Enable(task) => task.stop().await,
GreptimeDBTelemetryTask::Enable((task, _)) => task.stop().await,
GreptimeDBTelemetryTask::Disable => Ok(()),
}
}
@@ -191,6 +206,7 @@ pub struct GreptimeDBTelemetry {
client: Option<Client>,
working_home: Option<String>,
telemetry_url: &'static str,
should_report: Arc<AtomicBool>,
}

#[async_trait::async_trait]
@@ -200,13 +216,19 @@ impl TaskFunction<Error> for GreptimeDBTelemetry {
}

async fn call(&mut self) -> Result<()> {
self.report_telemetry_info().await;
if self.should_report.load(Ordering::Relaxed) {
self.report_telemetry_info().await;
}
Ok(())
}
}

impl GreptimeDBTelemetry {
pub fn new(working_home: Option<String>, statistics: Box<dyn Collector + Send + Sync>) -> Self {
pub fn new(
working_home: Option<String>,
statistics: Box<dyn Collector + Send + Sync>,
should_report: Arc<AtomicBool>,
) -> Self {
let client = Client::builder()
.connect_timeout(GREPTIMEDB_TELEMETRY_CLIENT_CONNECT_TIMEOUT)
.timeout(GREPTIMEDB_TELEMETRY_CLIENT_REQUEST_TIMEOUT)
@@ -216,6 +238,7 @@ impl GreptimeDBTelemetry {
statistics,
client: client.ok(),
telemetry_url: TELEMETRY_URL,
should_report,
}
}

@@ -250,7 +273,8 @@ impl GreptimeDBTelemetry {
mod tests {
use std::convert::Infallible;
use std::env;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::{AtomicBool, AtomicUsize};
use std::sync::Arc;
use std::time::Duration;

use common_test_util::ports;
@@ -370,7 +394,11 @@ mod tests {
let working_home = working_home_temp.path().to_str().unwrap().to_string();

let test_statistic = Box::new(TestStatistic);
let mut test_report = GreptimeDBTelemetry::new(Some(working_home.clone()), test_statistic);
let mut test_report = GreptimeDBTelemetry::new(
Some(working_home.clone()),
test_statistic,
Arc::new(AtomicBool::new(true)),
);
let url = Box::leak(format!("{}:{}", "http://localhost", port).into_boxed_str());
test_report.telemetry_url = url;
let response = test_report.report_telemetry_info().await.unwrap();
@@ -384,7 +412,11 @@ mod tests {
assert_eq!(1, body.nodes.unwrap());

let failed_statistic = Box::new(FailedStatistic);
let mut failed_report = GreptimeDBTelemetry::new(Some(working_home), failed_statistic);
let mut failed_report = GreptimeDBTelemetry::new(
Some(working_home),
failed_statistic,
Arc::new(AtomicBool::new(true)),
);
failed_report.telemetry_url = url;
let response = failed_report.report_telemetry_info().await;
assert!(response.is_none());
@@ -16,6 +16,7 @@ use std::sync::atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering};
use std::sync::Arc;
use std::time::Duration;

use common_base::readable_size::ReadableSize;
use common_telemetry::info;
use dashmap::mapref::entry::Entry;
use dashmap::DashMap;
@@ -31,8 +32,8 @@ use crate::error::{CreateChannelSnafu, InvalidConfigFilePathSnafu, InvalidTlsCon
const RECYCLE_CHANNEL_INTERVAL_SECS: u64 = 60;
pub const DEFAULT_GRPC_REQUEST_TIMEOUT_SECS: u64 = 10;
pub const DEFAULT_GRPC_CONNECT_TIMEOUT_SECS: u64 = 1;
pub const DEFAULT_MAX_GRPC_RECV_MESSAGE_SIZE: usize = 512 * 1024 * 1024;
pub const DEFAULT_MAX_GRPC_SEND_MESSAGE_SIZE: usize = 512 * 1024 * 1024;
pub const DEFAULT_MAX_GRPC_RECV_MESSAGE_SIZE: ReadableSize = ReadableSize::mb(512);
pub const DEFAULT_MAX_GRPC_SEND_MESSAGE_SIZE: ReadableSize = ReadableSize::mb(512);

lazy_static! {
static ref ID: AtomicU64 = AtomicU64::new(0);
@@ -250,9 +251,9 @@ pub struct ChannelConfig {
pub tcp_nodelay: bool,
pub client_tls: Option<ClientTlsOption>,
// Max gRPC receiving(decoding) message size
pub max_recv_message_size: usize,
pub max_recv_message_size: ReadableSize,
// Max gRPC sending(encoding) message size
pub max_send_message_size: usize,
pub max_send_message_size: ReadableSize,
}

impl Default for ChannelConfig {

@@ -258,7 +258,6 @@ impl<T: Serialize + DeserializeOwned> DeserializedValueWithBytes<T> {
self.bytes.to_vec()
}

#[cfg(feature = "testing")]
/// Notes: used for test purpose.
pub fn from_inner(inner: T) -> Self {
let bytes = serde_json::to_vec(&inner).unwrap();

@@ -34,6 +34,9 @@ pub enum Error {
#[snafu(display("Loader {} is already registered", name))]
LoaderConflict { name: String, location: Location },

#[snafu(display("Procedure Manager is stopped"))]
ManagerNotStart { location: Location },

#[snafu(display("Failed to serialize to json"))]
ToJson {
#[snafu(source)]
@@ -148,7 +151,8 @@ impl ErrorExt for Error {
| Error::FromJson { .. }
| Error::RetryTimesExceeded { .. }
| Error::RetryLater { .. }
| Error::WaitWatcher { .. } => StatusCode::Internal,
| Error::WaitWatcher { .. }
| Error::ManagerNotStart { .. } => StatusCode::Internal,
Error::LoaderConflict { .. } | Error::DuplicateProcedure { .. } => {
StatusCode::InvalidArguments
}
@@ -14,6 +14,8 @@

//! Common traits and structures for the procedure framework.

#![feature(assert_matches)]

pub mod error;
pub mod local;
pub mod options;

@@ -16,20 +16,21 @@ mod lock;
mod runner;

use std::collections::{HashMap, VecDeque};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Mutex, RwLock};
use std::time::{Duration, Instant};

use async_trait::async_trait;
use backon::ExponentialBuilder;
use common_runtime::{RepeatedTask, TaskFunction};
use common_telemetry::logging;
use common_telemetry::{info, logging};
use snafu::{ensure, ResultExt};
use tokio::sync::watch::{self, Receiver, Sender};
use tokio::sync::Notify;
use tokio::sync::{Mutex as TokioMutex, Notify};

use crate::error::{
DuplicateProcedureSnafu, Error, LoaderConflictSnafu, Result, StartRemoveOutdatedMetaTaskSnafu,
StopRemoveOutdatedMetaTaskSnafu,
DuplicateProcedureSnafu, Error, LoaderConflictSnafu, ManagerNotStartSnafu, Result,
StartRemoveOutdatedMetaTaskSnafu, StopRemoveOutdatedMetaTaskSnafu,
};
use crate::local::lock::LockMap;
use crate::local::runner::Runner;
@@ -135,6 +136,8 @@ pub(crate) struct ManagerContext {
messages: Mutex<HashMap<ProcedureId, ProcedureMessage>>,
/// Ids and finished time of finished procedures.
finished_procedures: Mutex<VecDeque<(ProcedureId, Instant)>>,
/// Running flag.
running: Arc<AtomicBool>,
}

#[async_trait]
@@ -153,9 +156,29 @@ impl ManagerContext {
procedures: RwLock::new(HashMap::new()),
messages: Mutex::new(HashMap::new()),
finished_procedures: Mutex::new(VecDeque::new()),
running: Arc::new(AtomicBool::new(false)),
}
}

#[cfg(test)]
pub(crate) fn set_running(&self) {
self.running.store(true, Ordering::Relaxed);
}

/// Set the running flag.
pub(crate) fn start(&self) {
self.running.store(true, Ordering::Relaxed);
}

pub(crate) fn stop(&self) {
self.running.store(false, Ordering::Relaxed);
}

/// Return `ProcedureManager` is running.
pub(crate) fn running(&self) -> bool {
self.running.load(Ordering::Relaxed)
}

/// Returns true if the procedure with specific `procedure_id` exists.
fn contains_procedure(&self, procedure_id: ProcedureId) -> bool {
let procedures = self.procedures.read().unwrap();
@@ -368,29 +391,37 @@ pub struct LocalManager {
procedure_store: Arc<ProcedureStore>,
max_retry_times: usize,
retry_delay: Duration,
remove_outdated_meta_task: RepeatedTask<Error>,
/// GC task.
remove_outdated_meta_task: TokioMutex<Option<RepeatedTask<Error>>>,
config: ManagerConfig,
}

impl LocalManager {
/// Create a new [LocalManager] with specific `config`.
pub fn new(config: ManagerConfig, state_store: StateStoreRef) -> LocalManager {
let manager_ctx = Arc::new(ManagerContext::new());
let remove_outdated_meta_task = RepeatedTask::new(
config.remove_outdated_meta_task_interval,
Box::new(RemoveOutdatedMetaFunction {
manager_ctx: manager_ctx.clone(),
ttl: config.remove_outdated_meta_ttl,
}),
);

LocalManager {
manager_ctx,
procedure_store: Arc::new(ProcedureStore::new(&config.parent_path, state_store)),
max_retry_times: config.max_retry_times,
retry_delay: config.retry_delay,
remove_outdated_meta_task,
remove_outdated_meta_task: TokioMutex::new(None),
config,
}
}

/// Build remove outedated meta task
pub fn build_remove_outdated_meta_task(&self) -> RepeatedTask<Error> {
RepeatedTask::new(
self.config.remove_outdated_meta_task_interval,
Box::new(RemoveOutdatedMetaFunction {
manager_ctx: self.manager_ctx.clone(),
ttl: self.config.remove_outdated_meta_ttl,
}),
)
}

/// Submit a root procedure with given `procedure_id`.
fn submit_root(
&self,
@@ -398,6 +429,8 @@ impl LocalManager {
step: u32,
procedure: BoxedProcedure,
) -> Result<Watcher> {
ensure!(self.manager_ctx.running(), ManagerNotStartSnafu);

let meta = Arc::new(ProcedureMeta::new(procedure_id, None, procedure.lock_key()));
let runner = Runner {
meta: meta.clone(),
@@ -426,44 +459,8 @@ impl LocalManager {

Ok(watcher)
}
}

#[async_trait]
impl ProcedureManager for LocalManager {
fn register_loader(&self, name: &str, loader: BoxedProcedureLoader) -> Result<()> {
let mut loaders = self.manager_ctx.loaders.lock().unwrap();
ensure!(!loaders.contains_key(name), LoaderConflictSnafu { name });

let _ = loaders.insert(name.to_string(), loader);

Ok(())
}

fn start(&self) -> Result<()> {
self.remove_outdated_meta_task
.start(common_runtime::bg_runtime())
.context(StartRemoveOutdatedMetaTaskSnafu)?;
Ok(())
}

async fn stop(&self) -> Result<()> {
self.remove_outdated_meta_task
.stop()
.await
.context(StopRemoveOutdatedMetaTaskSnafu)?;
Ok(())
}

async fn submit(&self, procedure: ProcedureWithId) -> Result<Watcher> {
let procedure_id = procedure.id;
ensure!(
!self.manager_ctx.contains_procedure(procedure_id),
DuplicateProcedureSnafu { procedure_id }
);

self.submit_root(procedure.id, 0, procedure.procedure)
}

/// Recovers unfinished procedures and reruns them.
async fn recover(&self) -> Result<()> {
logging::info!("LocalManager start to recover");
let recover_start = Instant::now();
@@ -519,6 +516,64 @@ impl ProcedureManager for LocalManager {

Ok(())
}
}

#[async_trait]
impl ProcedureManager for LocalManager {
fn register_loader(&self, name: &str, loader: BoxedProcedureLoader) -> Result<()> {
let mut loaders = self.manager_ctx.loaders.lock().unwrap();
ensure!(!loaders.contains_key(name), LoaderConflictSnafu { name });

let _ = loaders.insert(name.to_string(), loader);

Ok(())
}

async fn start(&self) -> Result<()> {
let mut task = self.remove_outdated_meta_task.lock().await;

if task.is_some() {
return Ok(());
}

let task_inner = self.build_remove_outdated_meta_task();

task_inner
.start(common_runtime::bg_runtime())
.context(StartRemoveOutdatedMetaTaskSnafu)?;

*task = Some(task_inner);

self.manager_ctx.start();

info!("LocalManager is start.");

self.recover().await
}

async fn stop(&self) -> Result<()> {
let mut task = self.remove_outdated_meta_task.lock().await;

if let Some(task) = task.take() {
task.stop().await.context(StopRemoveOutdatedMetaTaskSnafu)?;
}

self.manager_ctx.stop();

info!("LocalManager is stopped.");

Ok(())
}

async fn submit(&self, procedure: ProcedureWithId) -> Result<Watcher> {
let procedure_id = procedure.id;
ensure!(
!self.manager_ctx.contains_procedure(procedure_id),
DuplicateProcedureSnafu { procedure_id }
);

self.submit_root(procedure.id, 0, procedure.procedure)
}

async fn procedure_state(&self, procedure_id: ProcedureId) -> Result<Option<ProcedureState>> {
Ok(self.manager_ctx.state(procedure_id))
@@ -569,12 +624,14 @@ pub(crate) mod test_util {

#[cfg(test)]
mod tests {
use std::assert_matches::assert_matches;

use common_error::mock::MockError;
use common_error::status_code::StatusCode;
use common_test_util::temp_dir::create_temp_dir;

use super::*;
use crate::error::Error;
use crate::error::{self, Error};
use crate::store::state_store::ObjectStateStore;
use crate::{Context, Procedure, Status};

@@ -691,6 +748,7 @@ mod tests {
};
let state_store = Arc::new(ObjectStateStore::new(test_util::new_object_store(&dir)));
let manager = LocalManager::new(config, state_store);
manager.manager_ctx.start();

manager
.register_loader("ProcedureToLoad", ProcedureToLoad::loader())
@@ -714,6 +772,7 @@ mod tests {
};
let state_store = Arc::new(ObjectStateStore::new(object_store.clone()));
let manager = LocalManager::new(config, state_store);
manager.manager_ctx.start();

manager
.register_loader("ProcedureToLoad", ProcedureToLoad::loader())
@@ -762,6 +821,7 @@ mod tests {
};
let state_store = Arc::new(ObjectStateStore::new(test_util::new_object_store(&dir)));
let manager = LocalManager::new(config, state_store);
manager.manager_ctx.start();

let procedure_id = ProcedureId::random();
assert!(manager
@@ -812,6 +872,7 @@ mod tests {
};
let state_store = Arc::new(ObjectStateStore::new(test_util::new_object_store(&dir)));
let manager = LocalManager::new(config, state_store);
manager.manager_ctx.start();

#[derive(Debug)]
struct MockProcedure {
@@ -864,6 +925,66 @@ mod tests {
}

#[tokio::test]
async fn test_procedure_manager_stopped() {
let dir = create_temp_dir("procedure_manager_stopped");
let config = ManagerConfig {
parent_path: "data/".to_string(),
max_retry_times: 3,
retry_delay: Duration::from_millis(500),
..Default::default()
};
let state_store = Arc::new(ObjectStateStore::new(test_util::new_object_store(&dir)));
let manager = LocalManager::new(config, state_store);

let mut procedure = ProcedureToLoad::new("submit");
procedure.lock_key = LockKey::single("test.submit");
let procedure_id = ProcedureId::random();
assert_matches!(
manager
.submit(ProcedureWithId {
id: procedure_id,
procedure: Box::new(procedure),
})
.await
.unwrap_err(),
error::Error::ManagerNotStart { .. }
);
}

#[tokio::test]
async fn test_procedure_manager_restart() {
let dir = create_temp_dir("procedure_manager_restart");
let config = ManagerConfig {
parent_path: "data/".to_string(),
max_retry_times: 3,
retry_delay: Duration::from_millis(500),
..Default::default()
};
let state_store = Arc::new(ObjectStateStore::new(test_util::new_object_store(&dir)));
let manager = LocalManager::new(config, state_store);

manager.start().await.unwrap();
manager.stop().await.unwrap();
manager.start().await.unwrap();

let mut procedure = ProcedureToLoad::new("submit");
procedure.lock_key = LockKey::single("test.submit");
let procedure_id = ProcedureId::random();
assert!(manager
.submit(ProcedureWithId {
id: procedure_id,
procedure: Box::new(procedure),
})
.await
.is_ok());
assert!(manager
.procedure_state(procedure_id)
.await
.unwrap()
.is_some());
}

#[tokio::test(flavor = "multi_thread")]
async fn test_remove_outdated_meta_task() {
let dir = create_temp_dir("remove_outdated_meta_task");
let object_store = test_util::new_object_store(&dir);
@@ -876,6 +997,7 @@ mod tests {
};
let state_store = Arc::new(ObjectStateStore::new(object_store.clone()));
let manager = LocalManager::new(config, state_store);
manager.manager_ctx.set_running();

let mut procedure = ProcedureToLoad::new("submit");
procedure.lock_key = LockKey::single("test.submit");
@@ -889,8 +1011,9 @@ mod tests {
.is_ok());
let mut watcher = manager.procedure_watcher(procedure_id).unwrap();
watcher.changed().await.unwrap();
manager.start().unwrap();
tokio::time::sleep(Duration::from_millis(10)).await;

manager.start().await.unwrap();
tokio::time::sleep(Duration::from_millis(300)).await;
assert!(manager
.procedure_state(procedure_id)
.await
@@ -902,6 +1025,8 @@ mod tests {
let mut procedure = ProcedureToLoad::new("submit");
procedure.lock_key = LockKey::single("test.submit");
let procedure_id = ProcedureId::random();

manager.manager_ctx.set_running();
assert!(manager
.submit(ProcedureWithId {
id: procedure_id,
@@ -911,11 +1036,33 @@ mod tests {
.is_ok());
let mut watcher = manager.procedure_watcher(procedure_id).unwrap();
watcher.changed().await.unwrap();
tokio::time::sleep(Duration::from_millis(10)).await;
tokio::time::sleep(Duration::from_millis(300)).await;
assert!(manager
.procedure_state(procedure_id)
.await
.unwrap()
.is_some());

// After restart
let mut procedure = ProcedureToLoad::new("submit");
procedure.lock_key = LockKey::single("test.submit");
let procedure_id = ProcedureId::random();
assert!(manager
.submit(ProcedureWithId {
id: procedure_id,
procedure: Box::new(procedure),
})
.await
.is_ok());
let mut watcher = manager.procedure_watcher(procedure_id).unwrap();
watcher.changed().await.unwrap();

manager.start().await.unwrap();
tokio::time::sleep(Duration::from_millis(300)).await;
assert!(manager
.procedure_state(procedure_id)
.await
.unwrap()
.is_none());
}
}
@@ -19,7 +19,7 @@ use backon::{BackoffBuilder, ExponentialBuilder};
use common_telemetry::logging;
use tokio::time;

use crate::error::{ProcedurePanicSnafu, Result};
use crate::error::{self, ProcedurePanicSnafu, Result};
use crate::local::{ManagerContext, ProcedureMeta, ProcedureMetaRef};
use crate::store::ProcedureStore;
use crate::ProcedureState::Retrying;
@@ -102,7 +102,6 @@ impl Drop for ProcedureGuard {
}
}

// TODO(yingwen): Support cancellation.
pub(crate) struct Runner {
pub(crate) meta: ProcedureMetaRef,
pub(crate) procedure: BoxedProcedure,
@@ -114,6 +113,11 @@ pub(crate) struct Runner {
}

impl Runner {
/// Return `ProcedureManager` is running.
pub(crate) fn running(&self) -> bool {
self.manager_ctx.running()
}

/// Run the procedure.
pub(crate) async fn run(mut self) {
// Ensure we can update the procedure state.
@@ -152,6 +156,12 @@ impl Runner {
let procedure_ids = self.manager_ctx.procedures_in_tree(&self.meta);
// Clean resources.
self.manager_ctx.on_procedures_finish(&procedure_ids);

// If `ProcedureManager` is stopped, it stops the current task immediately without deleting the procedure.
if !self.running() {
return;
}

for id in procedure_ids {
if let Err(e) = self.store.delete_procedure(id).await {
logging::error!(
@@ -186,6 +196,13 @@ impl Runner {
let mut retry = self.exponential_builder.build();
let mut retry_times = 0;
loop {
// Don't store state if `ProcedureManager` is stopped.
if !self.running() {
self.meta.set_state(ProcedureState::Failed {
error: Arc::new(error::ManagerNotStartSnafu {}.build()),
});
return;
}
match self.execute_once(ctx).await {
ExecResult::Done | ExecResult::Failed => return,
ExecResult::Continue => (),
@@ -238,6 +255,14 @@ impl Runner {
status.need_persist(),
);

// Don't store state if `ProcedureManager` is stopped.
if !self.running() {
self.meta.set_state(ProcedureState::Failed {
error: Arc::new(error::ManagerNotStartSnafu {}.build()),
});
return ExecResult::Failed;
}

if status.need_persist() {
if let Err(err) = self.persist_procedure().await {
self.meta.set_state(ProcedureState::retrying(Arc::new(err)));
@@ -272,6 +297,14 @@ impl Runner {
e.is_retry_later(),
);

// Don't store state if `ProcedureManager` is stopped.
if !self.running() {
self.meta.set_state(ProcedureState::Failed {
error: Arc::new(error::ManagerNotStartSnafu {}.build()),
});
return ExecResult::Failed;
}

if e.is_retry_later() {
self.meta.set_state(ProcedureState::retrying(Arc::new(e)));
return ExecResult::RetryLater;
@@ -581,6 +614,7 @@ mod tests {
let object_store = test_util::new_object_store(&dir);
let procedure_store = Arc::new(ProcedureStore::from_object_store(object_store.clone()));
let mut runner = new_runner(meta, Box::new(normal), procedure_store.clone());
runner.manager_ctx.start();

let res = runner.execute_once(&ctx).await;
assert!(res.is_continue(), "{res:?}");
@@ -641,6 +675,7 @@ mod tests {
let object_store = test_util::new_object_store(&dir);
let procedure_store = Arc::new(ProcedureStore::from_object_store(object_store.clone()));
let mut runner = new_runner(meta, Box::new(suspend), procedure_store);
runner.manager_ctx.start();

let res = runner.execute_once(&ctx).await;
assert!(res.is_continue(), "{res:?}");
@@ -742,6 +777,7 @@ mod tests {
let procedure_store = Arc::new(ProcedureStore::from_object_store(object_store.clone()));
let mut runner = new_runner(meta.clone(), Box::new(parent), procedure_store.clone());
let manager_ctx = Arc::new(ManagerContext::new());
manager_ctx.start();
// Manually add this procedure to the manager ctx.
assert!(manager_ctx.try_insert_procedure(meta));
// Replace the manager ctx.
@@ -769,6 +805,70 @@ mod tests {
}
}

#[tokio::test]
async fn test_running_is_stopped() {
let exec_fn = move |_| async move { Ok(Status::Executing { persist: true }) }.boxed();
let normal = ProcedureAdapter {
data: "normal".to_string(),
lock_key: LockKey::single("catalog.schema.table"),
exec_fn,
};

let dir = create_temp_dir("test_running_is_stopped");
let meta = normal.new_meta(ROOT_ID);
let ctx = context_without_provider(meta.id);
let object_store = test_util::new_object_store(&dir);
let procedure_store = Arc::new(ProcedureStore::from_object_store(object_store.clone()));
let mut runner = new_runner(meta, Box::new(normal), procedure_store.clone());
runner.manager_ctx.start();

let res = runner.execute_once(&ctx).await;
assert!(res.is_continue(), "{res:?}");
check_files(
&object_store,
&procedure_store,
ctx.procedure_id,
&["0000000000.step"],
)
.await;

runner.manager_ctx.stop();
let res = runner.execute_once(&ctx).await;
assert!(res.is_failed());
// Shouldn't write any files
check_files(
&object_store,
&procedure_store,
ctx.procedure_id,
&["0000000000.step"],
)
.await;
}

#[tokio::test]
async fn test_running_is_stopped_on_error() {
let exec_fn =
|_| async { Err(Error::external(MockError::new(StatusCode::Unexpected))) }.boxed();
let normal = ProcedureAdapter {
data: "fail".to_string(),
lock_key: LockKey::single("catalog.schema.table"),
exec_fn,
};

let dir = create_temp_dir("test_running_is_stopped_on_error");
let meta = normal.new_meta(ROOT_ID);
let ctx = context_without_provider(meta.id);
let object_store = test_util::new_object_store(&dir);
let procedure_store = Arc::new(ProcedureStore::from_object_store(object_store.clone()));
let mut runner = new_runner(meta, Box::new(normal), procedure_store.clone());
runner.manager_ctx.stop();

let res = runner.execute_once(&ctx).await;
assert!(res.is_failed(), "{res:?}");
// Shouldn't write any files
check_files(&object_store, &procedure_store, ctx.procedure_id, &[]).await;
}

#[tokio::test]
async fn test_execute_on_error() {
let exec_fn =
@@ -785,6 +885,7 @@ mod tests {
let object_store = test_util::new_object_store(&dir);
let procedure_store = Arc::new(ProcedureStore::from_object_store(object_store.clone()));
let mut runner = new_runner(meta.clone(), Box::new(fail), procedure_store.clone());
runner.manager_ctx.start();

let res = runner.execute_once(&ctx).await;
assert!(res.is_failed(), "{res:?}");
@@ -826,6 +927,7 @@ mod tests {
let object_store = test_util::new_object_store(&dir);
let procedure_store = Arc::new(ProcedureStore::from_object_store(object_store.clone()));
let mut runner = new_runner(meta.clone(), Box::new(retry_later), procedure_store.clone());
runner.manager_ctx.start();

let res = runner.execute_once(&ctx).await;
assert!(res.is_retry_later(), "{res:?}");
@@ -863,6 +965,8 @@ mod tests {
Box::new(exceed_max_retry_later),
procedure_store,
);
runner.manager_ctx.start();

runner.exponential_builder = ExponentialBuilder::default()
.with_min_delay(Duration::from_millis(1))
.with_max_times(3);
@@ -933,8 +1037,8 @@ mod tests {
let object_store = test_util::new_object_store(&dir);
let procedure_store = Arc::new(ProcedureStore::from_object_store(object_store.clone()));
let mut runner = new_runner(meta.clone(), Box::new(parent), procedure_store);

let manager_ctx = Arc::new(ManagerContext::new());
manager_ctx.start();
// Manually add this procedure to the manager ctx.
assert!(manager_ctx.try_insert_procedure(meta.clone()));
// Replace the manager ctx.
@@ -279,8 +279,14 @@ pub trait ProcedureManager: Send + Sync + 'static {
/// Registers loader for specific procedure type `name`.
fn register_loader(&self, name: &str, loader: BoxedProcedureLoader) -> Result<()>;

fn start(&self) -> Result<()>;
/// Starts the background GC task.
///
/// Recovers unfinished procedures and reruns them.
///
/// Callers should ensure all loaders are registered.
async fn start(&self) -> Result<()>;

/// Stops the background GC task.
async fn stop(&self) -> Result<()>;

/// Submits a procedure to execute.
@@ -288,11 +294,6 @@ pub trait ProcedureManager: Send + Sync + 'static {
/// Returns a [Watcher] to watch the created procedure.
async fn submit(&self, procedure: ProcedureWithId) -> Result<Watcher>;

/// Recovers unfinished procedures and reruns them.
///
/// Callers should ensure all loaders are registered.
async fn recover(&self) -> Result<()>;

/// Query the procedure state.
///
/// Returns `Ok(None)` if the procedure doesn't exist.

@@ -71,6 +71,7 @@ mod tests {
};
let state_store = Arc::new(ObjectStateStore::new(test_util::new_object_store(&dir)));
let manager = LocalManager::new(config, state_store);
manager.start().await.unwrap();

#[derive(Debug)]
struct MockProcedure {
@@ -13,8 +13,10 @@
// limitations under the License.

use std::collections::HashMap;
use std::slice;
use std::sync::Arc;

use datafusion::arrow::util::pretty::pretty_format_batches;
use datatypes::schema::SchemaRef;
use datatypes::value::Value;
use datatypes::vectors::{Helper, VectorRef};
@@ -169,6 +171,13 @@ impl RecordBatch {

Ok(vectors)
}

/// Pretty display this record batch like a table
pub fn pretty_print(&self) -> String {
pretty_format_batches(slice::from_ref(&self.df_record_batch))
.map(|t| t.to_string())
.unwrap_or("failed to pretty display a record batch".to_string())
}
}

impl Serialize for RecordBatch {
@@ -333,9 +333,9 @@ pub struct DatanodeOptions {
pub rpc_hostname: Option<String>,
pub rpc_runtime_size: usize,
// Max gRPC receiving(decoding) message size
pub rpc_max_recv_message_size: usize,
pub rpc_max_recv_message_size: ReadableSize,
// Max gRPC sending(encoding) message size
pub rpc_max_send_message_size: usize,
pub rpc_max_send_message_size: ReadableSize,
pub heartbeat: HeartbeatOptions,
pub http: HttpOptions,
pub meta_client: Option<MetaClientOptions>,

@@ -367,7 +367,10 @@ impl DatanodeBuilder {
/// Build [RaftEngineLogStore]
async fn build_log_store(opts: &DatanodeOptions) -> Result<Arc<RaftEngineLogStore>> {
let data_home = normalize_dir(&opts.storage.data_home);
let wal_dir = format!("{}{WAL_DIR}", data_home);
let wal_dir = match &opts.wal.dir {
Some(dir) => dir.clone(),
None => format!("{}{WAL_DIR}", data_home),
};
let wal_config = opts.wal.clone();

// create WAL directory
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::atomic::AtomicBool;
use std::sync::Arc;

use async_trait::async_trait;
@@ -60,6 +61,8 @@ pub async fn get_greptimedb_telemetry_task(
if !enable || cfg!(test) || cfg!(debug_assertions) {
return Arc::new(GreptimeDBTelemetryTask::disable());
}
// Always enable.
let should_report = Arc::new(AtomicBool::new(true));

match mode {
Mode::Standalone => Arc::new(GreptimeDBTelemetryTask::enable(
@@ -70,7 +73,9 @@ pub async fn get_greptimedb_telemetry_task(
uuid: default_get_uuid(&working_home),
retry: 0,
}),
should_report.clone(),
)),
should_report,
)),
Mode::Distributed => Arc::new(GreptimeDBTelemetryTask::disable()),
}
@@ -69,7 +69,7 @@ impl HeartbeatTask {
|
||||
) -> Result<Self> {
|
||||
let region_alive_keeper = Arc::new(RegionAliveKeeper::new(
|
||||
region_server.clone(),
|
||||
opts.heartbeat.interval_millis,
|
||||
opts.heartbeat.interval.as_millis() as u64,
|
||||
));
|
||||
let resp_handler_executor = Arc::new(HandlerGroupExecutor::new(vec![
|
||||
Arc::new(ParseMailboxMessageHandler),
|
||||
@@ -86,7 +86,7 @@ impl HeartbeatTask {
|
||||
running: Arc::new(AtomicBool::new(false)),
|
||||
meta_client: Arc::new(meta_client),
|
||||
region_server,
|
||||
interval: opts.heartbeat.interval_millis,
|
||||
interval: opts.heartbeat.interval.as_millis() as u64,
|
||||
resp_handler_executor,
|
||||
region_alive_keeper,
|
||||
})
|
||||
@@ -332,14 +332,14 @@ pub async fn new_metasrv_client(
|
||||
let member_id = node_id;
|
||||
|
||||
let config = ChannelConfig::new()
|
||||
.timeout(Duration::from_millis(meta_config.timeout_millis))
|
||||
.connect_timeout(Duration::from_millis(meta_config.connect_timeout_millis))
|
||||
.timeout(meta_config.timeout)
|
||||
.connect_timeout(meta_config.connect_timeout)
|
||||
.tcp_nodelay(meta_config.tcp_nodelay);
|
||||
let channel_manager = ChannelManager::with_config(config.clone());
|
||||
let heartbeat_channel_manager = ChannelManager::with_config(
|
||||
config
|
||||
.timeout(Duration::from_millis(meta_config.heartbeat_timeout_millis))
|
||||
.connect_timeout(Duration::from_millis(meta_config.heartbeat_timeout_millis)),
|
||||
.timeout(meta_config.timeout)
|
||||
.connect_timeout(meta_config.connect_timeout),
|
||||
);
|
||||
|
||||
let mut meta_client = MetaClientBuilder::new(cluster_id, member_id, Role::Datanode)
|
||||
|
||||
@@ -40,8 +40,8 @@ impl Services {
|
||||
let region_server_handler = Some(Arc::new(region_server.clone()) as _);
|
||||
let runtime = region_server.runtime();
|
||||
let grpc_config = GrpcServerConfig {
|
||||
max_recv_message_size: opts.rpc_max_recv_message_size,
|
||||
max_send_message_size: opts.rpc_max_send_message_size,
|
||||
max_recv_message_size: opts.rpc_max_recv_message_size.as_bytes() as usize,
|
||||
max_send_message_size: opts.rpc_max_send_message_size.as_bytes() as usize,
|
||||
};
|
||||
|
||||
Ok(Self {
|
||||
|
||||
@@ -49,8 +49,8 @@ impl HeartbeatTask {
|
||||
) -> Self {
|
||||
HeartbeatTask {
|
||||
meta_client,
|
||||
report_interval: heartbeat_opts.interval_millis,
|
||||
retry_interval: heartbeat_opts.retry_interval_millis,
|
||||
report_interval: heartbeat_opts.interval.as_millis() as u64,
|
||||
retry_interval: heartbeat_opts.retry_interval.as_millis() as u64,
|
||||
resp_handler_executor,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -22,7 +22,6 @@ mod script;
|
||||
mod standalone;
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use api::v1::meta::Role;
|
||||
use async_trait::async_trait;
|
||||
@@ -231,14 +230,12 @@ impl Instance {
|
||||
);
|
||||
|
||||
let channel_config = ChannelConfig::new()
|
||||
.timeout(Duration::from_millis(meta_client_options.timeout_millis))
|
||||
.connect_timeout(Duration::from_millis(
|
||||
meta_client_options.connect_timeout_millis,
|
||||
))
|
||||
.timeout(meta_client_options.timeout)
|
||||
.connect_timeout(meta_client_options.connect_timeout)
|
||||
.tcp_nodelay(meta_client_options.tcp_nodelay);
|
||||
let ddl_channel_config = channel_config.clone().timeout(Duration::from_millis(
|
||||
meta_client_options.ddl_timeout_millis,
|
||||
));
|
||||
let ddl_channel_config = channel_config
|
||||
.clone()
|
||||
.timeout(meta_client_options.ddl_timeout);
|
||||
let channel_manager = ChannelManager::with_config(channel_config);
|
||||
let ddl_channel_manager = ChannelManager::with_config(ddl_channel_config);
|
||||
|
||||
|
||||
@@ -68,8 +68,8 @@ impl Services {
|
||||
);
|
||||
|
||||
let grpc_config = GrpcServerConfig {
|
||||
max_recv_message_size: opts.max_recv_message_size,
|
||||
max_send_message_size: opts.max_send_message_size,
|
||||
max_recv_message_size: opts.max_recv_message_size.as_bytes() as usize,
|
||||
max_send_message_size: opts.max_send_message_size.as_bytes() as usize,
|
||||
};
|
||||
let grpc_server = GrpcServer::new(
|
||||
Some(grpc_config),
|
||||
|
||||
@@ -12,6 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use common_base::readable_size::ReadableSize;
|
||||
use common_grpc::channel_manager::{
|
||||
DEFAULT_MAX_GRPC_RECV_MESSAGE_SIZE, DEFAULT_MAX_GRPC_SEND_MESSAGE_SIZE,
|
||||
};
|
||||
@@ -22,9 +23,9 @@ pub struct GrpcOptions {
|
||||
pub addr: String,
|
||||
pub runtime_size: usize,
|
||||
// Max gRPC receiving(decoding) message size
|
||||
pub max_recv_message_size: usize,
|
||||
pub max_recv_message_size: ReadableSize,
|
||||
// Max gRPC sending(encoding) message size
|
||||
pub max_send_message_size: usize,
|
||||
pub max_send_message_size: ReadableSize,
|
||||
}
|
||||
|
||||
impl Default for GrpcOptions {
|
||||
|
||||
@@ -14,6 +14,7 @@ common-macro = { workspace = true }
|
||||
common-meta = { workspace = true }
|
||||
common-telemetry = { workspace = true }
|
||||
etcd-client.workspace = true
|
||||
humantime-serde.workspace = true
|
||||
rand.workspace = true
|
||||
serde.workspace = true
|
||||
serde_json.workspace = true
|
||||
|
||||
@@ -12,6 +12,8 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::time::Duration;
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
pub mod client;
|
||||
@@ -21,31 +23,45 @@ pub mod error;
|
||||
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
|
||||
pub struct MetaClientOptions {
|
||||
pub metasrv_addrs: Vec<String>,
|
||||
pub timeout_millis: u64,
|
||||
#[serde(default = "default_heartbeat_timeout_millis")]
|
||||
pub heartbeat_timeout_millis: u64,
|
||||
#[serde(default = "default_ddl_timeout_millis")]
|
||||
pub ddl_timeout_millis: u64,
|
||||
pub connect_timeout_millis: u64,
|
||||
#[serde(default = "default_timeout")]
|
||||
#[serde(with = "humantime_serde")]
|
||||
pub timeout: Duration,
|
||||
#[serde(default = "default_heartbeat_timeout")]
|
||||
#[serde(with = "humantime_serde")]
|
||||
pub heartbeat_timeout: Duration,
|
||||
#[serde(default = "default_ddl_timeout")]
|
||||
#[serde(with = "humantime_serde")]
|
||||
pub ddl_timeout: Duration,
|
||||
#[serde(default = "default_connect_timeout")]
|
||||
#[serde(with = "humantime_serde")]
|
||||
pub connect_timeout: Duration,
|
||||
pub tcp_nodelay: bool,
|
||||
}
|
||||
|
||||
fn default_heartbeat_timeout_millis() -> u64 {
|
||||
500u64
|
||||
fn default_heartbeat_timeout() -> Duration {
|
||||
Duration::from_millis(500u64)
|
||||
}
|
||||
|
||||
fn default_ddl_timeout_millis() -> u64 {
|
||||
10_000u64
|
||||
fn default_ddl_timeout() -> Duration {
|
||||
Duration::from_millis(10_000u64)
|
||||
}
|
||||
|
||||
fn default_connect_timeout() -> Duration {
|
||||
Duration::from_millis(1_000u64)
|
||||
}
|
||||
|
||||
fn default_timeout() -> Duration {
|
||||
Duration::from_millis(3_000u64)
|
||||
}
|
||||
|
||||
impl Default for MetaClientOptions {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
metasrv_addrs: vec!["127.0.0.1:3002".to_string()],
|
||||
timeout_millis: 3_000u64,
|
||||
heartbeat_timeout_millis: default_heartbeat_timeout_millis(),
|
||||
ddl_timeout_millis: default_ddl_timeout_millis(),
|
||||
connect_timeout_millis: 1_000u64,
|
||||
timeout: default_timeout(),
|
||||
heartbeat_timeout: default_heartbeat_timeout(),
|
||||
ddl_timeout: default_ddl_timeout(),
|
||||
connect_timeout: default_connect_timeout(),
|
||||
tcp_nodelay: true,
|
||||
}
|
||||
}
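A self-contained sketch (assuming the `serde`, `toml`, and `humantime-serde` crates) of what the new duration fields accept: human-readable strings such as "500ms" or "3s" instead of raw millisecond integers.

use std::time::Duration;

use serde::Deserialize;

#[derive(Deserialize)]
struct DurationOpts {
    #[serde(with = "humantime_serde")]
    timeout: Duration,
}

fn main() {
    let opts: DurationOpts = toml::from_str(r#"timeout = "3s""#).unwrap();
    assert_eq!(opts.timeout, Duration::from_secs(3));
}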
@@ -111,8 +111,7 @@ impl MetaSrvInstance {
|
||||
.await
|
||||
.context(error::SendShutdownSignalSnafu)?;
|
||||
}
|
||||
|
||||
self.meta_srv.shutdown();
|
||||
self.meta_srv.shutdown().await?;
|
||||
self.http_srv
|
||||
.shutdown()
|
||||
.await
|
||||
|
||||
@@ -41,6 +41,12 @@ pub enum Error {
|
||||
source: common_meta::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to start telemetry task"))]
|
||||
StartTelemetryTask {
|
||||
location: Location,
|
||||
source: common_runtime::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to submit ddl task"))]
|
||||
SubmitDdlTask {
|
||||
location: Location,
|
||||
@@ -393,8 +399,14 @@ pub enum Error {
|
||||
#[snafu(display("Missing required parameter, param: {:?}", param))]
|
||||
MissingRequiredParameter { param: String },
|
||||
|
||||
#[snafu(display("Failed to recover procedure"))]
|
||||
RecoverProcedure {
|
||||
#[snafu(display("Failed to start procedure manager"))]
|
||||
StartProcedureManager {
|
||||
location: Location,
|
||||
source: common_procedure::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to stop procedure manager"))]
|
||||
StopProcedureManager {
|
||||
location: Location,
|
||||
source: common_procedure::Error,
|
||||
},
|
||||
@@ -616,16 +628,19 @@ impl ErrorExt for Error {
|
||||
Error::RequestDatanode { source, .. } => source.status_code(),
|
||||
Error::InvalidCatalogValue { source, .. }
|
||||
| Error::InvalidFullTableName { source, .. } => source.status_code(),
|
||||
Error::RecoverProcedure { source, .. }
|
||||
| Error::SubmitProcedure { source, .. }
|
||||
| Error::WaitProcedure { source, .. } => source.status_code(),
|
||||
Error::SubmitProcedure { source, .. } | Error::WaitProcedure { source, .. } => {
|
||||
source.status_code()
|
||||
}
|
||||
Error::ShutdownServer { source, .. } | Error::StartHttp { source, .. } => {
|
||||
source.status_code()
|
||||
}
|
||||
Error::StartProcedureManager { source, .. }
|
||||
| Error::StopProcedureManager { source, .. } => source.status_code(),
|
||||
|
||||
Error::ListCatalogs { source, .. } | Error::ListSchemas { source, .. } => {
|
||||
source.status_code()
|
||||
}
|
||||
Error::StartTelemetryTask { source, .. } => source.status_code(),
|
||||
|
||||
Error::RegionFailoverCandidatesNotFound { .. } => StatusCode::RuntimeResourcesExhausted,
|
||||
Error::NextSequence { source, .. } => source.status_code(),
|
||||
|
||||
@@ -12,6 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::atomic::AtomicBool;
|
||||
use std::sync::Arc;
|
||||
|
||||
use async_trait::async_trait;
|
||||
@@ -63,7 +64,8 @@ pub async fn get_greptimedb_telemetry_task(
|
||||
if !enable || cfg!(test) || cfg!(debug_assertions) {
|
||||
return Arc::new(GreptimeDBTelemetryTask::disable());
|
||||
}
|
||||
|
||||
// Controlled by meta server state, only leader reports the info.
|
||||
let should_report = Arc::new(AtomicBool::new(false));
|
||||
Arc::new(GreptimeDBTelemetryTask::enable(
|
||||
TELEMETRY_INTERVAL,
|
||||
Box::new(GreptimeDBTelemetry::new(
|
||||
@@ -73,6 +75,8 @@ pub async fn get_greptimedb_telemetry_task(
|
||||
uuid: default_get_uuid(&working_home),
|
||||
retry: 0,
|
||||
}),
|
||||
should_report.clone(),
|
||||
)),
|
||||
should_report,
|
||||
))
|
||||
}
|
||||
|
||||
@@ -28,7 +28,7 @@ use common_meta::sequence::SequenceRef;
|
||||
use common_procedure::options::ProcedureConfig;
|
||||
use common_procedure::ProcedureManagerRef;
|
||||
use common_telemetry::logging::LoggingOptions;
|
||||
use common_telemetry::{debug, error, info, warn};
|
||||
use common_telemetry::{error, info, warn};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use servers::http::HttpOptions;
|
||||
use snafu::ResultExt;
|
||||
@@ -37,7 +37,10 @@ use tokio::sync::broadcast::error::RecvError;
|
||||
|
||||
use crate::cluster::MetaPeerClientRef;
|
||||
use crate::election::{Election, LeaderChangeMessage};
|
||||
use crate::error::{InitMetadataSnafu, RecoverProcedureSnafu, Result};
|
||||
use crate::error::{
|
||||
InitMetadataSnafu, Result, StartProcedureManagerSnafu, StartTelemetryTaskSnafu,
|
||||
StopProcedureManagerSnafu,
|
||||
};
|
||||
use crate::handler::HeartbeatHandlerGroup;
|
||||
use crate::lock::DistLockRef;
|
||||
use crate::pubsub::{PublishRef, SubscribeManagerRef};
|
||||
@@ -169,6 +172,37 @@ pub struct SelectorContext {
|
||||
pub type SelectorRef = Arc<dyn Selector<Context = SelectorContext, Output = Vec<Peer>>>;
|
||||
pub type ElectionRef = Arc<dyn Election<Leader = LeaderValue>>;
|
||||
|
||||
pub struct MetaStateHandler {
|
||||
procedure_manager: ProcedureManagerRef,
|
||||
subscribe_manager: Option<SubscribeManagerRef>,
|
||||
greptimedb_telemetry_task: Arc<GreptimeDBTelemetryTask>,
|
||||
}
|
||||
|
||||
impl MetaStateHandler {
|
||||
pub async fn on_become_leader(&self) {
|
||||
if let Err(e) = self.procedure_manager.start().await {
|
||||
error!(e; "Failed to start procedure manager");
|
||||
}
|
||||
self.greptimedb_telemetry_task.should_report(true);
|
||||
}
|
||||
|
||||
pub async fn on_become_follower(&self) {
|
||||
// Stops the procedures.
|
||||
if let Err(e) = self.procedure_manager.stop().await {
|
||||
error!(e; "Failed to stop procedure manager");
|
||||
}
|
||||
// Suspends reporting.
|
||||
self.greptimedb_telemetry_task.should_report(false);
|
||||
|
||||
if let Some(sub_manager) = self.subscribe_manager.clone() {
|
||||
info!("Leader changed, un_subscribe all");
|
||||
if let Err(e) = sub_manager.un_subscribe_all() {
|
||||
error!("Failed to un_subscribe all, error: {}", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct MetaSrv {
|
||||
started: Arc<AtomicBool>,
|
||||
@@ -212,7 +246,15 @@ impl MetaSrv {
|
||||
let leader_cached_kv_store = self.leader_cached_kv_store.clone();
|
||||
let subscribe_manager = self.subscribe_manager();
|
||||
let mut rx = election.subscribe_leader_change();
|
||||
let task_handler = self.greptimedb_telemetry_task.clone();
|
||||
let greptimedb_telemetry_task = self.greptimedb_telemetry_task.clone();
|
||||
greptimedb_telemetry_task
|
||||
.start()
|
||||
.context(StartTelemetryTaskSnafu)?;
|
||||
let state_handler = MetaStateHandler {
|
||||
greptimedb_telemetry_task,
|
||||
subscribe_manager,
|
||||
procedure_manager,
|
||||
};
|
||||
let _handle = common_runtime::spawn_bg(async move {
|
||||
loop {
|
||||
match rx.recv().await {
|
||||
@@ -225,28 +267,12 @@ impl MetaSrv {
|
||||
);
|
||||
match msg {
|
||||
LeaderChangeMessage::Elected(_) => {
|
||||
if let Err(e) = procedure_manager.recover().await {
|
||||
error!("Failed to recover procedures, error: {e}");
|
||||
}
|
||||
let _ = task_handler.start().map_err(|e| {
|
||||
debug!(
|
||||
"Failed to start greptimedb telemetry task, error: {e}"
|
||||
);
|
||||
});
|
||||
state_handler.on_become_leader().await;
|
||||
}
|
||||
LeaderChangeMessage::StepDown(leader) => {
|
||||
if let Some(sub_manager) = subscribe_manager.clone() {
|
||||
info!("Leader changed, un_subscribe all");
|
||||
if let Err(e) = sub_manager.un_subscribe_all() {
|
||||
error!("Failed to un_subscribe all, error: {}", e);
|
||||
}
|
||||
}
|
||||
error!("Leader :{:?} step down", leader);
|
||||
let _ = task_handler.stop().await.map_err(|e| {
|
||||
debug!(
|
||||
"Failed to stop greptimedb telemetry task, error: {e}"
|
||||
);
|
||||
});
|
||||
|
||||
state_handler.on_become_follower().await;
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -259,6 +285,8 @@ impl MetaSrv {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
state_handler.on_become_follower().await;
|
||||
});
|
||||
|
||||
let election = election.clone();
|
||||
@@ -275,9 +303,9 @@ impl MetaSrv {
|
||||
});
|
||||
} else {
|
||||
self.procedure_manager
|
||||
.recover()
|
||||
.start()
|
||||
.await
|
||||
.context(RecoverProcedureSnafu)?;
|
||||
.context(StartProcedureManagerSnafu)?;
|
||||
}
|
||||
|
||||
info!("MetaSrv started");
|
||||
@@ -291,8 +319,12 @@ impl MetaSrv {
|
||||
.context(InitMetadataSnafu)
|
||||
}
|
||||
|
||||
pub fn shutdown(&self) {
|
||||
pub async fn shutdown(&self) -> Result<()> {
|
||||
self.started.store(false, Ordering::Relaxed);
|
||||
self.procedure_manager
|
||||
.stop()
|
||||
.await
|
||||
.context(StopProcedureManagerSnafu)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
|
||||
@@ -274,6 +274,9 @@ pub enum Error {
|
||||
|
||||
#[snafu(display("Missing table mutation handler"))]
|
||||
MissingTableMutationHandler { location: Location },
|
||||
|
||||
#[snafu(display("Range Query: {}", msg))]
|
||||
RangeQuery { msg: String, location: Location },
|
||||
}
|
||||
|
||||
impl ErrorExt for Error {
|
||||
@@ -281,7 +284,9 @@ impl ErrorExt for Error {
|
||||
use Error::*;
|
||||
|
||||
match self {
|
||||
QueryParse { .. } | MultipleStatements { .. } => StatusCode::InvalidSyntax,
|
||||
QueryParse { .. } | MultipleStatements { .. } | RangeQuery { .. } => {
|
||||
StatusCode::InvalidSyntax
|
||||
}
|
||||
UnsupportedExpr { .. }
|
||||
| Unimplemented { .. }
|
||||
| CatalogNotFound { .. }
|
||||
|
||||
@@ -79,7 +79,7 @@ impl DfLogicalPlanner {
|
||||
let result = sql_to_rel
|
||||
.statement_to_plan(df_stmt)
|
||||
.context(PlanSqlSnafu)?;
|
||||
let plan = RangePlanRewriter::new(table_provider, context_provider)
|
||||
let plan = RangePlanRewriter::new(table_provider)
|
||||
.rewrite(result)
|
||||
.await?;
|
||||
Ok(LogicalPlan::DfPlan(plan))
|
||||
|
||||
@@ -21,7 +21,7 @@ use std::task::{Context, Poll};
|
||||
use std::time::Duration;
|
||||
|
||||
use ahash::RandomState;
|
||||
use arrow::compute;
|
||||
use arrow::compute::{self, cast_with_options, CastOptions};
|
||||
use arrow_schema::{DataType, Field, Schema, SchemaRef, TimeUnit};
|
||||
use common_query::DfPhysicalPlan;
|
||||
use common_recordbatch::DfSendableRecordBatchStream;
|
||||
@@ -33,6 +33,7 @@ use datafusion::physical_plan::udaf::create_aggregate_expr as create_aggr_udf_ex
|
||||
use datafusion::physical_plan::{
|
||||
DisplayAs, DisplayFormatType, ExecutionPlan, RecordBatchStream, SendableRecordBatchStream,
|
||||
};
|
||||
use datafusion::physical_planner::create_physical_sort_expr;
|
||||
use datafusion_common::utils::get_arrayref_at_indices;
|
||||
use datafusion_common::{DFField, DFSchema, DFSchemaRef, DataFusionError, ScalarValue};
|
||||
use datafusion_expr::utils::exprlist_to_fields;
|
||||
@@ -54,22 +55,135 @@ use crate::error::{DataFusionSnafu, Result};
|
||||
|
||||
type Millisecond = <TimestampMillisecondType as ArrowPrimitiveType>::Native;
|
||||
|
||||
#[derive(PartialEq, Eq, Hash, Clone, Debug)]
|
||||
#[derive(PartialEq, Eq, Debug, Hash, Clone)]
|
||||
pub enum Fill {
|
||||
Null,
|
||||
Prev,
|
||||
Linear,
|
||||
Const(ScalarValue),
|
||||
}
|
||||
|
||||
impl Display for Fill {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
Fill::Null => write!(f, "NULL"),
|
||||
Fill::Prev => write!(f, "PREV"),
|
||||
Fill::Linear => write!(f, "LINEAR"),
|
||||
Fill::Const(x) => write!(f, "{}", x),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Fill {
|
||||
pub fn try_from_str(value: &str, datatype: &DataType) -> DfResult<Self> {
|
||||
let s = value.to_uppercase();
|
||||
match s.as_str() {
|
||||
"NULL" | "" => Ok(Self::Null),
|
||||
"PREV" => Ok(Self::Prev),
|
||||
"LINEAR" => {
|
||||
if datatype.is_numeric() {
|
||||
Ok(Self::Linear)
|
||||
} else {
|
||||
Err(DataFusionError::Plan(format!(
|
||||
"Use FILL LINEAR on Non-numeric DataType {}",
|
||||
datatype
|
||||
)))
|
||||
}
|
||||
}
|
||||
_ => ScalarValue::try_from_string(s.clone(), datatype)
|
||||
.map_err(|err| {
|
||||
DataFusionError::Plan(format!(
|
||||
"{} is not a valid fill option, fail to convert to a const value. {{ {} }}",
|
||||
s, err
|
||||
))
|
||||
})
|
||||
.map(Fill::Const),
|
||||
}
|
||||
}
|
||||
|
||||
/// The input `data` contains data on a complete time series.
/// If the filling strategy is `PREV` or `LINEAR`, callers must ensure that the incoming `data` is in ascending time order.
|
||||
pub fn apply_fill_strategy(&self, data: &mut [ScalarValue]) -> DfResult<()> {
|
||||
let len = data.len();
|
||||
for i in 0..len {
|
||||
if data[i].is_null() {
|
||||
match self {
|
||||
Fill::Null => continue,
|
||||
Fill::Prev => {
|
||||
if i != 0 {
|
||||
data[i] = data[i - 1].clone()
|
||||
}
|
||||
}
|
||||
Fill::Linear => {
|
||||
if 0 < i && i < len - 1 {
|
||||
match (&data[i - 1], &data[i + 1]) {
|
||||
(ScalarValue::Float64(Some(a)), ScalarValue::Float64(Some(b))) => {
|
||||
data[i] = ScalarValue::Float64(Some((a + b) / 2.0));
|
||||
}
|
||||
(ScalarValue::Float32(Some(a)), ScalarValue::Float32(Some(b))) => {
|
||||
data[i] = ScalarValue::Float32(Some((a + b) / 2.0));
|
||||
}
|
||||
(a, b) => {
|
||||
if !a.is_null() && !b.is_null() {
|
||||
return Err(DataFusionError::Execution(
|
||||
"RangePlan: Apply Fill LINEAR strategy on Non-floating type".to_string()));
|
||||
} else {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
Fill::Const(v) => data[i] = v.clone(),
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
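A hedged, standalone sketch of the new fill machinery: parse a FILL option for an Int64 column and apply it to a gap in a time series (the values are illustrative only).

use arrow_schema::DataType;
use datafusion_common::ScalarValue;

// try_from_str uppercases its input, so "prev" and "PREV" are equivalent.
let fill = Fill::try_from_str("prev", &DataType::Int64).unwrap();
let mut series = vec![
    ScalarValue::Int64(Some(1)),
    ScalarValue::Int64(None), // the gap to fill
    ScalarValue::Int64(Some(3)),
];
fill.apply_fill_strategy(&mut series).unwrap();
// PREV copies the previous value forward.
assert_eq!(series[1], ScalarValue::Int64(Some(1)));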
#[derive(Eq, Clone, Debug)]
|
||||
pub struct RangeFn {
|
||||
/// with format like `max(a) 300s null`
|
||||
pub name: String,
|
||||
pub data_type: DataType,
|
||||
pub expr: Expr,
|
||||
pub range: Duration,
|
||||
pub fill: String,
|
||||
pub fill: Fill,
|
||||
/// If the `Fill` strategy is `Linear` and the output is an integer,
/// the interpolation may produce a floating point number.
/// So for `FILL == LINEAR`, the entire column is implicitly converted to a Float type.
/// If `need_cast == true`, `data_type` may not be consistent with the type that `expr` generates.
|
||||
pub need_cast: bool,
|
||||
}
|
||||
|
||||
impl PartialEq for RangeFn {
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
self.name == other.name
|
||||
}
|
||||
}
|
||||
|
||||
impl PartialOrd for RangeFn {
|
||||
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
|
||||
Some(self.cmp(other))
|
||||
}
|
||||
}
|
||||
|
||||
impl Ord for RangeFn {
|
||||
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
|
||||
self.name.cmp(&other.name)
|
||||
}
|
||||
}
|
||||
|
||||
impl std::hash::Hash for RangeFn {
|
||||
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
|
||||
self.name.hash(state);
|
||||
}
|
||||
}
|
||||
|
||||
impl Display for RangeFn {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(
|
||||
f,
|
||||
"RangeFn {{ expr:{} range:{}s fill:{} }}",
|
||||
self.expr.display_name().unwrap_or("?".into()),
|
||||
self.range.as_secs(),
|
||||
self.fill,
|
||||
)
|
||||
write!(f, "{}", self.name)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -105,16 +219,21 @@ impl RangeSelect {
|
||||
) -> Result<Self> {
|
||||
let mut fields = range_expr
|
||||
.iter()
|
||||
.map(|RangeFn { expr, .. }| {
|
||||
Ok(DFField::new_unqualified(
|
||||
&expr.display_name()?,
|
||||
expr.get_type(input.schema())?,
|
||||
// TODO(Taylor-lagrange): We have not implemented fill currently,
|
||||
// it is possible that some columns may not be able to aggregate data,
|
||||
// so we temporarily set that all data is nullable
|
||||
true,
|
||||
))
|
||||
})
|
||||
.map(
|
||||
|RangeFn {
|
||||
name,
|
||||
data_type,
|
||||
fill,
|
||||
..
|
||||
}| {
|
||||
Ok(DFField::new_unqualified(
|
||||
name,
|
||||
data_type.clone(),
|
||||
// Only when the column is filled with a Const value can it be marked non-null
|
||||
!matches!(fill, Fill::Const(..)),
|
||||
))
|
||||
},
|
||||
)
|
||||
.collect::<DfResult<Vec<_>>>()
|
||||
.context(DataFusionSnafu)?;
|
||||
// add align_ts
|
||||
@@ -135,10 +254,8 @@ impl RangeSelect {
|
||||
DFSchema::new_with_metadata(by_fields, input.schema().metadata().clone())
|
||||
.context(DataFusionSnafu)?,
|
||||
);
|
||||
// If the result of the project plan happens to be the schema of the range plan, no project plan is required
|
||||
// that need project is identical to range plan schema.
|
||||
// 1. all exprs in project must belong to range schema
|
||||
// 2. range schema and project exprs must have same size
|
||||
// If the results of project plan can be obtained directly from range plan without any additional calculations, no project plan is required.
|
||||
// We can simply project the final output of the range plan to produce the final result.
|
||||
let schema_project = projection_expr
|
||||
.iter()
|
||||
.map(|project_expr| {
|
||||
@@ -268,52 +385,68 @@ impl RangeSelect {
|
||||
.range_expr
|
||||
.iter()
|
||||
.map(|range_fn| {
|
||||
let (expr, args) = match &range_fn.expr {
|
||||
let expr = match &range_fn.expr {
|
||||
Expr::AggregateFunction(aggr) => {
|
||||
let args = self.create_physical_expr_list(
|
||||
&aggr.args,
|
||||
input_dfschema,
|
||||
&input_schema,
|
||||
session_state,
|
||||
)?;
|
||||
Ok((
|
||||
create_aggr_expr(
|
||||
&aggr.fun,
|
||||
false,
|
||||
&args,
|
||||
&[],
|
||||
let order_by = if let Some(exprs) = &aggr.order_by {
|
||||
exprs
|
||||
.iter()
|
||||
.map(|x| {
|
||||
create_physical_sort_expr(
|
||||
x,
|
||||
input_dfschema,
|
||||
&input_schema,
|
||||
session_state.execution_props(),
|
||||
)
|
||||
})
|
||||
.collect::<DfResult<Vec<_>>>()?
|
||||
} else {
|
||||
vec![]
|
||||
};
|
||||
let expr = create_aggr_expr(
|
||||
&aggr.fun,
|
||||
false,
|
||||
&self.create_physical_expr_list(
|
||||
&aggr.args,
|
||||
input_dfschema,
|
||||
&input_schema,
|
||||
range_fn.expr.display_name()?,
|
||||
session_state,
|
||||
)?,
|
||||
args,
|
||||
))
|
||||
&order_by,
|
||||
&input_schema,
|
||||
range_fn.expr.display_name()?,
|
||||
)?;
|
||||
Ok(expr)
|
||||
}
|
||||
Expr::AggregateUDF(aggr_udf) => {
|
||||
let args = self.create_physical_expr_list(
|
||||
&aggr_udf.args,
|
||||
input_dfschema,
|
||||
&input_schema,
|
||||
session_state,
|
||||
)?;
|
||||
Ok((
|
||||
create_aggr_udf_expr(
|
||||
&aggr_udf.fun,
|
||||
&args,
|
||||
let expr = create_aggr_udf_expr(
|
||||
&aggr_udf.fun,
|
||||
&self.create_physical_expr_list(
|
||||
&aggr_udf.args,
|
||||
input_dfschema,
|
||||
&input_schema,
|
||||
range_fn.expr.display_name()?,
|
||||
session_state,
|
||||
)?,
|
||||
args,
|
||||
))
|
||||
&input_schema,
|
||||
range_fn.expr.display_name()?,
|
||||
)?;
|
||||
Ok(expr)
|
||||
}
|
||||
_ => Err(DataFusionError::Plan(format!(
|
||||
"Unexpected Expr:{} in RangeSelect",
|
||||
range_fn.expr.display_name()?
|
||||
))),
|
||||
}?;
|
||||
let args = expr.expressions();
|
||||
Ok(RangeFnExec {
|
||||
expr,
|
||||
args,
|
||||
range: range_fn.range.as_millis() as Millisecond,
|
||||
fill: range_fn.fill.clone(),
|
||||
need_cast: if range_fn.need_cast {
|
||||
Some(range_fn.data_type.clone())
|
||||
} else {
|
||||
None
|
||||
},
|
||||
})
|
||||
})
|
||||
.collect::<DfResult<Vec<_>>>()?;
|
||||
@@ -348,6 +481,8 @@ struct RangeFnExec {
|
||||
pub expr: Arc<dyn AggregateExpr>,
|
||||
pub args: Vec<Arc<dyn PhysicalExpr>>,
|
||||
pub range: Millisecond,
|
||||
pub fill: Fill,
|
||||
pub need_cast: Option<DataType>,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
@@ -540,6 +675,15 @@ fn align_to_calendar(
|
||||
}
|
||||
}
|
||||
|
||||
fn cast_scalar_values(values: &mut [ScalarValue], data_type: &DataType) -> DfResult<()> {
|
||||
let array = ScalarValue::iter_to_array(values.to_vec())?;
|
||||
let cast_array = cast_with_options(&array, data_type, &CastOptions::default())?;
|
||||
for (i, value) in values.iter_mut().enumerate() {
|
||||
*value = ScalarValue::try_from_array(&cast_array, i)?;
|
||||
}
|
||||
Ok(())
|
||||
}
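A hedged illustration of the cast helper above: integer scalars gathered for a FILL LINEAR column are promoted to Float64 before interpolation, and nulls survive the cast.

let mut values = vec![ScalarValue::Int64(Some(1)), ScalarValue::Int64(None)];
cast_scalar_values(&mut values, &DataType::Float64).unwrap();
assert_eq!(values[0], ScalarValue::Float64(Some(1.0)));
assert_eq!(values[1], ScalarValue::Float64(None));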
impl RangeSelectStream {
|
||||
fn evaluate_many(
|
||||
&self,
|
||||
@@ -648,20 +792,57 @@ impl RangeSelectStream {
|
||||
let mut columns: Vec<Arc<dyn Array>> =
|
||||
Vec::with_capacity(1 + self.range_exec.len() + self.by.len());
|
||||
let mut ts_builder = TimestampMillisecondBuilder::with_capacity(self.output_num_rows);
|
||||
let mut all_scalar = vec![vec![]; self.range_exec.len()];
|
||||
let mut all_scalar = vec![Vec::with_capacity(self.output_num_rows); self.range_exec.len()];
|
||||
let mut by_rows = Vec::with_capacity(self.output_num_rows);
|
||||
let mut start_index = 0;
|
||||
// RangePlan is calculated on a per-row basis. If a column uses the PREV or LINEAR filling strategy,
// we must order the data of the entire time series to determine the value that fills each NULL.
|
||||
let need_sort_output = self
|
||||
.range_exec
|
||||
.iter()
|
||||
.any(|range| range.fill == Fill::Linear || range.fill == Fill::Prev);
|
||||
for SeriesState {
|
||||
row,
|
||||
align_ts_accumulator,
|
||||
} in self.series_map.values()
|
||||
{
|
||||
for (ts, accumulators) in align_ts_accumulator {
|
||||
for (i, accumulator) in accumulators.iter().enumerate() {
|
||||
all_scalar[i].push(accumulator.evaluate()?);
|
||||
// collect data on time series
|
||||
if !need_sort_output {
|
||||
for (ts, accumulators) in align_ts_accumulator {
|
||||
for (i, accumulator) in accumulators.iter().enumerate() {
|
||||
all_scalar[i].push(accumulator.evaluate()?);
|
||||
}
|
||||
ts_builder.append_value(*ts);
|
||||
}
|
||||
by_rows.push(row.row());
|
||||
ts_builder.append_value(*ts);
|
||||
} else {
|
||||
let mut keys = align_ts_accumulator.keys().copied().collect::<Vec<_>>();
|
||||
keys.sort();
|
||||
for key in &keys {
|
||||
for (i, accumulator) in
|
||||
align_ts_accumulator.get(key).unwrap().iter().enumerate()
|
||||
{
|
||||
all_scalar[i].push(accumulator.evaluate()?);
|
||||
}
|
||||
}
|
||||
ts_builder.append_slice(&keys);
|
||||
}
|
||||
// apply fill strategy on time series
|
||||
for (
|
||||
i,
|
||||
RangeFnExec {
|
||||
fill, need_cast, ..
|
||||
},
|
||||
) in self.range_exec.iter().enumerate()
|
||||
{
|
||||
let time_series_data =
|
||||
&mut all_scalar[i][start_index..start_index + align_ts_accumulator.len()];
|
||||
if let Some(data_type) = need_cast {
|
||||
cast_scalar_values(time_series_data, data_type)?;
|
||||
}
|
||||
fill.apply_fill_strategy(time_series_data)?;
|
||||
}
|
||||
by_rows.resize(by_rows.len() + align_ts_accumulator.len(), row.row());
|
||||
start_index += align_ts_accumulator.len();
|
||||
}
|
||||
for column_scalar in all_scalar {
|
||||
columns.push(ScalarValue::iter_to_array(column_scalar)?);
|
||||
@@ -720,15 +901,15 @@ impl Stream for RangeSelectStream {
|
||||
}
|
||||
ExecutionState::ProducingOutput => {
|
||||
let result = self.generate_output();
|
||||
match result {
|
||||
return match result {
|
||||
// made output
|
||||
Ok(batch) => {
|
||||
self.exec_state = ExecutionState::Done;
|
||||
return Poll::Ready(Some(Ok(batch)));
|
||||
Poll::Ready(Some(Ok(batch)))
|
||||
}
|
||||
// error making output
|
||||
Err(error) => return Poll::Ready(Some(Err(error))),
|
||||
}
|
||||
Err(error) => Poll::Ready(Some(Err(error))),
|
||||
};
|
||||
}
|
||||
ExecutionState::Done => return Poll::Ready(None),
|
||||
}
|
||||
@@ -738,6 +919,34 @@ impl Stream for RangeSelectStream {
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
macro_rules! nullable_array {
|
||||
($builder:ident,) => {
|
||||
};
|
||||
($array_type:ident ; $($tail:tt)*) => {
|
||||
paste::item! {
|
||||
{
|
||||
let mut builder = arrow::array::[<$array_type Builder>]::new();
|
||||
nullable_array!(builder, $($tail)*);
|
||||
builder.finish()
|
||||
}
|
||||
}
|
||||
};
|
||||
($builder:ident, null) => {
|
||||
$builder.append_null();
|
||||
};
|
||||
($builder:ident, null, $($tail:tt)*) => {
|
||||
$builder.append_null();
|
||||
nullable_array!($builder, $($tail)*);
|
||||
};
|
||||
($builder:ident, $value:literal) => {
|
||||
$builder.append_value($value);
|
||||
};
|
||||
($builder:ident, $value:literal, $($tail:tt)*) => {
|
||||
$builder.append_value($value);
|
||||
nullable_array!($builder, $($tail)*);
|
||||
};
|
||||
}
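A quick illustration (not part of the diff) of what the test helper expands to; the `Array` trait import is assumed for `len` and `is_null`.

#[test]
fn nullable_array_macro_example() {
    use arrow::array::Array;

    // Builds an Int64Array with a null in the middle: [1, NULL, 3].
    let array = nullable_array!(Int64; 1, null, 3);
    assert_eq!(array.len(), 3);
    assert!(array.is_null(1));
}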
use arrow_schema::SortOptions;
|
||||
use datafusion::arrow::datatypes::{
|
||||
ArrowPrimitiveType, DataType, Field, Schema, TimestampMillisecondType,
|
||||
@@ -747,33 +956,45 @@ mod test {
|
||||
use datafusion::prelude::SessionContext;
|
||||
use datafusion_physical_expr::expressions::{self, Column};
|
||||
use datafusion_physical_expr::PhysicalSortExpr;
|
||||
use datatypes::arrow::array::{Int64Array, TimestampMillisecondArray};
|
||||
use datatypes::arrow::array::TimestampMillisecondArray;
|
||||
use datatypes::arrow_array::StringArray;
|
||||
|
||||
use super::*;
|
||||
|
||||
const TIME_INDEX_COLUMN: &str = "timestamp";
|
||||
|
||||
fn prepare_test_data() -> MemoryExec {
|
||||
fn prepare_test_data(is_float: bool) -> MemoryExec {
|
||||
let schema = Arc::new(Schema::new(vec![
|
||||
Field::new(TIME_INDEX_COLUMN, TimestampMillisecondType::DATA_TYPE, true),
|
||||
Field::new("value", DataType::Int64, true),
|
||||
Field::new(
|
||||
"value",
|
||||
if is_float {
|
||||
DataType::Float64
|
||||
} else {
|
||||
DataType::Int64
|
||||
},
|
||||
true,
|
||||
),
|
||||
Field::new("host", DataType::Utf8, true),
|
||||
]));
|
||||
let timestamp_column = Arc::new(TimestampMillisecondArray::from(vec![
|
||||
// host 1 every 5s
|
||||
0, 5_000, 10_000, 15_000, 20_000, 25_000, 30_000, 35_000, 40_000,
|
||||
// host 2 every 5s
|
||||
0, 5_000, 10_000, 15_000, 20_000, 25_000, 30_000, 35_000, 40_000,
|
||||
let timestamp_column: Arc<dyn Array> = Arc::new(TimestampMillisecondArray::from(vec![
|
||||
0, 5_000, 10_000, 15_000, 20_000, // host 1 every 5s
|
||||
0, 5_000, 10_000, 15_000, 20_000, // host 2 every 5s
|
||||
])) as _;
|
||||
let values = vec![
|
||||
0, 1, 2, 3, 4, 5, 6, 7, 8, // data for host 1
|
||||
9, 10, 11, 12, 13, 14, 15, 16, 17, // data for host 2
|
||||
];
|
||||
let mut host = vec!["host1"; 9];
|
||||
host.extend(vec!["host2"; 9]);
|
||||
let value_column = Arc::new(Int64Array::from(values)) as _;
|
||||
let host_column = Arc::new(StringArray::from(host)) as _;
|
||||
let mut host = vec!["host1"; 5];
|
||||
host.extend(vec!["host2"; 5]);
|
||||
let value_column: Arc<dyn Array> = if is_float {
|
||||
Arc::new(nullable_array!(Float64;
|
||||
0.0, null, 1.0, null, 2.0, // data for host 1
|
||||
3.0, null, 4.0, null, 5.0 // data for host 2
|
||||
)) as _
|
||||
} else {
|
||||
Arc::new(nullable_array!(Int64;
|
||||
0, null, 1, null, 2, // data for host 1
|
||||
3, null, 4, null, 5 // data for host 2
|
||||
)) as _
|
||||
};
|
||||
let host_column: Arc<dyn Array> = Arc::new(StringArray::from(host)) as _;
|
||||
let data = RecordBatch::try_new(
|
||||
schema.clone(),
|
||||
vec![timestamp_column, value_column, host_column],
|
||||
@@ -787,12 +1008,25 @@ mod test {
|
||||
range1: Millisecond,
|
||||
range2: Millisecond,
|
||||
align: Millisecond,
|
||||
fill: Fill,
|
||||
is_float: bool,
|
||||
expected: String,
|
||||
) {
|
||||
let memory_exec = Arc::new(prepare_test_data());
|
||||
let data_type = if is_float {
|
||||
DataType::Float64
|
||||
} else {
|
||||
DataType::Int64
|
||||
};
|
||||
let (need_cast, schema_data_type) = if !is_float && fill == Fill::Linear {
|
||||
// data_type = DataType::Float64;
|
||||
(Some(DataType::Float64), DataType::Float64)
|
||||
} else {
|
||||
(None, data_type.clone())
|
||||
};
|
||||
let memory_exec = Arc::new(prepare_test_data(is_float));
|
||||
let schema = Arc::new(Schema::new(vec![
|
||||
Field::new("MIN(value)", DataType::Int64, true),
|
||||
Field::new("MAX(value)", DataType::Int64, true),
|
||||
Field::new("MIN(value)", schema_data_type.clone(), true),
|
||||
Field::new("MAX(value)", schema_data_type, true),
|
||||
Field::new(TIME_INDEX_COLUMN, TimestampMillisecondType::DATA_TYPE, true),
|
||||
Field::new("host", DataType::Utf8, true),
|
||||
]));
|
||||
@@ -803,19 +1037,23 @@ mod test {
|
||||
expr: Arc::new(expressions::Min::new(
|
||||
Arc::new(Column::new("value", 1)),
|
||||
"MIN(value)",
|
||||
DataType::Int64,
|
||||
data_type.clone(),
|
||||
)),
|
||||
args: vec![Arc::new(Column::new("value", 1))],
|
||||
range: range1,
|
||||
fill: fill.clone(),
|
||||
need_cast: need_cast.clone(),
|
||||
},
|
||||
RangeFnExec {
|
||||
expr: Arc::new(expressions::Max::new(
|
||||
Arc::new(Column::new("value", 1)),
|
||||
"MAX(value)",
|
||||
DataType::Int64,
|
||||
data_type,
|
||||
)),
|
||||
args: vec![Arc::new(Column::new("value", 1))],
|
||||
range: range2,
|
||||
fill,
|
||||
need_cast,
|
||||
},
|
||||
],
|
||||
align,
|
||||
@@ -852,85 +1090,225 @@ mod test {
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let result_literal = datatypes::arrow::util::pretty::pretty_format_batches(&result)
|
||||
let result_literal = arrow::util::pretty::pretty_format_batches(&result)
|
||||
.unwrap()
|
||||
.to_string();
|
||||
|
||||
assert_eq!(result_literal, expected);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn range_10s_align_5s() {
|
||||
let expected = String::from(
|
||||
"+------------+------------+---------------------+-------+\
|
||||
\n| MIN(value) | MAX(value) | timestamp | host |\
|
||||
\n+------------+------------+---------------------+-------+\
|
||||
\n| 0 | 0 | 1970-01-01T00:00:00 | host1 |\
|
||||
\n| 0 | 1 | 1970-01-01T00:00:05 | host1 |\
|
||||
\n| 1 | 2 | 1970-01-01T00:00:10 | host1 |\
|
||||
\n| 2 | 3 | 1970-01-01T00:00:15 | host1 |\
|
||||
\n| 3 | 4 | 1970-01-01T00:00:20 | host1 |\
|
||||
\n| 4 | 5 | 1970-01-01T00:00:25 | host1 |\
|
||||
\n| 5 | 6 | 1970-01-01T00:00:30 | host1 |\
|
||||
\n| 6 | 7 | 1970-01-01T00:00:35 | host1 |\
|
||||
\n| 7 | 8 | 1970-01-01T00:00:40 | host1 |\
|
||||
\n| 8 | 8 | 1970-01-01T00:00:45 | host1 |\
|
||||
\n| 9 | 9 | 1970-01-01T00:00:00 | host2 |\
|
||||
\n| 9 | 10 | 1970-01-01T00:00:05 | host2 |\
|
||||
\n| 10 | 11 | 1970-01-01T00:00:10 | host2 |\
|
||||
\n| 11 | 12 | 1970-01-01T00:00:15 | host2 |\
|
||||
\n| 12 | 13 | 1970-01-01T00:00:20 | host2 |\
|
||||
\n| 13 | 14 | 1970-01-01T00:00:25 | host2 |\
|
||||
\n| 14 | 15 | 1970-01-01T00:00:30 | host2 |\
|
||||
\n| 15 | 16 | 1970-01-01T00:00:35 | host2 |\
|
||||
\n| 16 | 17 | 1970-01-01T00:00:40 | host2 |\
|
||||
\n| 17 | 17 | 1970-01-01T00:00:45 | host2 |\
|
||||
\n+------------+------------+---------------------+-------+",
|
||||
);
|
||||
do_range_select_test(10_000, 10_000, 5_000, expected).await;
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn range_10s_align_1000s() {
|
||||
let expected = String::from(
|
||||
"+------------+------------+---------------------+-------+\
|
||||
\n| MIN(value) | MAX(value) | timestamp | host |\
|
||||
\n+------------+------------+---------------------+-------+\
|
||||
\n| 0 | 0 | 1970-01-01T00:00:00 | host1 |\
|
||||
\n| 9 | 9 | 1970-01-01T00:00:00 | host2 |\
|
||||
\n| 0.0 | 0.0 | 1970-01-01T00:00:00 | host1 |\
|
||||
\n| 3.0 | 3.0 | 1970-01-01T00:00:00 | host2 |\
|
||||
\n+------------+------------+---------------------+-------+",
|
||||
);
|
||||
do_range_select_test(10_000, 10_000, 1_000_000, expected).await;
|
||||
do_range_select_test(10_000, 10_000, 1_000_000, Fill::Null, true, expected).await;
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn range_10s_5s_align_5s() {
|
||||
async fn range_fill_null() {
|
||||
let expected = String::from(
|
||||
"+------------+------------+---------------------+-------+\
|
||||
\n| MIN(value) | MAX(value) | timestamp | host |\
|
||||
\n+------------+------------+---------------------+-------+\
|
||||
\n| 0.0 | 0.0 | 1970-01-01T00:00:00 | host1 |\
|
||||
\n| 0.0 | | 1970-01-01T00:00:05 | host1 |\
|
||||
\n| 1.0 | 1.0 | 1970-01-01T00:00:10 | host1 |\
|
||||
\n| 1.0 | | 1970-01-01T00:00:15 | host1 |\
|
||||
\n| 2.0 | 2.0 | 1970-01-01T00:00:20 | host1 |\
|
||||
\n| 2.0 | | 1970-01-01T00:00:25 | host1 |\
|
||||
\n| 3.0 | 3.0 | 1970-01-01T00:00:00 | host2 |\
|
||||
\n| 3.0 | | 1970-01-01T00:00:05 | host2 |\
|
||||
\n| 4.0 | 4.0 | 1970-01-01T00:00:10 | host2 |\
|
||||
\n| 4.0 | | 1970-01-01T00:00:15 | host2 |\
|
||||
\n| 5.0 | 5.0 | 1970-01-01T00:00:20 | host2 |\
|
||||
\n| 5.0 | | 1970-01-01T00:00:25 | host2 |\
|
||||
\n+------------+------------+---------------------+-------+",
|
||||
);
|
||||
do_range_select_test(10_000, 5_000, 5_000, Fill::Null, true, expected).await;
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn range_fill_prev() {
|
||||
let expected = String::from(
|
||||
"+------------+------------+---------------------+-------+\
|
||||
\n| MIN(value) | MAX(value) | timestamp | host |\
|
||||
\n+------------+------------+---------------------+-------+\
|
||||
\n| 0.0 | 0.0 | 1970-01-01T00:00:00 | host1 |\
|
||||
\n| 0.0 | 0.0 | 1970-01-01T00:00:05 | host1 |\
|
||||
\n| 1.0 | 1.0 | 1970-01-01T00:00:10 | host1 |\
|
||||
\n| 1.0 | 1.0 | 1970-01-01T00:00:15 | host1 |\
|
||||
\n| 2.0 | 2.0 | 1970-01-01T00:00:20 | host1 |\
|
||||
\n| 2.0 | 2.0 | 1970-01-01T00:00:25 | host1 |\
|
||||
\n| 3.0 | 3.0 | 1970-01-01T00:00:00 | host2 |\
|
||||
\n| 3.0 | 3.0 | 1970-01-01T00:00:05 | host2 |\
|
||||
\n| 4.0 | 4.0 | 1970-01-01T00:00:10 | host2 |\
|
||||
\n| 4.0 | 4.0 | 1970-01-01T00:00:15 | host2 |\
|
||||
\n| 5.0 | 5.0 | 1970-01-01T00:00:20 | host2 |\
|
||||
\n| 5.0 | 5.0 | 1970-01-01T00:00:25 | host2 |\
|
||||
\n+------------+------------+---------------------+-------+",
|
||||
);
|
||||
do_range_select_test(10_000, 5_000, 5_000, Fill::Prev, true, expected).await;
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn range_fill_linear() {
|
||||
let expected = String::from(
|
||||
"+------------+------------+---------------------+-------+\
|
||||
\n| MIN(value) | MAX(value) | timestamp | host |\
|
||||
\n+------------+------------+---------------------+-------+\
|
||||
\n| 0.0 | 0.0 | 1970-01-01T00:00:00 | host1 |\
|
||||
\n| 0.0 | 0.5 | 1970-01-01T00:00:05 | host1 |\
|
||||
\n| 1.0 | 1.0 | 1970-01-01T00:00:10 | host1 |\
|
||||
\n| 1.0 | 1.5 | 1970-01-01T00:00:15 | host1 |\
|
||||
\n| 2.0 | 2.0 | 1970-01-01T00:00:20 | host1 |\
|
||||
\n| 2.0 | | 1970-01-01T00:00:25 | host1 |\
|
||||
\n| 3.0 | 3.0 | 1970-01-01T00:00:00 | host2 |\
|
||||
\n| 3.0 | 3.5 | 1970-01-01T00:00:05 | host2 |\
|
||||
\n| 4.0 | 4.0 | 1970-01-01T00:00:10 | host2 |\
|
||||
\n| 4.0 | 4.5 | 1970-01-01T00:00:15 | host2 |\
|
||||
\n| 5.0 | 5.0 | 1970-01-01T00:00:20 | host2 |\
|
||||
\n| 5.0 | | 1970-01-01T00:00:25 | host2 |\
|
||||
\n+------------+------------+---------------------+-------+",
|
||||
);
|
||||
do_range_select_test(10_000, 5_000, 5_000, Fill::Linear, true, expected).await;
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn range_fill_integer_null() {
|
||||
let expected = String::from(
|
||||
"+------------+------------+---------------------+-------+\
|
||||
\n| MIN(value) | MAX(value) | timestamp | host |\
|
||||
\n+------------+------------+---------------------+-------+\
|
||||
\n| 0 | 0 | 1970-01-01T00:00:00 | host1 |\
|
||||
\n| 0 | 1 | 1970-01-01T00:00:05 | host1 |\
|
||||
\n| 1 | 2 | 1970-01-01T00:00:10 | host1 |\
|
||||
\n| 2 | 3 | 1970-01-01T00:00:15 | host1 |\
|
||||
\n| 3 | 4 | 1970-01-01T00:00:20 | host1 |\
|
||||
\n| 4 | 5 | 1970-01-01T00:00:25 | host1 |\
|
||||
\n| 5 | 6 | 1970-01-01T00:00:30 | host1 |\
|
||||
\n| 6 | 7 | 1970-01-01T00:00:35 | host1 |\
|
||||
\n| 7 | 8 | 1970-01-01T00:00:40 | host1 |\
|
||||
\n| 8 | | 1970-01-01T00:00:45 | host1 |\
|
||||
\n| 9 | 9 | 1970-01-01T00:00:00 | host2 |\
|
||||
\n| 9 | 10 | 1970-01-01T00:00:05 | host2 |\
|
||||
\n| 10 | 11 | 1970-01-01T00:00:10 | host2 |\
|
||||
\n| 11 | 12 | 1970-01-01T00:00:15 | host2 |\
|
||||
\n| 12 | 13 | 1970-01-01T00:00:20 | host2 |\
|
||||
\n| 13 | 14 | 1970-01-01T00:00:25 | host2 |\
|
||||
\n| 14 | 15 | 1970-01-01T00:00:30 | host2 |\
|
||||
\n| 15 | 16 | 1970-01-01T00:00:35 | host2 |\
|
||||
\n| 16 | 17 | 1970-01-01T00:00:40 | host2 |\
|
||||
\n| 17 | | 1970-01-01T00:00:45 | host2 |\
|
||||
\n| 0 | | 1970-01-01T00:00:05 | host1 |\
|
||||
\n| 1 | 1 | 1970-01-01T00:00:10 | host1 |\
|
||||
\n| 1 | | 1970-01-01T00:00:15 | host1 |\
|
||||
\n| 2 | 2 | 1970-01-01T00:00:20 | host1 |\
|
||||
\n| 2 | | 1970-01-01T00:00:25 | host1 |\
|
||||
\n| 3 | 3 | 1970-01-01T00:00:00 | host2 |\
|
||||
\n| 3 | | 1970-01-01T00:00:05 | host2 |\
|
||||
\n| 4 | 4 | 1970-01-01T00:00:10 | host2 |\
|
||||
\n| 4 | | 1970-01-01T00:00:15 | host2 |\
|
||||
\n| 5 | 5 | 1970-01-01T00:00:20 | host2 |\
|
||||
\n| 5 | | 1970-01-01T00:00:25 | host2 |\
|
||||
\n+------------+------------+---------------------+-------+",
|
||||
);
|
||||
do_range_select_test(10_000, 5_000, 5_000, expected).await;
|
||||
do_range_select_test(10_000, 5_000, 5_000, Fill::Null, false, expected).await;
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn range_fill_integer_linear() {
|
||||
let expected = String::from(
|
||||
"+------------+------------+---------------------+-------+\
|
||||
\n| MIN(value) | MAX(value) | timestamp | host |\
|
||||
\n+------------+------------+---------------------+-------+\
|
||||
\n| 0.0 | 0.0 | 1970-01-01T00:00:00 | host1 |\
|
||||
\n| 0.0 | 0.5 | 1970-01-01T00:00:05 | host1 |\
|
||||
\n| 1.0 | 1.0 | 1970-01-01T00:00:10 | host1 |\
|
||||
\n| 1.0 | 1.5 | 1970-01-01T00:00:15 | host1 |\
|
||||
\n| 2.0 | 2.0 | 1970-01-01T00:00:20 | host1 |\
|
||||
\n| 2.0 | | 1970-01-01T00:00:25 | host1 |\
|
||||
\n| 3.0 | 3.0 | 1970-01-01T00:00:00 | host2 |\
|
||||
\n| 3.0 | 3.5 | 1970-01-01T00:00:05 | host2 |\
|
||||
\n| 4.0 | 4.0 | 1970-01-01T00:00:10 | host2 |\
|
||||
\n| 4.0 | 4.5 | 1970-01-01T00:00:15 | host2 |\
|
||||
\n| 5.0 | 5.0 | 1970-01-01T00:00:20 | host2 |\
|
||||
\n| 5.0 | | 1970-01-01T00:00:25 | host2 |\
|
||||
\n+------------+------------+---------------------+-------+",
|
||||
);
|
||||
do_range_select_test(10_000, 5_000, 5_000, Fill::Linear, false, expected).await;
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn range_fill_const() {
|
||||
let expected = String::from(
|
||||
"+------------+------------+---------------------+-------+\
|
||||
\n| MIN(value) | MAX(value) | timestamp | host |\
|
||||
\n+------------+------------+---------------------+-------+\
|
||||
\n| 0.0 | 0.0 | 1970-01-01T00:00:00 | host1 |\
|
||||
\n| 0.0 | 6.6 | 1970-01-01T00:00:05 | host1 |\
|
||||
\n| 1.0 | 1.0 | 1970-01-01T00:00:10 | host1 |\
|
||||
\n| 1.0 | 6.6 | 1970-01-01T00:00:15 | host1 |\
|
||||
\n| 2.0 | 2.0 | 1970-01-01T00:00:20 | host1 |\
|
||||
\n| 2.0 | 6.6 | 1970-01-01T00:00:25 | host1 |\
|
||||
\n| 3.0 | 3.0 | 1970-01-01T00:00:00 | host2 |\
|
||||
\n| 3.0 | 6.6 | 1970-01-01T00:00:05 | host2 |\
|
||||
\n| 4.0 | 4.0 | 1970-01-01T00:00:10 | host2 |\
|
||||
\n| 4.0 | 6.6 | 1970-01-01T00:00:15 | host2 |\
|
||||
\n| 5.0 | 5.0 | 1970-01-01T00:00:20 | host2 |\
|
||||
\n| 5.0 | 6.6 | 1970-01-01T00:00:25 | host2 |\
|
||||
\n+------------+------------+---------------------+-------+",
|
||||
);
|
||||
do_range_select_test(
|
||||
10_000,
|
||||
5_000,
|
||||
5_000,
|
||||
Fill::Const(ScalarValue::Float64(Some(6.6))),
|
||||
true,
|
||||
expected,
|
||||
)
|
||||
.await;
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn fill_test() {
|
||||
assert!(Fill::try_from_str("Linear", &DataType::UInt8).unwrap() == Fill::Linear);
|
||||
assert_eq!(
|
||||
Fill::try_from_str("Linear", &DataType::Boolean)
|
||||
.unwrap_err()
|
||||
.to_string(),
|
||||
"Error during planning: Use FILL LINEAR on Non-numeric DataType Boolean"
|
||||
);
|
||||
assert_eq!(
|
||||
Fill::try_from_str("WHAT", &DataType::UInt8)
|
||||
.unwrap_err()
|
||||
.to_string(),
|
||||
"Error during planning: WHAT is not a valid fill option, fail to convert to a const value. { Arrow error: Cast error: Cannot cast string 'WHAT' to value of UInt8 type }"
|
||||
);
|
||||
assert_eq!(
|
||||
Fill::try_from_str("8.0", &DataType::UInt8)
|
||||
.unwrap_err()
|
||||
.to_string(),
|
||||
"Error during planning: 8.0 is not a valid fill option, fail to convert to a const value. { Arrow error: Cast error: Cannot cast string '8.0' to value of UInt8 type }"
|
||||
);
|
||||
assert!(
|
||||
Fill::try_from_str("8", &DataType::UInt8).unwrap()
|
||||
== Fill::Const(ScalarValue::UInt8(Some(8)))
|
||||
);
|
||||
let mut test1 = vec![
|
||||
ScalarValue::UInt8(Some(8)),
|
||||
ScalarValue::UInt8(None),
|
||||
ScalarValue::UInt8(Some(9)),
|
||||
];
|
||||
Fill::Null.apply_fill_strategy(&mut test1).unwrap();
|
||||
assert_eq!(test1[1], ScalarValue::UInt8(None));
|
||||
Fill::Prev.apply_fill_strategy(&mut test1).unwrap();
|
||||
assert_eq!(test1[1], ScalarValue::UInt8(Some(8)));
|
||||
test1[1] = ScalarValue::UInt8(None);
|
||||
Fill::Const(ScalarValue::UInt8(Some(10)))
|
||||
.apply_fill_strategy(&mut test1)
|
||||
.unwrap();
|
||||
assert_eq!(test1[1], ScalarValue::UInt8(Some(10)));
|
||||
test1[1] = ScalarValue::UInt8(None);
|
||||
assert_eq!(
|
||||
Fill::Linear
|
||||
.apply_fill_strategy(&mut test1)
|
||||
.unwrap_err()
|
||||
.to_string(),
|
||||
"Execution error: RangePlan: Apply Fill LINEAR strategy on Non-floating type"
|
||||
);
|
||||
let mut test2 = vec![
|
||||
ScalarValue::Float32(Some(8.0)),
|
||||
ScalarValue::Float32(None),
|
||||
ScalarValue::Float32(Some(9.0)),
|
||||
];
|
||||
Fill::Linear.apply_fill_strategy(&mut test2).unwrap();
|
||||
assert_eq!(test2[1], ScalarValue::Float32(Some(8.5)));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -12,10 +12,11 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::str::FromStr;
|
||||
use std::collections::BTreeSet;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use arrow_schema::DataType;
|
||||
use async_recursion::async_recursion;
|
||||
use catalog::table_source::DfTableSourceProvider;
|
||||
use datafusion::datasource::DefaultTableSource;
|
||||
@@ -23,47 +24,62 @@ use datafusion::prelude::Column;
|
||||
use datafusion::scalar::ScalarValue;
|
||||
use datafusion_common::tree_node::{TreeNode, TreeNodeRewriter, VisitRecursion};
|
||||
use datafusion_common::{DFSchema, DataFusionError, Result as DFResult};
|
||||
use datafusion_expr::expr::{AggregateFunction, AggregateUDF, ScalarUDF};
|
||||
use datafusion_expr::expr::ScalarUDF;
|
||||
use datafusion_expr::{
|
||||
AggregateFunction as AggregateFn, Expr, Extension, LogicalPlan, LogicalPlanBuilder, Projection,
|
||||
Aggregate, Expr, ExprSchemable, Extension, LogicalPlan, LogicalPlanBuilder, Projection,
|
||||
};
|
||||
use datafusion_sql::planner::ContextProvider;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use promql_parser::util::parse_duration;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use table::table::adapter::DfTableProviderAdapter;
|
||||
|
||||
use super::plan::Fill;
|
||||
use crate::error::{
|
||||
CatalogSnafu, DataFusionSnafu, Result, TimeIndexNotFoundSnafu, UnknownTableSnafu,
|
||||
CatalogSnafu, DataFusionSnafu, RangeQuerySnafu, Result, TimeIndexNotFoundSnafu,
|
||||
UnknownTableSnafu,
|
||||
};
|
||||
use crate::range_select::plan::{RangeFn, RangeSelect};
|
||||
use crate::DfContextProviderAdapter;
|
||||
|
||||
/// `RangeExprRewriter` recursively searches a given `Expr`, finds every `range_fn` scalar UDF contained in it,
/// collects the information required by the RangeSelect query,
/// and finally replaces the `range_fn` scalar UDF with an ordinary column field.
|
||||
pub struct RangeExprRewriter<'a> {
|
||||
input_plan: &'a Arc<LogicalPlan>,
|
||||
align: Duration,
|
||||
by: Vec<Expr>,
|
||||
range_fn: Vec<RangeFn>,
|
||||
context_provider: &'a DfContextProviderAdapter,
|
||||
/// Use a `BTreeSet` so that in a case like `avg(a) RANGE '5m' + avg(a) RANGE '5m'`, the duplicate range expr `avg(a) RANGE '5m'` is only calculated once
|
||||
range_fn: BTreeSet<RangeFn>,
|
||||
sub_aggr: &'a Aggregate,
|
||||
}
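A hedged sketch of why `range_fn` is now collected into a `BTreeSet<RangeFn>`: ordering and equality are keyed on `name`, so textually identical range expressions collapse into a single entry. `sample_range_fn` is a hypothetical helper that builds a `RangeFn` with the given name.

use std::collections::BTreeSet;

fn dedup_example() {
    let mut range_fns: BTreeSet<RangeFn> = BTreeSet::new();
    // Both halves of `avg(a) RANGE '5m' + avg(a) RANGE '5m'` yield the same name...
    range_fns.insert(sample_range_fn("AVG(a) RANGE 5m FILL NULL")); // hypothetical helper
    range_fns.insert(sample_range_fn("AVG(a) RANGE 5m FILL NULL"));
    // ...so the range expression is planned and evaluated only once.
    assert_eq!(range_fns.len(), 1);
}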
#[inline]
|
||||
fn dispose_parse_error(expr: Option<&Expr>) -> DataFusionError {
|
||||
DataFusionError::Plan(
|
||||
expr.map(|x| {
|
||||
format!(
|
||||
"Illegal argument `{}` in range select query",
|
||||
x.display_name().unwrap_or_default()
|
||||
)
|
||||
})
|
||||
.unwrap_or("Missing argument in range select query".into()),
|
||||
)
|
||||
}
|
||||
|
||||
impl<'a> RangeExprRewriter<'a> {
|
||||
pub fn gen_range_expr(&self, func_name: &str, args: Vec<Expr>) -> DFResult<Expr> {
|
||||
match AggregateFn::from_str(func_name) {
|
||||
Ok(agg_fn) => Ok(Expr::AggregateFunction(AggregateFunction::new(
|
||||
agg_fn, args, false, None, None,
|
||||
))),
|
||||
Err(_) => match self.context_provider.get_aggregate_meta(func_name) {
|
||||
Some(agg_udf) => Ok(Expr::AggregateUDF(AggregateUDF::new(
|
||||
agg_udf, args, None, None,
|
||||
))),
|
||||
None => Err(DataFusionError::Plan(format!(
|
||||
"{} is not a Aggregate function or a Aggregate UDF",
|
||||
func_name
|
||||
))),
|
||||
},
|
||||
pub fn get_range_expr(&self, args: &[Expr], i: usize) -> DFResult<Expr> {
|
||||
match args.get(i) {
|
||||
Some(Expr::Column(column)) => {
|
||||
let index = self.sub_aggr.schema.index_of_column(column)?;
|
||||
let len = self.sub_aggr.group_expr.len();
|
||||
self.sub_aggr
|
||||
.aggr_expr
|
||||
.get(index - len)
|
||||
.cloned()
|
||||
.ok_or(DataFusionError::Plan(
|
||||
"Range expr not found in underlying Aggregate Plan".into(),
|
||||
))
|
||||
}
|
||||
other => Err(dispose_parse_error(other)),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -71,9 +87,7 @@ impl<'a> RangeExprRewriter<'a> {
|
||||
fn parse_str_expr(args: &[Expr], i: usize) -> DFResult<&str> {
|
||||
match args.get(i) {
|
||||
Some(Expr::Literal(ScalarValue::Utf8(Some(str)))) => Ok(str.as_str()),
|
||||
_ => Err(DataFusionError::Plan(
|
||||
"Illegal argument in range select query".into(),
|
||||
)),
|
||||
other => Err(dispose_parse_error(other)),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -88,10 +102,8 @@ fn parse_expr_list(args: &[Expr], start: usize, len: usize) -> DFResult<Vec<Expr
|
||||
| Expr::ScalarFunction(_)
|
||||
| Expr::ScalarUDF(_),
|
||||
) => args[i].clone(),
|
||||
_ => {
|
||||
return Err(DataFusionError::Plan(
|
||||
"Illegal expr argument in range select query".into(),
|
||||
))
|
||||
other => {
|
||||
return Err(dispose_parse_error(*other));
|
||||
}
|
||||
});
|
||||
}
|
||||
@@ -104,23 +116,22 @@ impl<'a> TreeNodeRewriter for RangeExprRewriter<'a> {
|
||||
fn mutate(&mut self, node: Expr) -> DFResult<Expr> {
|
||||
if let Expr::ScalarUDF(func) = &node {
|
||||
if func.fun.name == "range_fn" {
|
||||
// `range_fn(func_name, argc, [argv], range, fill, byc, [byv], align)`
// `argv` and `byv` are variadic arguments; argc/byc indicate the number of arguments
let func_name = parse_str_expr(&func.args, 0)?;
let argc = str::parse::<usize>(parse_str_expr(&func.args, 1)?)
// `range_fn(func, range, fill, byc, [byv], align)`
// `[byv]` is a variadic argument; byc indicates the number of arguments
|
||||
let range_expr = self.get_range_expr(&func.args, 0)?;
|
||||
let range_str = parse_str_expr(&func.args, 1)?;
|
||||
let byc = str::parse::<usize>(parse_str_expr(&func.args, 3)?)
|
||||
.map_err(|e| DataFusionError::Plan(e.to_string()))?;
|
||||
let byc = str::parse::<usize>(parse_str_expr(&func.args, argc + 4)?)
|
||||
.map_err(|e| DataFusionError::Plan(e.to_string()))?;
|
||||
let mut range_fn = RangeFn {
|
||||
expr: Expr::Wildcard,
|
||||
range: parse_duration(parse_str_expr(&func.args, argc + 2)?)
|
||||
.map_err(DataFusionError::Plan)?,
|
||||
fill: parse_str_expr(&func.args, argc + 3)?.to_string(),
|
||||
};
|
||||
let args = parse_expr_list(&func.args, 2, argc)?;
|
||||
let by = parse_expr_list(&func.args, argc + 5, byc)?;
|
||||
let align = parse_duration(parse_str_expr(&func.args, argc + byc + 5)?)
|
||||
let by = parse_expr_list(&func.args, 4, byc)?;
|
||||
let align = parse_duration(parse_str_expr(&func.args, byc + 4)?)
|
||||
.map_err(DataFusionError::Plan)?;
|
||||
let mut data_type = range_expr.get_type(self.input_plan.schema())?;
|
||||
let mut need_cast = false;
|
||||
let fill = Fill::try_from_str(parse_str_expr(&func.args, 2)?, &data_type)?;
|
||||
if matches!(fill, Fill::Linear) && data_type.is_integer() {
|
||||
data_type = DataType::Float64;
|
||||
need_cast = true;
|
||||
}
|
||||
if !self.by.is_empty() && self.by != by {
|
||||
return Err(DataFusionError::Plan(
|
||||
"Inconsistent by given in Range Function Rewrite".into(),
|
||||
@@ -135,9 +146,21 @@ impl<'a> TreeNodeRewriter for RangeExprRewriter<'a> {
|
||||
} else {
|
||||
self.align = align;
|
||||
}
|
||||
range_fn.expr = self.gen_range_expr(func_name, args)?;
|
||||
let alias = Expr::Column(Column::from_name(range_fn.expr.display_name()?));
|
||||
self.range_fn.push(range_fn);
|
||||
let range_fn = RangeFn {
|
||||
name: format!(
|
||||
"{} RANGE {} FILL {}",
|
||||
range_expr.display_name()?,
|
||||
range_str,
|
||||
fill
|
||||
),
|
||||
data_type,
|
||||
expr: range_expr,
|
||||
range: parse_duration(range_str).map_err(DataFusionError::Plan)?,
|
||||
fill,
|
||||
need_cast,
|
||||
};
|
||||
let alias = Expr::Column(Column::from_name(range_fn.name.clone()));
|
||||
self.range_fn.insert(range_fn);
|
||||
return Ok(alias);
|
||||
}
|
||||
}
|
||||
@@ -146,25 +169,18 @@ impl<'a> TreeNodeRewriter for RangeExprRewriter<'a> {
|
||||
}
|
||||
|
||||
/// In order to implement RangeSelect query like `avg(field_0) RANGE '5m' FILL NULL`,
|
||||
/// All RangeSelect query items are converted into udf scalar function in sql parse stage, with format like `range_fn('avg', .....)`.
|
||||
/// All RangeSelect query items are converted into a `range_fn` scalar UDF call during the SQL parse stage, in a form like `range_fn(avg(field_0), .....)`.
|
||||
/// `range_fn` contains all the parameters we need to execute RangeSelect.
|
||||
/// To execute the range select query correctly, we need to modify the query plan generated by DataFusion.
|
||||
/// We need to recursively traverse the entire LogicalPlan, find all `range_fn` scalar UDFs contained in the projection plan,
|
||||
/// collect the info we need to generate the RangeSelect query LogicalPlan, and rewrite the original LogicalPlan.
|
||||
pub struct RangePlanRewriter {
|
||||
table_provider: DfTableSourceProvider,
|
||||
context_provider: DfContextProviderAdapter,
|
||||
}
|
||||
|
||||
impl RangePlanRewriter {
|
||||
pub fn new(
|
||||
table_provider: DfTableSourceProvider,
|
||||
context_provider: DfContextProviderAdapter,
|
||||
) -> Self {
|
||||
Self {
|
||||
table_provider,
|
||||
context_provider,
|
||||
}
|
||||
pub fn new(table_provider: DfTableSourceProvider) -> Self {
|
||||
Self { table_provider }
|
||||
}
|
||||
|
||||
pub async fn rewrite(&mut self, plan: LogicalPlan) -> Result<LogicalPlan> {
|
||||
@@ -185,17 +201,28 @@ impl RangePlanRewriter {
|
||||
LogicalPlan::Projection(Projection { expr, input, .. })
|
||||
if have_range_in_exprs(expr) =>
|
||||
{
|
||||
let input = if let Some(new_input) = new_inputs[0].take() {
|
||||
Arc::new(new_input)
|
||||
let (aggr_plan, input) = if let LogicalPlan::Aggregate(aggr) = input.as_ref() {
|
||||
// An expr like `rate(max(a) RANGE '6m') RANGE '6m'` has legal syntax but illegal semantics.
|
||||
if have_range_in_exprs(&aggr.aggr_expr) {
|
||||
return RangeQuerySnafu {
|
||||
msg: "Nest Range Query is not allowed",
|
||||
}
|
||||
.fail();
|
||||
}
|
||||
(aggr, aggr.input.clone())
|
||||
} else {
|
||||
input.clone()
|
||||
return RangeQuerySnafu {
|
||||
msg: "Window functions is not allowed in Range Query",
|
||||
}
|
||||
.fail();
|
||||
};
|
||||
let (time_index, default_by) = self.get_index_by(input.schema().clone()).await?;
|
||||
let (time_index, default_by) = self.get_index_by(input.schema()).await?;
|
||||
let mut range_rewriter = RangeExprRewriter {
|
||||
input_plan: &input,
|
||||
align: Duration::default(),
|
||||
by: vec![],
|
||||
range_fn: vec![],
|
||||
context_provider: &self.context_provider,
|
||||
range_fn: BTreeSet::new(),
|
||||
sub_aggr: aggr_plan,
|
||||
};
|
||||
let new_expr = expr
|
||||
.iter()
|
||||
@@ -207,7 +234,7 @@ impl RangePlanRewriter {
|
||||
}
|
||||
let range_select = RangeSelect::try_new(
|
||||
input.clone(),
|
||||
range_rewriter.range_fn,
|
||||
range_rewriter.range_fn.into_iter().collect(),
|
||||
range_rewriter.align,
|
||||
time_index,
|
||||
range_rewriter.by,
|
||||
@@ -252,7 +279,7 @@ impl RangePlanRewriter {
|
||||
/// return `(time_index, [row_columns])` to the rewriter.
|
||||
/// If the user does not explicitly use the `by` keyword to indicate time series,
|
||||
/// `[row_columns]` will be used as the default time series.
|
||||
async fn get_index_by(&mut self, schema: Arc<DFSchema>) -> Result<(Expr, Vec<Expr>)> {
|
||||
async fn get_index_by(&mut self, schema: &Arc<DFSchema>) -> Result<(Expr, Vec<Expr>)> {
|
||||
let mut time_index_expr = Expr::Wildcard;
|
||||
let mut default_by = vec![];
|
||||
for field in schema.fields() {
|
||||
@@ -303,28 +330,27 @@ impl RangePlanRewriter {
|
||||
}
|
||||
}
|
||||
|
||||
fn have_range_in_exprs(exprs: &Vec<Expr>) -> bool {
|
||||
let mut have = false;
|
||||
for expr in exprs {
|
||||
fn have_range_in_exprs(exprs: &[Expr]) -> bool {
|
||||
exprs.iter().any(|expr| {
|
||||
let mut find_range = false;
|
||||
let _ = expr.apply(&mut |expr| {
|
||||
if let Expr::ScalarUDF(ScalarUDF { fun, .. }) = expr {
|
||||
if fun.name == "range_fn" {
|
||||
have = true;
|
||||
find_range = true;
|
||||
return Ok(VisitRecursion::Stop);
|
||||
}
|
||||
}
|
||||
Ok(VisitRecursion::Continue)
|
||||
});
|
||||
if have {
|
||||
break;
|
||||
}
|
||||
}
|
||||
have
|
||||
find_range
|
||||
})
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
|
||||
use std::error::Error;
|
||||
|
||||
use catalog::memory::MemoryCatalogManager;
|
||||
use catalog::RegisterTableRequest;
|
||||
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
@@ -391,14 +417,14 @@ mod test {
|
||||
QueryEngineFactory::new(catalog_list, None, None, false).query_engine()
|
||||
}
|
||||
|
||||
async fn query_plan_compare(sql: &str, expected: String) {
|
||||
async fn do_query(sql: &str) -> Result<crate::plan::LogicalPlan> {
|
||||
let stmt = QueryLanguageParser::parse_sql(sql).unwrap();
|
||||
let engine = create_test_engine().await;
|
||||
let GreptimeLogicalPlan::DfPlan(plan) = engine
|
||||
.planner()
|
||||
.plan(stmt, QueryContext::arc())
|
||||
.await
|
||||
.unwrap();
|
||||
engine.planner().plan(stmt, QueryContext::arc()).await
|
||||
}
|
||||
|
||||
async fn query_plan_compare(sql: &str, expected: String) {
|
||||
let GreptimeLogicalPlan::DfPlan(plan) = do_query(sql).await.unwrap();
|
||||
assert_eq!(plan.display_indent_schema().to_string(), expected);
|
||||
}
|
||||
|
||||
@@ -406,7 +432,7 @@ mod test {
|
||||
async fn range_no_project() {
|
||||
let query = r#"SELECT timestamp, tag_0, tag_1, avg(field_0 + field_1) RANGE '5m' FROM test ALIGN '1h' by (tag_0,tag_1);"#;
|
||||
let expected = String::from(
|
||||
"RangeSelect: range_exprs=[RangeFn { expr:AVG(test.field_0 + test.field_1) range:300s fill: }], align=3600s time_index=timestamp [timestamp:Timestamp(Millisecond, None), tag_0:Utf8, tag_1:Utf8, AVG(test.field_0 + test.field_1):Float64;N]\
|
||||
"RangeSelect: range_exprs=[AVG(test.field_0 + test.field_1) RANGE 5m FILL NULL], align=3600s time_index=timestamp [timestamp:Timestamp(Millisecond, None), tag_0:Utf8, tag_1:Utf8, AVG(test.field_0 + test.field_1) RANGE 5m FILL NULL:Float64;N]\
|
||||
\n TableScan: test [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N, field_3:Float64;N, field_4:Float64;N]"
|
||||
);
|
||||
query_plan_compare(query, expected).await;
|
||||
@@ -414,11 +440,10 @@ mod test {
|
||||
|
||||
#[tokio::test]
|
||||
async fn range_expr_calculation() {
|
||||
let query =
|
||||
r#"SELECT avg(field_0 + field_1)/4 RANGE '5m' FROM test ALIGN '1h' by (tag_0,tag_1);"#;
|
||||
let query = r#"SELECT (avg(field_0 + field_1)/4) RANGE '5m' FROM test ALIGN '1h' by (tag_0,tag_1);"#;
|
||||
let expected = String::from(
|
||||
"Projection: AVG(test.field_0 + test.field_1) / Int64(4) [AVG(test.field_0 + test.field_1) / Int64(4):Float64;N]\
|
||||
\n RangeSelect: range_exprs=[RangeFn { expr:AVG(test.field_0 + test.field_1) range:300s fill: }], align=3600s time_index=timestamp [AVG(test.field_0 + test.field_1):Float64;N, timestamp:Timestamp(Millisecond, None), tag_0:Utf8, tag_1:Utf8]\
|
||||
"Projection: AVG(test.field_0 + test.field_1) RANGE 5m FILL NULL / Int64(4) [AVG(test.field_0 + test.field_1) RANGE 5m FILL NULL / Int64(4):Float64;N]\
|
||||
\n RangeSelect: range_exprs=[AVG(test.field_0 + test.field_1) RANGE 5m FILL NULL], align=3600s time_index=timestamp [AVG(test.field_0 + test.field_1) RANGE 5m FILL NULL:Float64;N, timestamp:Timestamp(Millisecond, None), tag_0:Utf8, tag_1:Utf8]\
|
||||
\n TableScan: test [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N, field_3:Float64;N, field_4:Float64;N]"
|
||||
);
|
||||
query_plan_compare(query, expected).await;
|
||||
@@ -427,10 +452,10 @@ mod test {
|
||||
#[tokio::test]
|
||||
async fn range_multi_args() {
|
||||
let query =
|
||||
r#"SELECT covar(field_0 + field_1, field_1)/4 RANGE '5m' FROM test ALIGN '1h';"#;
|
||||
r#"SELECT (covar(field_0 + field_1, field_1)/4) RANGE '5m' FROM test ALIGN '1h';"#;
|
||||
let expected = String::from(
|
||||
"Projection: COVARIANCE(test.field_0 + test.field_1,test.field_1) / Int64(4) [COVARIANCE(test.field_0 + test.field_1,test.field_1) / Int64(4):Float64;N]\
|
||||
\n RangeSelect: range_exprs=[RangeFn { expr:COVARIANCE(test.field_0 + test.field_1,test.field_1) range:300s fill: }], align=3600s time_index=timestamp [COVARIANCE(test.field_0 + test.field_1,test.field_1):Float64;N, timestamp:Timestamp(Millisecond, None), tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8]\
|
||||
"Projection: COVARIANCE(test.field_0 + test.field_1,test.field_1) RANGE 5m FILL NULL / Int64(4) [COVARIANCE(test.field_0 + test.field_1,test.field_1) RANGE 5m FILL NULL / Int64(4):Float64;N]\
|
||||
\n RangeSelect: range_exprs=[COVARIANCE(test.field_0 + test.field_1,test.field_1) RANGE 5m FILL NULL], align=3600s time_index=timestamp [COVARIANCE(test.field_0 + test.field_1,test.field_1) RANGE 5m FILL NULL:Float64;N, timestamp:Timestamp(Millisecond, None), tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8]\
|
||||
\n TableScan: test [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N, field_3:Float64;N, field_4:Float64;N]"
|
||||
);
|
||||
query_plan_compare(query, expected).await;
|
||||
@@ -438,10 +463,10 @@ mod test {
|
||||
|
||||
#[tokio::test]
|
||||
async fn range_calculation() {
|
||||
let query = r#"SELECT (avg(field_0)+sum(field_1))/4 RANGE '5m' FROM test ALIGN '1h' by (tag_0,tag_1) FILL NULL;"#;
|
||||
let query = r#"SELECT ((avg(field_0)+sum(field_1))/4) RANGE '5m' FROM test ALIGN '1h' by (tag_0,tag_1) FILL NULL;"#;
|
||||
let expected = String::from(
|
||||
"Projection: (AVG(test.field_0) + SUM(test.field_1)) / Int64(4) [AVG(test.field_0) + SUM(test.field_1) / Int64(4):Float64;N]\
|
||||
\n RangeSelect: range_exprs=[RangeFn { expr:AVG(test.field_0) range:300s fill:NULL }, RangeFn { expr:SUM(test.field_1) range:300s fill:NULL }], align=3600s time_index=timestamp [AVG(test.field_0):Float64;N, SUM(test.field_1):Float64;N, timestamp:Timestamp(Millisecond, None), tag_0:Utf8, tag_1:Utf8]\
|
||||
"Projection: (AVG(test.field_0) RANGE 5m FILL NULL + SUM(test.field_1) RANGE 5m FILL NULL) / Int64(4) [AVG(test.field_0) RANGE 5m FILL NULL + SUM(test.field_1) RANGE 5m FILL NULL / Int64(4):Float64;N]\
|
||||
\n RangeSelect: range_exprs=[AVG(test.field_0) RANGE 5m FILL NULL, SUM(test.field_1) RANGE 5m FILL NULL], align=3600s time_index=timestamp [AVG(test.field_0) RANGE 5m FILL NULL:Float64;N, SUM(test.field_1) RANGE 5m FILL NULL:Float64;N, timestamp:Timestamp(Millisecond, None), tag_0:Utf8, tag_1:Utf8]\
|
||||
\n TableScan: test [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N, field_3:Float64;N, field_4:Float64;N]"
|
||||
);
|
||||
query_plan_compare(query, expected).await;
|
||||
@@ -449,12 +474,12 @@ mod test {
|
||||
|
||||
#[tokio::test]
|
||||
async fn range_as_sub_query() {
|
||||
let query = r#"SELECT foo + 1 from (SELECT (avg(field_0)+sum(field_1))/4 RANGE '5m' as foo FROM test ALIGN '1h' by (tag_0,tag_1) FILL NULL) where foo > 1;"#;
|
||||
let query = r#"SELECT foo + 1 from (SELECT ((avg(field_0)+sum(field_1))/4) RANGE '5m' as foo FROM test ALIGN '1h' by (tag_0,tag_1) FILL NULL) where foo > 1;"#;
|
||||
let expected = String::from(
|
||||
"Projection: foo + Int64(1) [foo + Int64(1):Float64;N]\
|
||||
\n Filter: foo > Int64(1) [foo:Float64;N]\
|
||||
\n Projection: (AVG(test.field_0) + SUM(test.field_1)) / Int64(4) AS foo [foo:Float64;N]\
|
||||
\n RangeSelect: range_exprs=[RangeFn { expr:AVG(test.field_0) range:300s fill:NULL }, RangeFn { expr:SUM(test.field_1) range:300s fill:NULL }], align=3600s time_index=timestamp [AVG(test.field_0):Float64;N, SUM(test.field_1):Float64;N, timestamp:Timestamp(Millisecond, None), tag_0:Utf8, tag_1:Utf8]\
|
||||
\n Projection: (AVG(test.field_0) RANGE 5m FILL NULL + SUM(test.field_1) RANGE 5m FILL NULL) / Int64(4) AS foo [foo:Float64;N]\
|
||||
\n RangeSelect: range_exprs=[AVG(test.field_0) RANGE 5m FILL NULL, SUM(test.field_1) RANGE 5m FILL NULL], align=3600s time_index=timestamp [AVG(test.field_0) RANGE 5m FILL NULL:Float64;N, SUM(test.field_1) RANGE 5m FILL NULL:Float64;N, timestamp:Timestamp(Millisecond, None), tag_0:Utf8, tag_1:Utf8]\
|
||||
\n TableScan: test [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N, field_3:Float64;N, field_4:Float64;N]"
|
||||
);
|
||||
query_plan_compare(query, expected).await;
|
||||
@@ -462,14 +487,109 @@ mod test {
|
||||
|
||||
#[tokio::test]
|
||||
async fn range_from_nest_query() {
|
||||
let query = r#"SELECT (avg(a)+sum(b))/4 RANGE '5m' FROM (SELECT field_0 as a, field_1 as b, tag_0 as c, tag_1 as d, timestamp from test where field_0 > 1.0) ALIGN '1h' by (c, d) FILL NULL;"#;
|
||||
let query = r#"SELECT ((avg(a)+sum(b))/4) RANGE '5m' FROM (SELECT field_0 as a, field_1 as b, tag_0 as c, tag_1 as d, timestamp from test where field_0 > 1.0) ALIGN '1h' by (c, d) FILL NULL;"#;
|
||||
let expected = String::from(
|
||||
"Projection: (AVG(a) + SUM(b)) / Int64(4) [AVG(a) + SUM(b) / Int64(4):Float64;N]\
|
||||
\n RangeSelect: range_exprs=[RangeFn { expr:AVG(a) range:300s fill:NULL }, RangeFn { expr:SUM(b) range:300s fill:NULL }], align=3600s time_index=timestamp [AVG(a):Float64;N, SUM(b):Float64;N, timestamp:Timestamp(Millisecond, None), c:Utf8, d:Utf8]\
|
||||
"Projection: (AVG(a) RANGE 5m FILL NULL + SUM(b) RANGE 5m FILL NULL) / Int64(4) [AVG(a) RANGE 5m FILL NULL + SUM(b) RANGE 5m FILL NULL / Int64(4):Float64;N]\
|
||||
\n RangeSelect: range_exprs=[AVG(a) RANGE 5m FILL NULL, SUM(b) RANGE 5m FILL NULL], align=3600s time_index=timestamp [AVG(a) RANGE 5m FILL NULL:Float64;N, SUM(b) RANGE 5m FILL NULL:Float64;N, timestamp:Timestamp(Millisecond, None), c:Utf8, d:Utf8]\
|
||||
\n Projection: test.field_0 AS a, test.field_1 AS b, test.tag_0 AS c, test.tag_1 AS d, test.timestamp [a:Float64;N, b:Float64;N, c:Utf8, d:Utf8, timestamp:Timestamp(Millisecond, None)]\
|
||||
\n Filter: test.field_0 > Float64(1) [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N, field_3:Float64;N, field_4:Float64;N]\
|
||||
\n TableScan: test [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N, field_3:Float64;N, field_4:Float64;N]"
|
||||
);
|
||||
query_plan_compare(query, expected).await;
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn range_in_expr() {
|
||||
let query = r#"SELECT sin(avg(field_0 + field_1) RANGE '5m' + 1) FROM test ALIGN '1h' by (tag_0,tag_1);"#;
|
||||
let expected = String::from(
|
||||
"Projection: sin(AVG(test.field_0 + test.field_1) RANGE 5m FILL NULL + Int64(1)) [sin(AVG(test.field_0 + test.field_1) RANGE 5m FILL NULL + Int64(1)):Float64;N]\
|
||||
\n RangeSelect: range_exprs=[AVG(test.field_0 + test.field_1) RANGE 5m FILL NULL], align=3600s time_index=timestamp [AVG(test.field_0 + test.field_1) RANGE 5m FILL NULL:Float64;N, timestamp:Timestamp(Millisecond, None), tag_0:Utf8, tag_1:Utf8]\
|
||||
\n TableScan: test [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N, field_3:Float64;N, field_4:Float64;N]"
|
||||
);
|
||||
query_plan_compare(query, expected).await;
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn duplicate_range_expr() {
|
||||
let query = r#"SELECT avg(field_0) RANGE '5m' FILL 6.0 + avg(field_0) RANGE '5m' FILL 6.0 FROM test ALIGN '1h' by (tag_0,tag_1);"#;
|
||||
let expected = String::from(
|
||||
"Projection: AVG(test.field_0) RANGE 5m FILL 6 + AVG(test.field_0) RANGE 5m FILL 6 [AVG(test.field_0) RANGE 5m FILL 6 + AVG(test.field_0) RANGE 5m FILL 6:Float64]\
|
||||
\n RangeSelect: range_exprs=[AVG(test.field_0) RANGE 5m FILL 6], align=3600s time_index=timestamp [AVG(test.field_0) RANGE 5m FILL 6:Float64, timestamp:Timestamp(Millisecond, None), tag_0:Utf8, tag_1:Utf8]\
|
||||
\n TableScan: test [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N, field_3:Float64;N, field_4:Float64;N]"
|
||||
);
|
||||
query_plan_compare(query, expected).await;
|
||||
}
|
||||
|
||||
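The `duplicate_range_expr` test above relies on `range_fn` items being collected into a `BTreeSet`, ordered by the rendered `name` shown earlier, so two textually identical items collapse into one RangeSelect column. A minimal sketch (plain std, illustrative strings only) of that dedup effect:

use std::collections::BTreeSet;

fn main() {
    let mut range_fns: BTreeSet<String> = BTreeSet::new();
    for item in [
        "AVG(test.field_0) RANGE 5m FILL 6",
        "AVG(test.field_0) RANGE 5m FILL 6", // the duplicate from the same SELECT list
    ] {
        range_fns.insert(item.to_string()); // the second insert is a no-op
    }
    assert_eq!(range_fns.len(), 1); // only one RangeSelect column is produced
    println!("{range_fns:?}");
}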
#[tokio::test]
|
||||
async fn deep_nest_range_expr() {
|
||||
let query = r#"SELECT round(sin(avg(field_0 + field_1) RANGE '5m' + 1)) FROM test ALIGN '1h' by (tag_0,tag_1);"#;
|
||||
let expected = String::from(
|
||||
"Projection: round(sin(AVG(test.field_0 + test.field_1) RANGE 5m FILL NULL + Int64(1))) [round(sin(AVG(test.field_0 + test.field_1) RANGE 5m FILL NULL + Int64(1))):Float64;N]\
|
||||
\n RangeSelect: range_exprs=[AVG(test.field_0 + test.field_1) RANGE 5m FILL NULL], align=3600s time_index=timestamp [AVG(test.field_0 + test.field_1) RANGE 5m FILL NULL:Float64;N, timestamp:Timestamp(Millisecond, None), tag_0:Utf8, tag_1:Utf8]\
|
||||
\n TableScan: test [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N, field_3:Float64;N, field_4:Float64;N]"
|
||||
);
|
||||
query_plan_compare(query, expected).await;
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn complex_range_expr() {
|
||||
let query = r#"SELECT gcd(CAST(max(field_0 + 1) Range '5m' FILL NULL AS Int64), CAST(tag_0 AS Int64)) + round(max(field_2+1) Range '6m' FILL NULL + 1) + max(field_2+3) Range '10m' FILL NULL * CAST(tag_1 AS Float64) + 1 FROM test ALIGN '1h' by (tag_0, tag_1);"#;
|
||||
let expected = String::from(
|
||||
"Projection: gcd(CAST(MAX(test.field_0 + Int64(1)) RANGE 5m FILL NULL AS Int64), CAST(test.tag_0 AS Int64)) + round(MAX(test.field_2 + Int64(1)) RANGE 6m FILL NULL + Int64(1)) + MAX(test.field_2 + Int64(3)) RANGE 10m FILL NULL * CAST(test.tag_1 AS Float64) + Int64(1) [gcd(MAX(test.field_0 + Int64(1)) RANGE 5m FILL NULL,test.tag_0) + round(MAX(test.field_2 + Int64(1)) RANGE 6m FILL NULL + Int64(1)) + MAX(test.field_2 + Int64(3)) RANGE 10m FILL NULL * test.tag_1 + Int64(1):Float64;N]\
|
||||
\n RangeSelect: range_exprs=[MAX(test.field_0 + Int64(1)) RANGE 5m FILL NULL, MAX(test.field_2 + Int64(1)) RANGE 6m FILL NULL, MAX(test.field_2 + Int64(3)) RANGE 10m FILL NULL], align=3600s time_index=timestamp [MAX(test.field_0 + Int64(1)) RANGE 5m FILL NULL:Float64;N, MAX(test.field_2 + Int64(1)) RANGE 6m FILL NULL:Float64;N, MAX(test.field_2 + Int64(3)) RANGE 10m FILL NULL:Float64;N, timestamp:Timestamp(Millisecond, None), tag_0:Utf8, tag_1:Utf8]\
|
||||
\n TableScan: test [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N, field_3:Float64;N, field_4:Float64;N]"
|
||||
);
|
||||
query_plan_compare(query, expected).await;
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn range_linear_on_integer() {
|
||||
let query = r#"SELECT min(CAST(field_0 AS Int64) + CAST(field_1 AS Int64)) RANGE '5m' FILL LINEAR FROM test ALIGN '1h' by (tag_0,tag_1);"#;
|
||||
let expected = String::from(
|
||||
"RangeSelect: range_exprs=[MIN(test.field_0 + test.field_1) RANGE 5m FILL LINEAR], align=3600s time_index=timestamp [MIN(test.field_0 + test.field_1) RANGE 5m FILL LINEAR:Float64;N]\
|
||||
\n TableScan: test [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N, field_3:Float64;N, field_4:Float64;N]"
|
||||
);
|
||||
query_plan_compare(query, expected).await;
|
||||
}
|
||||
|
||||
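`FILL LINEAR` on an integer column flips the `need_cast` flag above and promotes the output to Float64, because a linearly interpolated value between two integer samples is generally not an integer. A small sketch (plain std, simplified two-point interpolation, not the project's actual code) of the arithmetic:

fn linear_fill(prev: (i64, f64), next: (i64, f64), ts: i64) -> f64 {
    let (t0, v0) = prev;
    let (t1, v1) = next;
    v0 + (v1 - v0) * (ts - t0) as f64 / (t1 - t0) as f64
}

fn main() {
    // Values 0 (t = 0s) and 1 (t = 10s) interpolate to 0.5 at t = 5s,
    // which only fits in a Float64 column.
    assert_eq!(linear_fill((0, 0.0), (10_000, 1.0), 5_000), 0.5);
    println!("ok");
}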
#[tokio::test]
|
||||
async fn range_nest_range_err() {
|
||||
let query = r#"SELECT sum(avg(field_0 + field_1) RANGE '5m' + 1) RANGE '5m' + 1 FROM test ALIGN '1h' by (tag_0,tag_1);"#;
|
||||
assert_eq!(
|
||||
do_query(query).await.unwrap_err().to_string(),
|
||||
"Range Query: Nest Range Query is not allowed"
|
||||
)
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
/// Start directly from the rewritten SQL and check whether the error reported by the range expression rewriting is as expected.
|
||||
/// The correct argument list is `range_fn(avg(field_0), '5m', 'NULL', '0', '1h')`
|
||||
async fn range_argument_err_1() {
|
||||
let query = r#"SELECT range_fn('5m', avg(field_0), 'NULL', '1', tag_0, '1h') FROM test group by tag_0;"#;
|
||||
let error = do_query(query)
|
||||
.await
|
||||
.unwrap_err()
|
||||
.source()
|
||||
.unwrap()
|
||||
.to_string();
|
||||
assert_eq!(
|
||||
error,
|
||||
"Error during planning: Illegal argument `Utf8(\"5m\")` in range select query"
|
||||
)
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn range_argument_err_2() {
|
||||
let query = r#"SELECT range_fn(avg(field_0), 5, 'NULL', '1', tag_0, '1h') FROM test group by tag_0;"#;
|
||||
let error = do_query(query)
|
||||
.await
|
||||
.unwrap_err()
|
||||
.source()
|
||||
.unwrap()
|
||||
.to_string();
|
||||
assert_eq!(
|
||||
error,
|
||||
"Error during planning: Illegal argument `Int64(5)` in range select query"
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -95,8 +95,8 @@ pub struct GrpcServerConfig {
|
||||
impl Default for GrpcServerConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
max_recv_message_size: DEFAULT_MAX_GRPC_RECV_MESSAGE_SIZE,
|
||||
max_send_message_size: DEFAULT_MAX_GRPC_SEND_MESSAGE_SIZE,
|
||||
max_recv_message_size: DEFAULT_MAX_GRPC_RECV_MESSAGE_SIZE.as_bytes() as usize,
|
||||
max_send_message_size: DEFAULT_MAX_GRPC_SEND_MESSAGE_SIZE.as_bytes() as usize,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
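The defaults above now go through `ReadableSize`, so `DEFAULT_MAX_GRPC_RECV_MESSAGE_SIZE.as_bytes()` yields the same raw byte count the config used to spell out. A trivial check (plain std) that the new human-readable form matches the old value:

fn main() {
    // "512MiB" in the new config equals the old raw value 536870912.
    let mib: u64 = 1024 * 1024;
    assert_eq!(512 * mib, 536_870_912);
    println!("512MiB = {} bytes", 512 * mib);
}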
@@ -12,14 +12,18 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::time::Duration;
|
||||
|
||||
use common_meta::distributed_time_constants;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
|
||||
#[serde(default)]
|
||||
pub struct HeartbeatOptions {
|
||||
pub interval_millis: u64,
|
||||
pub retry_interval_millis: u64,
|
||||
#[serde(with = "humantime_serde")]
|
||||
pub interval: Duration,
|
||||
#[serde(with = "humantime_serde")]
|
||||
pub retry_interval: Duration,
|
||||
}
|
||||
|
||||
impl HeartbeatOptions {
|
||||
@@ -30,8 +34,12 @@ impl HeartbeatOptions {
|
||||
pub fn frontend_default() -> Self {
|
||||
Self {
|
||||
// Frontend can send heartbeat with a longer interval.
|
||||
interval_millis: distributed_time_constants::FRONTEND_HEARTBEAT_INTERVAL_MILLIS,
|
||||
retry_interval_millis: distributed_time_constants::HEARTBEAT_INTERVAL_MILLIS,
|
||||
interval: Duration::from_millis(
|
||||
distributed_time_constants::FRONTEND_HEARTBEAT_INTERVAL_MILLIS,
|
||||
),
|
||||
retry_interval: Duration::from_millis(
|
||||
distributed_time_constants::HEARTBEAT_INTERVAL_MILLIS,
|
||||
),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -39,8 +47,10 @@ impl HeartbeatOptions {
|
||||
impl Default for HeartbeatOptions {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
interval_millis: distributed_time_constants::HEARTBEAT_INTERVAL_MILLIS,
|
||||
retry_interval_millis: distributed_time_constants::HEARTBEAT_INTERVAL_MILLIS,
|
||||
interval: Duration::from_millis(distributed_time_constants::HEARTBEAT_INTERVAL_MILLIS),
|
||||
retry_interval: Duration::from_millis(
|
||||
distributed_time_constants::HEARTBEAT_INTERVAL_MILLIS,
|
||||
),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
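With `#[serde(with = "humantime_serde")]`, the heartbeat options deserialize human-readable strings such as "3s" directly into `Duration`. A minimal sketch (assumes the serde, humantime-serde and toml crates as dependencies; struct name is illustrative) of that parsing:

use std::time::Duration;
use serde::Deserialize;

#[derive(Debug, Deserialize)]
struct Heartbeat {
    #[serde(with = "humantime_serde")]
    interval: Duration,
    #[serde(with = "humantime_serde")]
    retry_interval: Duration,
}

fn main() {
    let opts: Heartbeat =
        toml::from_str("interval = \"3s\"\nretry_interval = \"3s\"").unwrap();
    assert_eq!(opts.interval, Duration::from_secs(3));
    println!("{opts:?}");
}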
@@ -14,7 +14,7 @@
|
||||
|
||||
use std::ops::Deref;
|
||||
|
||||
use common_error::ext::{BoxedError, ErrorExt};
|
||||
use common_error::ext::ErrorExt;
|
||||
use common_query::Output;
|
||||
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
|
||||
use datatypes::prelude::{ConcreteDataType, Value};
|
||||
@@ -28,7 +28,7 @@ use session::context::QueryContextRef;
|
||||
use snafu::prelude::*;
|
||||
use tokio::io::AsyncWrite;
|
||||
|
||||
use crate::error::{self, Error, OtherSnafu, Result};
|
||||
use crate::error::{self, Error, Result};
|
||||
use crate::metrics::*;
|
||||
|
||||
/// Try to write multiple output to the writer if possible.
|
||||
@@ -148,7 +148,11 @@ impl<'a, W: AsyncWrite + Unpin> MysqlResultWriter<'a, W> {
|
||||
.await?
|
||||
}
|
||||
Err(e) => {
|
||||
return Err(e).map_err(BoxedError::new).context(OtherSnafu);
|
||||
let err = e.to_string();
|
||||
row_writer
|
||||
.finish_error(ErrorKind::ER_INTERNAL_ERROR, &err.as_bytes())
|
||||
.await?;
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -35,6 +35,8 @@ pub struct TableReference<'a> {
|
||||
pub table: &'a str,
|
||||
}
|
||||
|
||||
pub type OwnedTableReference = TableReference<'static>;
|
||||
|
||||
// TODO(LFC): Find a better place for `TableReference`,
|
||||
// so that we can reuse the default catalog and schema consts.
|
||||
// Could be done together with issue #559.
|
||||
|
||||
@@ -92,7 +92,7 @@ impl GreptimeDbStandaloneBuilder {
|
||||
.init()
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
procedure_manager.start().await.unwrap();
|
||||
let instance = Instance::try_new_standalone(
|
||||
kv_store,
|
||||
procedure_manager,
|
||||
|
||||
@@ -610,13 +610,13 @@ node_id = 0
|
||||
require_lease_before_startup = true
|
||||
rpc_addr = "127.0.0.1:3001"
|
||||
rpc_runtime_size = 8
|
||||
rpc_max_recv_message_size = 536870912
|
||||
rpc_max_send_message_size = 536870912
|
||||
rpc_max_recv_message_size = "512MiB"
|
||||
rpc_max_send_message_size = "512MiB"
|
||||
enable_telemetry = true
|
||||
|
||||
[heartbeat]
|
||||
interval_millis = 3000
|
||||
retry_interval_millis = 3000
|
||||
interval = "3s"
|
||||
retry_interval = "3s"
|
||||
|
||||
[http]
|
||||
addr = "127.0.0.1:4000"
|
||||
|
||||
54
tests/cases/standalone/common/range/by.result
Normal file
@@ -0,0 +1,54 @@
|
||||
CREATE TABLE host (
|
||||
ts timestamp(3) time index,
|
||||
host STRING PRIMARY KEY,
|
||||
val BIGINT,
|
||||
);
|
||||
|
||||
Affected Rows: 0
|
||||
|
||||
INSERT INTO TABLE host VALUES
|
||||
(0, 'host1', 0),
|
||||
(5000, 'host1', null),
|
||||
(10000, 'host1', 1),
|
||||
(15000, 'host1', null),
|
||||
(20000, 'host1', 2),
|
||||
(0, 'host2', 3),
|
||||
(5000, 'host2', null),
|
||||
(10000, 'host2', 4),
|
||||
(15000, 'host2', null),
|
||||
(20000, 'host2', 5);
|
||||
|
||||
Affected Rows: 10
|
||||
|
||||
-- Test by calculate
|
||||
SELECT ts, length(host), max(val) RANGE '5s' FROM host ALIGN '20s' BY (length(host)) ORDER BY ts;
|
||||
|
||||
+---------------------+-----------------------------+----------------------------------+
|
||||
| ts | character_length(host.host) | MAX(host.val) RANGE 5s FILL NULL |
|
||||
+---------------------+-----------------------------+----------------------------------+
|
||||
| 1970-01-01T00:00:00 | 5 | 3 |
|
||||
| 1970-01-01T00:00:20 | 5 | 5 |
|
||||
+---------------------+-----------------------------+----------------------------------+
|
||||
|
||||
SELECT ts, max(val) RANGE '5s' FROM host ALIGN '20s' BY (2) ORDER BY ts;
|
||||
|
||||
+---------------------+----------------------------------+
|
||||
| ts | MAX(host.val) RANGE 5s FILL NULL |
|
||||
+---------------------+----------------------------------+
|
||||
| 1970-01-01T00:00:00 | 3 |
|
||||
| 1970-01-01T00:00:20 | 5 |
|
||||
+---------------------+----------------------------------+
|
||||
|
||||
SELECT ts, CAST(length(host) as INT64) + 2, max(val) RANGE '5s' FROM host ALIGN '20s' BY (CAST(length(host) as INT64) + 2) ORDER BY ts;
|
||||
|
||||
+---------------------+----------------------------------------+----------------------------------+
|
||||
| ts | character_length(host.host) + Int64(2) | MAX(host.val) RANGE 5s FILL NULL |
|
||||
+---------------------+----------------------------------------+----------------------------------+
|
||||
| 1970-01-01T00:00:00 | 7 | 3 |
|
||||
| 1970-01-01T00:00:20 | 7 | 5 |
|
||||
+---------------------+----------------------------------------+----------------------------------+
|
||||
|
||||
DROP TABLE host;
|
||||
|
||||
Affected Rows: 0
|
||||
|
||||
27
tests/cases/standalone/common/range/by.sql
Normal file
@@ -0,0 +1,27 @@
|
||||
CREATE TABLE host (
|
||||
ts timestamp(3) time index,
|
||||
host STRING PRIMARY KEY,
|
||||
val BIGINT,
|
||||
);
|
||||
|
||||
INSERT INTO TABLE host VALUES
|
||||
(0, 'host1', 0),
|
||||
(5000, 'host1', null),
|
||||
(10000, 'host1', 1),
|
||||
(15000, 'host1', null),
|
||||
(20000, 'host1', 2),
|
||||
(0, 'host2', 3),
|
||||
(5000, 'host2', null),
|
||||
(10000, 'host2', 4),
|
||||
(15000, 'host2', null),
|
||||
(20000, 'host2', 5);
|
||||
|
||||
-- Test by calculate
|
||||
|
||||
SELECT ts, length(host), max(val) RANGE '5s' FROM host ALIGN '20s' BY (length(host)) ORDER BY ts;
|
||||
|
||||
SELECT ts, max(val) RANGE '5s' FROM host ALIGN '20s' BY (2) ORDER BY ts;
|
||||
|
||||
SELECT ts, CAST(length(host) as INT64) + 2, max(val) RANGE '5s' FROM host ALIGN '20s' BY (CAST(length(host) as INT64) + 2) ORDER BY ts;
|
||||
|
||||
DROP TABLE host;
|
||||
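The BY clause above replaces the default time-series key with the evaluated expression, so `BY (length(host))` puts host1 and host2 (both of length 5) into a single series before the aggregate runs. A small sketch (plain std, values taken from the 00:00:20 window of the data above) of that grouping:

use std::collections::BTreeMap;

fn main() {
    // 00:00:20 window of the data above: host1 -> 2, host2 -> 5.
    let rows = [("host1", 2i64), ("host2", 5)];
    let mut groups: BTreeMap<usize, i64> = BTreeMap::new();
    for (host, val) in rows {
        let key = host.len(); // BY (length(host)) is 5 for both hosts
        let entry = groups.entry(key).or_insert(i64::MIN);
        *entry = (*entry).max(val);
    }
    // Both hosts share the key, so the window's MAX is 5, matching by.result.
    assert_eq!(groups[&5], 5);
    println!("{groups:?}");
}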
194
tests/cases/standalone/common/range/calculate.result
Normal file
@@ -0,0 +1,194 @@
|
||||
CREATE TABLE host (
|
||||
ts timestamp(3) time index,
|
||||
host STRING PRIMARY KEY,
|
||||
val BIGINT,
|
||||
);
|
||||
|
||||
Affected Rows: 0
|
||||
|
||||
INSERT INTO TABLE host VALUES
|
||||
(0, 'host1', 0),
|
||||
(5000, 'host1', null),
|
||||
(10000, 'host1', 1),
|
||||
(15000, 'host1', null),
|
||||
(20000, 'host1', 2),
|
||||
(0, 'host2', 3),
|
||||
(5000, 'host2', null),
|
||||
(10000, 'host2', 4),
|
||||
(15000, 'host2', null),
|
||||
(20000, 'host2', 5);
|
||||
|
||||
Affected Rows: 10
|
||||
|
||||
-- Test range expr calculate
|
||||
SELECT ts, host, covar(val, val) RANGE '20s' FROM host ALIGN '10s' ORDER BY host, ts;
|
||||
|
||||
+---------------------+-------+---------------------------------------------------+
|
||||
| ts | host | COVARIANCE(host.val,host.val) RANGE 20s FILL NULL |
|
||||
+---------------------+-------+---------------------------------------------------+
|
||||
| 1970-01-01T00:00:00 | host1 | |
|
||||
| 1970-01-01T00:00:10 | host1 | 0.5 |
|
||||
| 1970-01-01T00:00:20 | host1 | 0.5 |
|
||||
| 1970-01-01T00:00:30 | host1 | |
|
||||
| 1970-01-01T00:00:00 | host2 | |
|
||||
| 1970-01-01T00:00:10 | host2 | 0.5 |
|
||||
| 1970-01-01T00:00:20 | host2 | 0.5 |
|
||||
| 1970-01-01T00:00:30 | host2 | |
|
||||
+---------------------+-------+---------------------------------------------------+
|
||||
|
||||
SELECT ts, host, 2 * min(val) RANGE '5s' FROM host ALIGN '5s' ORDER BY host, ts;
|
||||
|
||||
+---------------------+-------+---------------------------------------------+
|
||||
| ts | host | Int64(2) * MIN(host.val) RANGE 5s FILL NULL |
|
||||
+---------------------+-------+---------------------------------------------+
|
||||
| 1970-01-01T00:00:00 | host1 | 0 |
|
||||
| 1970-01-01T00:00:05 | host1 | |
|
||||
| 1970-01-01T00:00:10 | host1 | 2 |
|
||||
| 1970-01-01T00:00:15 | host1 | |
|
||||
| 1970-01-01T00:00:20 | host1 | 4 |
|
||||
| 1970-01-01T00:00:00 | host2 | 6 |
|
||||
| 1970-01-01T00:00:05 | host2 | |
|
||||
| 1970-01-01T00:00:10 | host2 | 8 |
|
||||
| 1970-01-01T00:00:15 | host2 | |
|
||||
| 1970-01-01T00:00:20 | host2 | 10 |
|
||||
+---------------------+-------+---------------------------------------------+
|
||||
|
||||
SELECT ts, host, min(val * 2) RANGE '5s' FROM host ALIGN '5s' ORDER BY host, ts;
|
||||
|
||||
+---------------------+-------+---------------------------------------------+
|
||||
| ts | host | MIN(host.val * Int64(2)) RANGE 5s FILL NULL |
|
||||
+---------------------+-------+---------------------------------------------+
|
||||
| 1970-01-01T00:00:00 | host1 | 0 |
|
||||
| 1970-01-01T00:00:05 | host1 | |
|
||||
| 1970-01-01T00:00:10 | host1 | 2 |
|
||||
| 1970-01-01T00:00:15 | host1 | |
|
||||
| 1970-01-01T00:00:20 | host1 | 4 |
|
||||
| 1970-01-01T00:00:00 | host2 | 6 |
|
||||
| 1970-01-01T00:00:05 | host2 | |
|
||||
| 1970-01-01T00:00:10 | host2 | 8 |
|
||||
| 1970-01-01T00:00:15 | host2 | |
|
||||
| 1970-01-01T00:00:20 | host2 | 10 |
|
||||
+---------------------+-------+---------------------------------------------+
|
||||
|
||||
SELECT ts, host, min(CAST(val as Float64)) RANGE '5s' FROM host ALIGN '5s' ORDER BY host, ts;
|
||||
|
||||
+---------------------+-------+----------------------------------+
|
||||
| ts | host | MIN(host.val) RANGE 5s FILL NULL |
|
||||
+---------------------+-------+----------------------------------+
|
||||
| 1970-01-01T00:00:00 | host1 | 0.0 |
|
||||
| 1970-01-01T00:00:05 | host1 | |
|
||||
| 1970-01-01T00:00:10 | host1 | 1.0 |
|
||||
| 1970-01-01T00:00:15 | host1 | |
|
||||
| 1970-01-01T00:00:20 | host1 | 2.0 |
|
||||
| 1970-01-01T00:00:00 | host2 | 3.0 |
|
||||
| 1970-01-01T00:00:05 | host2 | |
|
||||
| 1970-01-01T00:00:10 | host2 | 4.0 |
|
||||
| 1970-01-01T00:00:15 | host2 | |
|
||||
| 1970-01-01T00:00:20 | host2 | 5.0 |
|
||||
+---------------------+-------+----------------------------------+
|
||||
|
||||
SELECT ts, host, min(floor(CAST(val as Float64))) RANGE '5s' FROM host ALIGN '5s' ORDER BY host, ts;
|
||||
|
||||
+---------------------+-------+-----------------------------------------+
|
||||
| ts | host | MIN(floor(host.val)) RANGE 5s FILL NULL |
|
||||
+---------------------+-------+-----------------------------------------+
|
||||
| 1970-01-01T00:00:00 | host1 | 0.0 |
|
||||
| 1970-01-01T00:00:05 | host1 | |
|
||||
| 1970-01-01T00:00:10 | host1 | 1.0 |
|
||||
| 1970-01-01T00:00:15 | host1 | |
|
||||
| 1970-01-01T00:00:20 | host1 | 2.0 |
|
||||
| 1970-01-01T00:00:00 | host2 | 3.0 |
|
||||
| 1970-01-01T00:00:05 | host2 | |
|
||||
| 1970-01-01T00:00:10 | host2 | 4.0 |
|
||||
| 1970-01-01T00:00:15 | host2 | |
|
||||
| 1970-01-01T00:00:20 | host2 | 5.0 |
|
||||
+---------------------+-------+-----------------------------------------+
|
||||
|
||||
SELECT ts, host, floor(min(val) RANGE '5s') FROM host ALIGN '5s' ORDER BY host, ts;
|
||||
|
||||
+---------------------+-------+-----------------------------------------+
|
||||
| ts | host | floor(MIN(host.val) RANGE 5s FILL NULL) |
|
||||
+---------------------+-------+-----------------------------------------+
|
||||
| 1970-01-01T00:00:00 | host1 | 0.0 |
|
||||
| 1970-01-01T00:00:05 | host1 | |
|
||||
| 1970-01-01T00:00:10 | host1 | 1.0 |
|
||||
| 1970-01-01T00:00:15 | host1 | |
|
||||
| 1970-01-01T00:00:20 | host1 | 2.0 |
|
||||
| 1970-01-01T00:00:00 | host2 | 3.0 |
|
||||
| 1970-01-01T00:00:05 | host2 | |
|
||||
| 1970-01-01T00:00:10 | host2 | 4.0 |
|
||||
| 1970-01-01T00:00:15 | host2 | |
|
||||
| 1970-01-01T00:00:20 | host2 | 5.0 |
|
||||
+---------------------+-------+-----------------------------------------+
|
||||
|
||||
-- Test complex range expr calculate
|
||||
SELECT ts, host, (min(val) + max(val)) RANGE '20s' + 1.0 FROM host ALIGN '10s' ORDER BY host, ts;
|
||||
|
||||
+---------------------+-------+------------------------------------------------------------------------------------+
|
||||
| ts | host | MIN(host.val) RANGE 20s FILL NULL + MAX(host.val) RANGE 20s FILL NULL + Float64(1) |
|
||||
+---------------------+-------+------------------------------------------------------------------------------------+
|
||||
| 1970-01-01T00:00:00 | host1 | 1.0 |
|
||||
| 1970-01-01T00:00:10 | host1 | 2.0 |
|
||||
| 1970-01-01T00:00:20 | host1 | 4.0 |
|
||||
| 1970-01-01T00:00:30 | host1 | 5.0 |
|
||||
| 1970-01-01T00:00:00 | host2 | 7.0 |
|
||||
| 1970-01-01T00:00:10 | host2 | 8.0 |
|
||||
| 1970-01-01T00:00:20 | host2 | 10.0 |
|
||||
| 1970-01-01T00:00:30 | host2 | 11.0 |
|
||||
+---------------------+-------+------------------------------------------------------------------------------------+
|
||||
|
||||
SELECT ts, host, covar(ceil(CAST(val as Float64)), floor(CAST(val as Float64))) RANGE '20s' FROM host ALIGN '10s' ORDER BY host, ts;
|
||||
|
||||
+---------------------+-------+----------------------------------------------------------------+
|
||||
| ts | host | COVARIANCE(ceil(host.val),floor(host.val)) RANGE 20s FILL NULL |
|
||||
+---------------------+-------+----------------------------------------------------------------+
|
||||
| 1970-01-01T00:00:00 | host1 | |
|
||||
| 1970-01-01T00:00:10 | host1 | 0.5 |
|
||||
| 1970-01-01T00:00:20 | host1 | 0.5 |
|
||||
| 1970-01-01T00:00:30 | host1 | |
|
||||
| 1970-01-01T00:00:00 | host2 | |
|
||||
| 1970-01-01T00:00:10 | host2 | 0.5 |
|
||||
| 1970-01-01T00:00:20 | host2 | 0.5 |
|
||||
| 1970-01-01T00:00:30 | host2 | |
|
||||
+---------------------+-------+----------------------------------------------------------------+
|
||||
|
||||
SELECT ts, host, floor(cos(ceil(sin(min(val) RANGE '5s')))) FROM host ALIGN '5s' ORDER BY host, ts;
|
||||
|
||||
+---------------------+-------+---------------------------------------------------------+
|
||||
| ts | host | floor(cos(ceil(sin(MIN(host.val) RANGE 5s FILL NULL)))) |
|
||||
+---------------------+-------+---------------------------------------------------------+
|
||||
| 1970-01-01T00:00:00 | host1 | 1.0 |
|
||||
| 1970-01-01T00:00:05 | host1 | |
|
||||
| 1970-01-01T00:00:10 | host1 | 0.0 |
|
||||
| 1970-01-01T00:00:15 | host1 | |
|
||||
| 1970-01-01T00:00:20 | host1 | 0.0 |
|
||||
| 1970-01-01T00:00:00 | host2 | 0.0 |
|
||||
| 1970-01-01T00:00:05 | host2 | |
|
||||
| 1970-01-01T00:00:10 | host2 | 1.0 |
|
||||
| 1970-01-01T00:00:15 | host2 | |
|
||||
| 1970-01-01T00:00:20 | host2 | 1.0 |
|
||||
+---------------------+-------+---------------------------------------------------------+
|
||||
|
||||
SELECT ts, host, gcd(CAST(max(floor(CAST(val as Float64))) RANGE '10s' FILL PREV as INT64) * 4, max(val * 4) RANGE '10s' FILL PREV) * length(host) + 1 FROM host ALIGN '5s' ORDER BY host, ts;
|
||||
|
||||
+---------------------+-------+------------------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| ts | host | gcd(MAX(floor(host.val)) RANGE 10s FILL PREV * Int64(4),MAX(host.val * Int64(4)) RANGE 10s FILL PREV) * character_length(host.host) + Int64(1) |
|
||||
+---------------------+-------+------------------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| 1970-01-01T00:00:00 | host1 | 1 |
|
||||
| 1970-01-01T00:00:05 | host1 | 1 |
|
||||
| 1970-01-01T00:00:10 | host1 | 21 |
|
||||
| 1970-01-01T00:00:15 | host1 | 21 |
|
||||
| 1970-01-01T00:00:20 | host1 | 41 |
|
||||
| 1970-01-01T00:00:25 | host1 | 41 |
|
||||
| 1970-01-01T00:00:00 | host2 | 61 |
|
||||
| 1970-01-01T00:00:05 | host2 | 61 |
|
||||
| 1970-01-01T00:00:10 | host2 | 81 |
|
||||
| 1970-01-01T00:00:15 | host2 | 81 |
|
||||
| 1970-01-01T00:00:20 | host2 | 101 |
|
||||
| 1970-01-01T00:00:25 | host2 | 101 |
|
||||
+---------------------+-------+------------------------------------------------------------------------------------------------------------------------------------------------+
|
||||
|
||||
DROP TABLE host;
|
||||
|
||||
Affected Rows: 0
|
||||
|
||||
43
tests/cases/standalone/common/range/calculate.sql
Normal file
@@ -0,0 +1,43 @@
|
||||
CREATE TABLE host (
|
||||
ts timestamp(3) time index,
|
||||
host STRING PRIMARY KEY,
|
||||
val BIGINT,
|
||||
);
|
||||
|
||||
INSERT INTO TABLE host VALUES
|
||||
(0, 'host1', 0),
|
||||
(5000, 'host1', null),
|
||||
(10000, 'host1', 1),
|
||||
(15000, 'host1', null),
|
||||
(20000, 'host1', 2),
|
||||
(0, 'host2', 3),
|
||||
(5000, 'host2', null),
|
||||
(10000, 'host2', 4),
|
||||
(15000, 'host2', null),
|
||||
(20000, 'host2', 5);
|
||||
|
||||
-- Test range expr calculate
|
||||
|
||||
SELECT ts, host, covar(val, val) RANGE '20s' FROM host ALIGN '10s' ORDER BY host, ts;
|
||||
|
||||
SELECT ts, host, 2 * min(val) RANGE '5s' FROM host ALIGN '5s' ORDER BY host, ts;
|
||||
|
||||
SELECT ts, host, min(val * 2) RANGE '5s' FROM host ALIGN '5s' ORDER BY host, ts;
|
||||
|
||||
SELECT ts, host, min(CAST(val as Float64)) RANGE '5s' FROM host ALIGN '5s' ORDER BY host, ts;
|
||||
|
||||
SELECT ts, host, min(floor(CAST(val as Float64))) RANGE '5s' FROM host ALIGN '5s' ORDER BY host, ts;
|
||||
|
||||
SELECT ts, host, floor(min(val) RANGE '5s') FROM host ALIGN '5s' ORDER BY host, ts;
|
||||
|
||||
-- Test complex range expr calculate
|
||||
|
||||
SELECT ts, host, (min(val) + max(val)) RANGE '20s' + 1.0 FROM host ALIGN '10s' ORDER BY host, ts;
|
||||
|
||||
SELECT ts, host, covar(ceil(CAST(val as Float64)), floor(CAST(val as Float64))) RANGE '20s' FROM host ALIGN '10s' ORDER BY host, ts;
|
||||
|
||||
SELECT ts, host, floor(cos(ceil(sin(min(val) RANGE '5s')))) FROM host ALIGN '5s' ORDER BY host, ts;
|
||||
|
||||
SELECT ts, host, gcd(CAST(max(floor(CAST(val as Float64))) RANGE '10s' FILL PREV as INT64) * 4, max(val * 4) RANGE '10s' FILL PREV) * length(host) + 1 FROM host ALIGN '5s' ORDER BY host, ts;
|
||||
|
||||
DROP TABLE host;
|
||||
82
tests/cases/standalone/common/range/error.result
Normal file
@@ -0,0 +1,82 @@
|
||||
CREATE TABLE host (
|
||||
ts timestamp(3) time index,
|
||||
host STRING PRIMARY KEY,
|
||||
val BIGINT,
|
||||
);
|
||||
|
||||
Affected Rows: 0
|
||||
|
||||
INSERT INTO TABLE host VALUES
|
||||
(0, 'host1', 0),
|
||||
(5000, 'host1', null),
|
||||
(10000, 'host1', 1),
|
||||
(15000, 'host1', null),
|
||||
(20000, 'host1', 2),
|
||||
(0, 'host2', 3),
|
||||
(5000, 'host2', null),
|
||||
(10000, 'host2', 4),
|
||||
(15000, 'host2', null),
|
||||
(20000, 'host2', 5);
|
||||
|
||||
Affected Rows: 10
|
||||
|
||||
-- Test Invalid cases
|
||||
-- 1. error timestamp
|
||||
SELECT min(val) RANGE 'not_time' FROM host ALIGN '5s';
|
||||
|
||||
Error: 2000(InvalidSyntax), sql parser error: not a valid duration string: not_time
|
||||
|
||||
SELECT min(val) RANGE '5s' FROM host ALIGN 'not_time';
|
||||
|
||||
Error: 2000(InvalidSyntax), sql parser error: not a valid duration string: not_time
|
||||
|
||||
-- 2.1 no range param
|
||||
SELECT min(val) FROM host ALIGN '5s';
|
||||
|
||||
Error: 2000(InvalidSyntax), sql parser error: Illegal Range select, no RANGE keyword found in any SelectItem
|
||||
|
||||
SELECT min(val) RANGE '10s', max(val) FROM host ALIGN '5s';
|
||||
|
||||
Error: 1003(Internal), No field named "MAX(host.val)". Valid fields are "MIN(host.val) RANGE 10s FILL NULL", host.ts, host.host.
|
||||
|
||||
SELECT min(val) * 2 RANGE '10s' FROM host ALIGN '5s';
|
||||
|
||||
Error: 2000(InvalidSyntax), sql parser error: Can't use the RANGE keyword in Expr 2 without function
|
||||
|
||||
SELECT 1 RANGE '10s' FILL NULL FROM host ALIGN '1h' FILL NULL;
|
||||
|
||||
Error: 2000(InvalidSyntax), sql parser error: Can't use the RANGE keyword in Expr 1 without function
|
||||
|
||||
-- 2.2 no align param
|
||||
SELECT min(val) RANGE '5s' FROM host;
|
||||
|
||||
Error: 1003(Internal), Error during planning: Missing argument in range select query
|
||||
|
||||
-- 2.3 type mismatch
|
||||
SELECT covar(ceil(val), floor(val)) RANGE '20s' FROM host ALIGN '10s';
|
||||
|
||||
Error: 1003(Internal), Internal error: Unsupported data type Int64 for function ceil. This was likely caused by a bug in DataFusion's code and we would welcome that you file an bug report in our issue tracker
|
||||
|
||||
-- 2.4 nest query
|
||||
SELECT min(max(val) RANGE '20s') RANGE '20s' FROM host ALIGN '10s';
|
||||
|
||||
Error: 2000(InvalidSyntax), Range Query: Nest Range Query is not allowed
|
||||
|
||||
-- 2.5 wrong Aggregate
|
||||
SELECT rank() OVER (PARTITION BY host ORDER BY ts DESC) RANGE '10s' FROM host ALIGN '5s';
|
||||
|
||||
Error: 2000(InvalidSyntax), Range Query: Window functions is not allowed in Range Query
|
||||
|
||||
-- 2.6 invalid fill
|
||||
SELECT min(val) RANGE '5s', min(val) RANGE '5s' FILL NULL FROM host ALIGN '5s';
|
||||
|
||||
Error: 1003(Internal), Schema contains duplicate unqualified field name "MIN(host.val) RANGE 5s FILL NULL"
|
||||
|
||||
SELECT min(val) RANGE '5s' FROM host ALIGN '5s' FILL 3.0;
|
||||
|
||||
Error: 1003(Internal), Error during planning: 3.0 is not a valid fill option, fail to convert to a const value. { Arrow error: Cast error: Cannot cast string '3.0' to value of Int64 type }
|
||||
|
||||
DROP TABLE host;
|
||||
|
||||
Affected Rows: 0
|
||||
|
||||
59
tests/cases/standalone/common/range/error.sql
Normal file
@@ -0,0 +1,59 @@
|
||||
CREATE TABLE host (
|
||||
ts timestamp(3) time index,
|
||||
host STRING PRIMARY KEY,
|
||||
val BIGINT,
|
||||
);
|
||||
|
||||
INSERT INTO TABLE host VALUES
|
||||
(0, 'host1', 0),
|
||||
(5000, 'host1', null),
|
||||
(10000, 'host1', 1),
|
||||
(15000, 'host1', null),
|
||||
(20000, 'host1', 2),
|
||||
(0, 'host2', 3),
|
||||
(5000, 'host2', null),
|
||||
(10000, 'host2', 4),
|
||||
(15000, 'host2', null),
|
||||
(20000, 'host2', 5);
|
||||
|
||||
-- Test Invalid cases
|
||||
|
||||
-- 1. error timestamp
|
||||
|
||||
SELECT min(val) RANGE 'not_time' FROM host ALIGN '5s';
|
||||
|
||||
SELECT min(val) RANGE '5s' FROM host ALIGN 'not_time';
|
||||
|
||||
-- 2.1 no range param
|
||||
|
||||
SELECT min(val) FROM host ALIGN '5s';
|
||||
|
||||
SELECT min(val) RANGE '10s', max(val) FROM host ALIGN '5s';
|
||||
|
||||
SELECT min(val) * 2 RANGE '10s' FROM host ALIGN '5s';
|
||||
|
||||
SELECT 1 RANGE '10s' FILL NULL FROM host ALIGN '1h' FILL NULL;
|
||||
|
||||
-- 2.2 no align param
|
||||
|
||||
SELECT min(val) RANGE '5s' FROM host;
|
||||
|
||||
-- 2.3 type mismatch
|
||||
|
||||
SELECT covar(ceil(val), floor(val)) RANGE '20s' FROM host ALIGN '10s';
|
||||
|
||||
-- 2.4 nest query
|
||||
|
||||
SELECT min(max(val) RANGE '20s') RANGE '20s' FROM host ALIGN '10s';
|
||||
|
||||
-- 2.5 wrong Aggregate
|
||||
|
||||
SELECT rank() OVER (PARTITION BY host ORDER BY ts DESC) RANGE '10s' FROM host ALIGN '5s';
|
||||
|
||||
-- 2.6 invalid fill
|
||||
|
||||
SELECT min(val) RANGE '5s', min(val) RANGE '5s' FILL NULL FROM host ALIGN '5s';
|
||||
|
||||
SELECT min(val) RANGE '5s' FROM host ALIGN '5s' FILL 3.0;
|
||||
|
||||
DROP TABLE host;
|
||||
112
tests/cases/standalone/common/range/fill.result
Normal file
@@ -0,0 +1,112 @@
|
||||
CREATE TABLE host (
|
||||
ts timestamp(3) time index,
|
||||
host STRING PRIMARY KEY,
|
||||
val BIGINT,
|
||||
);
|
||||
|
||||
Affected Rows: 0
|
||||
|
||||
INSERT INTO TABLE host VALUES
|
||||
(0, 'host1', 0),
|
||||
(5000, 'host1', null),
|
||||
(10000, 'host1', 1),
|
||||
(15000, 'host1', null),
|
||||
(20000, 'host1', 2),
|
||||
(0, 'host2', 3),
|
||||
(5000, 'host2', null),
|
||||
(10000, 'host2', 4),
|
||||
(15000, 'host2', null),
|
||||
(20000, 'host2', 5);
|
||||
|
||||
Affected Rows: 10
|
||||
|
||||
-- Test Fill
|
||||
SELECT ts, host, min(val) RANGE '5s' FROM host ALIGN '5s' ORDER BY host, ts;
|
||||
|
||||
+---------------------+-------+----------------------------------+
|
||||
| ts | host | MIN(host.val) RANGE 5s FILL NULL |
|
||||
+---------------------+-------+----------------------------------+
|
||||
| 1970-01-01T00:00:00 | host1 | 0 |
|
||||
| 1970-01-01T00:00:05 | host1 | |
|
||||
| 1970-01-01T00:00:10 | host1 | 1 |
|
||||
| 1970-01-01T00:00:15 | host1 | |
|
||||
| 1970-01-01T00:00:20 | host1 | 2 |
|
||||
| 1970-01-01T00:00:00 | host2 | 3 |
|
||||
| 1970-01-01T00:00:05 | host2 | |
|
||||
| 1970-01-01T00:00:10 | host2 | 4 |
|
||||
| 1970-01-01T00:00:15 | host2 | |
|
||||
| 1970-01-01T00:00:20 | host2 | 5 |
|
||||
+---------------------+-------+----------------------------------+
|
||||
|
||||
SELECT ts, host, min(val) RANGE '5s' FROM host ALIGN '5s' FILL NULL ORDER BY host, ts;
|
||||
|
||||
+---------------------+-------+----------------------------------+
|
||||
| ts | host | MIN(host.val) RANGE 5s FILL NULL |
|
||||
+---------------------+-------+----------------------------------+
|
||||
| 1970-01-01T00:00:00 | host1 | 0 |
|
||||
| 1970-01-01T00:00:05 | host1 | |
|
||||
| 1970-01-01T00:00:10 | host1 | 1 |
|
||||
| 1970-01-01T00:00:15 | host1 | |
|
||||
| 1970-01-01T00:00:20 | host1 | 2 |
|
||||
| 1970-01-01T00:00:00 | host2 | 3 |
|
||||
| 1970-01-01T00:00:05 | host2 | |
|
||||
| 1970-01-01T00:00:10 | host2 | 4 |
|
||||
| 1970-01-01T00:00:15 | host2 | |
|
||||
| 1970-01-01T00:00:20 | host2 | 5 |
|
||||
+---------------------+-------+----------------------------------+
|
||||
|
||||
SELECT ts, host, min(val) RANGE '5s', min(val) RANGE '5s' FILL 6 FROM host ALIGN '5s' ORDER BY host, ts;
|
||||
|
||||
+---------------------+-------+----------------------------------+-------------------------------+
|
||||
| ts | host | MIN(host.val) RANGE 5s FILL NULL | MIN(host.val) RANGE 5s FILL 6 |
|
||||
+---------------------+-------+----------------------------------+-------------------------------+
|
||||
| 1970-01-01T00:00:00 | host1 | 0 | 0 |
|
||||
| 1970-01-01T00:00:05 | host1 | | 6 |
|
||||
| 1970-01-01T00:00:10 | host1 | 1 | 1 |
|
||||
| 1970-01-01T00:00:15 | host1 | | 6 |
|
||||
| 1970-01-01T00:00:20 | host1 | 2 | 2 |
|
||||
| 1970-01-01T00:00:00 | host2 | 3 | 3 |
|
||||
| 1970-01-01T00:00:05 | host2 | | 6 |
|
||||
| 1970-01-01T00:00:10 | host2 | 4 | 4 |
|
||||
| 1970-01-01T00:00:15 | host2 | | 6 |
|
||||
| 1970-01-01T00:00:20 | host2 | 5 | 5 |
|
||||
+---------------------+-------+----------------------------------+-------------------------------+
|
||||
|
||||
SELECT ts, host, min(val) RANGE '5s', min(val) RANGE '5s' FILL PREV FROM host ALIGN '5s'ORDER BY host, ts;
|
||||
|
||||
+---------------------+-------+----------------------------------+----------------------------------+
|
||||
| ts | host | MIN(host.val) RANGE 5s FILL NULL | MIN(host.val) RANGE 5s FILL PREV |
|
||||
+---------------------+-------+----------------------------------+----------------------------------+
|
||||
| 1970-01-01T00:00:00 | host1 | 0 | 0 |
|
||||
| 1970-01-01T00:00:05 | host1 | | 0 |
|
||||
| 1970-01-01T00:00:10 | host1 | 1 | 1 |
|
||||
| 1970-01-01T00:00:15 | host1 | | 1 |
|
||||
| 1970-01-01T00:00:20 | host1 | 2 | 2 |
|
||||
| 1970-01-01T00:00:00 | host2 | 3 | 3 |
|
||||
| 1970-01-01T00:00:05 | host2 | | 3 |
|
||||
| 1970-01-01T00:00:10 | host2 | 4 | 4 |
|
||||
| 1970-01-01T00:00:15 | host2 | | 4 |
|
||||
| 1970-01-01T00:00:20 | host2 | 5 | 5 |
|
||||
+---------------------+-------+----------------------------------+----------------------------------+
|
||||
|
||||
SELECT ts, host, min(val) RANGE '5s', min(val) RANGE '5s' FILL LINEAR FROM host ALIGN '5s' ORDER BY host, ts;
|
||||
|
||||
+---------------------+-------+----------------------------------+------------------------------------+
|
||||
| ts | host | MIN(host.val) RANGE 5s FILL NULL | MIN(host.val) RANGE 5s FILL LINEAR |
|
||||
+---------------------+-------+----------------------------------+------------------------------------+
|
||||
| 1970-01-01T00:00:00 | host1 | 0 | 0.0 |
|
||||
| 1970-01-01T00:00:05 | host1 | | 0.5 |
|
||||
| 1970-01-01T00:00:10 | host1 | 1 | 1.0 |
|
||||
| 1970-01-01T00:00:15 | host1 | | 1.5 |
|
||||
| 1970-01-01T00:00:20 | host1 | 2 | 2.0 |
|
||||
| 1970-01-01T00:00:00 | host2 | 3 | 3.0 |
|
||||
| 1970-01-01T00:00:05 | host2 | | 3.5 |
|
||||
| 1970-01-01T00:00:10 | host2 | 4 | 4.0 |
|
||||
| 1970-01-01T00:00:15 | host2 | | 4.5 |
|
||||
| 1970-01-01T00:00:20 | host2 | 5 | 5.0 |
|
||||
+---------------------+-------+----------------------------------+------------------------------------+
|
||||
|
||||
DROP TABLE host;
|
||||
|
||||
Affected Rows: 0
|
||||
|
||||
31
tests/cases/standalone/common/range/fill.sql
Normal file
@@ -0,0 +1,31 @@
|
||||
CREATE TABLE host (
|
||||
ts timestamp(3) time index,
|
||||
host STRING PRIMARY KEY,
|
||||
val BIGINT,
|
||||
);
|
||||
|
||||
INSERT INTO TABLE host VALUES
|
||||
(0, 'host1', 0),
|
||||
(5000, 'host1', null),
|
||||
(10000, 'host1', 1),
|
||||
(15000, 'host1', null),
|
||||
(20000, 'host1', 2),
|
||||
(0, 'host2', 3),
|
||||
(5000, 'host2', null),
|
||||
(10000, 'host2', 4),
|
||||
(15000, 'host2', null),
|
||||
(20000, 'host2', 5);
|
||||
|
||||
-- Test Fill
|
||||
|
||||
SELECT ts, host, min(val) RANGE '5s' FROM host ALIGN '5s' ORDER BY host, ts;
|
||||
|
||||
SELECT ts, host, min(val) RANGE '5s' FROM host ALIGN '5s' FILL NULL ORDER BY host, ts;
|
||||
|
||||
SELECT ts, host, min(val) RANGE '5s', min(val) RANGE '5s' FILL 6 FROM host ALIGN '5s' ORDER BY host, ts;
|
||||
|
||||
SELECT ts, host, min(val) RANGE '5s', min(val) RANGE '5s' FILL PREV FROM host ALIGN '5s'ORDER BY host, ts;
|
||||
|
||||
SELECT ts, host, min(val) RANGE '5s', min(val) RANGE '5s' FILL LINEAR FROM host ALIGN '5s' ORDER BY host, ts;
|
||||
|
||||
DROP TABLE host;
|
||||
51
tests/cases/standalone/common/range/nest.result
Normal file
@@ -0,0 +1,51 @@
|
||||
CREATE TABLE host (
|
||||
ts timestamp(3) time index,
|
||||
host STRING PRIMARY KEY,
|
||||
val BIGINT,
|
||||
);
|
||||
|
||||
Affected Rows: 0
|
||||
|
||||
INSERT INTO TABLE host VALUES
|
||||
(0, 'host1', 0),
|
||||
(5000, 'host1', null),
|
||||
(10000, 'host1', 1),
|
||||
(15000, 'host1', null),
|
||||
(20000, 'host1', 2),
|
||||
(0, 'host2', 3),
|
||||
(5000, 'host2', null),
|
||||
(10000, 'host2', 4),
|
||||
(15000, 'host2', null),
|
||||
(20000, 'host2', 5);
|
||||
|
||||
Affected Rows: 10
|
||||
|
||||
-- Test range query in nest sql
|
||||
SELECT ts, host, foo FROM (SELECT ts, host, min(val) RANGE '5s' AS foo FROM host ALIGN '5s') WHERE host = 'host1' ORDER BY host, ts;
|
||||
|
||||
+---------------------+-------+-----+
|
||||
| ts | host | foo |
|
||||
+---------------------+-------+-----+
|
||||
| 1970-01-01T00:00:00 | host1 | 0 |
|
||||
| 1970-01-01T00:00:05 | host1 | |
|
||||
| 1970-01-01T00:00:10 | host1 | 1 |
|
||||
| 1970-01-01T00:00:15 | host1 | |
|
||||
| 1970-01-01T00:00:20 | host1 | 2 |
|
||||
+---------------------+-------+-----+
|
||||
|
||||
SELECT ts, b, min(c) RANGE '5s' FROM (SELECT ts, host AS b, val AS c FROM host WHERE host = 'host1') ALIGN '5s' BY (b) ORDER BY b, ts;
|
||||
|
||||
+---------------------+-------+---------------------------+
|
||||
| ts | b | MIN(c) RANGE 5s FILL NULL |
|
||||
+---------------------+-------+---------------------------+
|
||||
| 1970-01-01T00:00:00 | host1 | 0 |
|
||||
| 1970-01-01T00:00:05 | host1 | |
|
||||
| 1970-01-01T00:00:10 | host1 | 1 |
|
||||
| 1970-01-01T00:00:15 | host1 | |
|
||||
| 1970-01-01T00:00:20 | host1 | 2 |
|
||||
+---------------------+-------+---------------------------+
|
||||
|
||||
DROP TABLE host;
|
||||
|
||||
Affected Rows: 0
|
||||
|
||||
25
tests/cases/standalone/common/range/nest.sql
Normal file
@@ -0,0 +1,25 @@
|
||||
CREATE TABLE host (
|
||||
ts timestamp(3) time index,
|
||||
host STRING PRIMARY KEY,
|
||||
val BIGINT,
|
||||
);
|
||||
|
||||
INSERT INTO TABLE host VALUES
|
||||
(0, 'host1', 0),
|
||||
(5000, 'host1', null),
|
||||
(10000, 'host1', 1),
|
||||
(15000, 'host1', null),
|
||||
(20000, 'host1', 2),
|
||||
(0, 'host2', 3),
|
||||
(5000, 'host2', null),
|
||||
(10000, 'host2', 4),
|
||||
(15000, 'host2', null),
|
||||
(20000, 'host2', 5);
|
||||
|
||||
-- Test range query in nest sql
|
||||
|
||||
SELECT ts, host, foo FROM (SELECT ts, host, min(val) RANGE '5s' AS foo FROM host ALIGN '5s') WHERE host = 'host1' ORDER BY host, ts;
|
||||
|
||||
SELECT ts, b, min(c) RANGE '5s' FROM (SELECT ts, host AS b, val AS c FROM host WHERE host = 'host1') ALIGN '5s' BY (b) ORDER BY b, ts;
|
||||
|
||||
DROP TABLE host;
|
||||
44
tests/cases/standalone/common/range/precisions.result
Normal file
@@ -0,0 +1,44 @@
CREATE TABLE host_sec (
ts timestamp(0) time index,
host STRING PRIMARY KEY,
val DOUBLE,
);

Affected Rows: 0

INSERT INTO TABLE host_sec VALUES
(0, 'host1', 0),
(5, 'host1', null),
(10, 'host1', 1),
(15, 'host1', null),
(20, 'host1', 2),
(0, 'host2', 3),
(5, 'host2', null),
(10, 'host2', 4),
(15, 'host2', null),
(20, 'host2', 5);

Affected Rows: 10

-- Test on Timestamps of different precisions
SELECT ts, host, min(val) RANGE '5s' FROM host_sec ALIGN '5s' ORDER BY host, ts;

+---------------------+-------+--------------------------------------+
| ts                  | host  | MIN(host_sec.val) RANGE 5s FILL NULL |
+---------------------+-------+--------------------------------------+
| 1970-01-01T00:00:00 | host1 | 0.0                                  |
| 1970-01-01T00:00:05 | host1 |                                      |
| 1970-01-01T00:00:10 | host1 | 1.0                                  |
| 1970-01-01T00:00:15 | host1 |                                      |
| 1970-01-01T00:00:20 | host1 | 2.0                                  |
| 1970-01-01T00:00:00 | host2 | 3.0                                  |
| 1970-01-01T00:00:05 | host2 |                                      |
| 1970-01-01T00:00:10 | host2 | 4.0                                  |
| 1970-01-01T00:00:15 | host2 |                                      |
| 1970-01-01T00:00:20 | host2 | 5.0                                  |
+---------------------+-------+--------------------------------------+

DROP TABLE host_sec;

Affected Rows: 0

23
tests/cases/standalone/common/range/precisions.sql
Normal file
@@ -0,0 +1,23 @@
CREATE TABLE host_sec (
ts timestamp(0) time index,
host STRING PRIMARY KEY,
val DOUBLE,
);

INSERT INTO TABLE host_sec VALUES
(0, 'host1', 0),
(5, 'host1', null),
(10, 'host1', 1),
(15, 'host1', null),
(20, 'host1', 2),
(0, 'host2', 3),
(5, 'host2', null),
(10, 'host2', 4),
(15, 'host2', null),
(20, 'host2', 5);

-- Test on Timestamps of different precisions

SELECT ts, host, min(val) RANGE '5s' FROM host_sec ALIGN '5s' ORDER BY host, ts;

DROP TABLE host_sec;

@@ -1,324 +0,0 @@
CREATE TABLE host (
ts timestamp(3) time index,
host STRING PRIMARY KEY,
val DOUBLE,
);

Affected Rows: 0

INSERT INTO TABLE host VALUES
(0, 'host1', 0.0),
(5000, 'host1', 1.0),
(10000, 'host1', 2.0),
(15000, 'host1', 3.0),
(20000, 'host1', 4.0),
(25000, 'host1', 5.0),
(30000, 'host1', 6.0),
(35000, 'host1', 7.0),
(40000, 'host1', 8.0),
(0, 'host2', 9.0),
(5000, 'host2', 10.0),
(10000, 'host2', 11.0),
(15000, 'host2', 12.0),
(20000, 'host2', 13.0),
(25000, 'host2', 14.0),
(30000, 'host2', 15.0),
(35000, 'host2', 16.0),
(40000, 'host2', 17.0);

Affected Rows: 18

SELECT ts, host, min(val) RANGE '10s', max(val) RANGE '10s' FROM host ALIGN '5s' ORDER BY host, ts;

+---------------------+-------+---------------+---------------+
| ts                  | host  | MIN(host.val) | MAX(host.val) |
+---------------------+-------+---------------+---------------+
| 1970-01-01T00:00:00 | host1 | 0.0           | 0.0           |
| 1970-01-01T00:00:05 | host1 | 0.0           | 1.0           |
| 1970-01-01T00:00:10 | host1 | 1.0           | 2.0           |
| 1970-01-01T00:00:15 | host1 | 2.0           | 3.0           |
| 1970-01-01T00:00:20 | host1 | 3.0           | 4.0           |
| 1970-01-01T00:00:25 | host1 | 4.0           | 5.0           |
| 1970-01-01T00:00:30 | host1 | 5.0           | 6.0           |
| 1970-01-01T00:00:35 | host1 | 6.0           | 7.0           |
| 1970-01-01T00:00:40 | host1 | 7.0           | 8.0           |
| 1970-01-01T00:00:45 | host1 | 8.0           | 8.0           |
| 1970-01-01T00:00:00 | host2 | 9.0           | 9.0           |
| 1970-01-01T00:00:05 | host2 | 9.0           | 10.0          |
| 1970-01-01T00:00:10 | host2 | 10.0          | 11.0          |
| 1970-01-01T00:00:15 | host2 | 11.0          | 12.0          |
| 1970-01-01T00:00:20 | host2 | 12.0          | 13.0          |
| 1970-01-01T00:00:25 | host2 | 13.0          | 14.0          |
| 1970-01-01T00:00:30 | host2 | 14.0          | 15.0          |
| 1970-01-01T00:00:35 | host2 | 15.0          | 16.0          |
| 1970-01-01T00:00:40 | host2 | 16.0          | 17.0          |
| 1970-01-01T00:00:45 | host2 | 17.0          | 17.0          |
+---------------------+-------+---------------+---------------+

SELECT ts, host, min(val / 2.0)/2 RANGE '10s', max(val / 2.0)/2 RANGE '10s' FROM host ALIGN '5s' ORDER BY host, ts;

+---------------------+-------+---------------------------------------+---------------------------------------+
| ts                  | host  | MIN(host.val / Float64(2)) / Int64(2) | MAX(host.val / Float64(2)) / Int64(2) |
+---------------------+-------+---------------------------------------+---------------------------------------+
| 1970-01-01T00:00:00 | host1 | 0.0                                   | 0.0                                   |
| 1970-01-01T00:00:05 | host1 | 0.0                                   | 0.25                                  |
| 1970-01-01T00:00:10 | host1 | 0.25                                  | 0.5                                   |
| 1970-01-01T00:00:15 | host1 | 0.5                                   | 0.75                                  |
| 1970-01-01T00:00:20 | host1 | 0.75                                  | 1.0                                   |
| 1970-01-01T00:00:25 | host1 | 1.0                                   | 1.25                                  |
| 1970-01-01T00:00:30 | host1 | 1.25                                  | 1.5                                   |
| 1970-01-01T00:00:35 | host1 | 1.5                                   | 1.75                                  |
| 1970-01-01T00:00:40 | host1 | 1.75                                  | 2.0                                   |
| 1970-01-01T00:00:45 | host1 | 2.0                                   | 2.0                                   |
| 1970-01-01T00:00:00 | host2 | 2.25                                  | 2.25                                  |
| 1970-01-01T00:00:05 | host2 | 2.25                                  | 2.5                                   |
| 1970-01-01T00:00:10 | host2 | 2.5                                   | 2.75                                  |
| 1970-01-01T00:00:15 | host2 | 2.75                                  | 3.0                                   |
| 1970-01-01T00:00:20 | host2 | 3.0                                   | 3.25                                  |
| 1970-01-01T00:00:25 | host2 | 3.25                                  | 3.5                                   |
| 1970-01-01T00:00:30 | host2 | 3.5                                   | 3.75                                  |
| 1970-01-01T00:00:35 | host2 | 3.75                                  | 4.0                                   |
| 1970-01-01T00:00:40 | host2 | 4.0                                   | 4.25                                  |
| 1970-01-01T00:00:45 | host2 | 4.25                                  | 4.25                                  |
+---------------------+-------+---------------------------------------+---------------------------------------+

SELECT ts, covar(val, val) RANGE '10s', host FROM host ALIGN '5s' ORDER BY host, ts;

+---------------------+-------------------------------+-------+
| ts                  | COVARIANCE(host.val,host.val) | host  |
+---------------------+-------------------------------+-------+
| 1970-01-01T00:00:00 |                               | host1 |
| 1970-01-01T00:00:05 | 0.5                           | host1 |
| 1970-01-01T00:00:10 | 0.5                           | host1 |
| 1970-01-01T00:00:15 | 0.5                           | host1 |
| 1970-01-01T00:00:20 | 0.5                           | host1 |
| 1970-01-01T00:00:25 | 0.5                           | host1 |
| 1970-01-01T00:00:30 | 0.5                           | host1 |
| 1970-01-01T00:00:35 | 0.5                           | host1 |
| 1970-01-01T00:00:40 | 0.5                           | host1 |
| 1970-01-01T00:00:45 |                               | host1 |
| 1970-01-01T00:00:00 |                               | host2 |
| 1970-01-01T00:00:05 | 0.5                           | host2 |
| 1970-01-01T00:00:10 | 0.5                           | host2 |
| 1970-01-01T00:00:15 | 0.5                           | host2 |
| 1970-01-01T00:00:20 | 0.5                           | host2 |
| 1970-01-01T00:00:25 | 0.5                           | host2 |
| 1970-01-01T00:00:30 | 0.5                           | host2 |
| 1970-01-01T00:00:35 | 0.5                           | host2 |
| 1970-01-01T00:00:40 | 0.5                           | host2 |
| 1970-01-01T00:00:45 |                               | host2 |
+---------------------+-------------------------------+-------+

SELECT covar(ceil(val), floor(val)) RANGE '10s', ts, host FROM host ALIGN '5s' ORDER BY host, ts;

+--------------------------------------------+---------------------+-------+
| COVARIANCE(ceil(host.val),floor(host.val)) | ts                  | host  |
+--------------------------------------------+---------------------+-------+
|                                            | 1970-01-01T00:00:00 | host1 |
| 0.5                                        | 1970-01-01T00:00:05 | host1 |
| 0.5                                        | 1970-01-01T00:00:10 | host1 |
| 0.5                                        | 1970-01-01T00:00:15 | host1 |
| 0.5                                        | 1970-01-01T00:00:20 | host1 |
| 0.5                                        | 1970-01-01T00:00:25 | host1 |
| 0.5                                        | 1970-01-01T00:00:30 | host1 |
| 0.5                                        | 1970-01-01T00:00:35 | host1 |
| 0.5                                        | 1970-01-01T00:00:40 | host1 |
|                                            | 1970-01-01T00:00:45 | host1 |
|                                            | 1970-01-01T00:00:00 | host2 |
| 0.5                                        | 1970-01-01T00:00:05 | host2 |
| 0.5                                        | 1970-01-01T00:00:10 | host2 |
| 0.5                                        | 1970-01-01T00:00:15 | host2 |
| 0.5                                        | 1970-01-01T00:00:20 | host2 |
| 0.5                                        | 1970-01-01T00:00:25 | host2 |
| 0.5                                        | 1970-01-01T00:00:30 | host2 |
| 0.5                                        | 1970-01-01T00:00:35 | host2 |
| 0.5                                        | 1970-01-01T00:00:40 | host2 |
|                                            | 1970-01-01T00:00:45 | host2 |
+--------------------------------------------+---------------------+-------+

SELECT ts, host, covar((sin(val) + cos(val))/2.0 + 1.0, 2.0) RANGE '10s' FROM host ALIGN '5s' ORDER BY host, ts;

+---------------------+-------+--------------------------------------------------------------------------------+
| ts                  | host  | COVARIANCE(sin(host.val) + cos(host.val) / Float64(2) + Float64(1),Float64(2)) |
+---------------------+-------+--------------------------------------------------------------------------------+
| 1970-01-01T00:00:00 | host1 |                                                                                |
| 1970-01-01T00:00:05 | host1 | 0.0                                                                            |
| 1970-01-01T00:00:10 | host1 | 0.0                                                                            |
| 1970-01-01T00:00:15 | host1 | 0.0                                                                            |
| 1970-01-01T00:00:20 | host1 | 0.0                                                                            |
| 1970-01-01T00:00:25 | host1 | 0.0                                                                            |
| 1970-01-01T00:00:30 | host1 | 0.0                                                                            |
| 1970-01-01T00:00:35 | host1 | 0.0                                                                            |
| 1970-01-01T00:00:40 | host1 | 0.0                                                                            |
| 1970-01-01T00:00:45 | host1 |                                                                                |
| 1970-01-01T00:00:00 | host2 |                                                                                |
| 1970-01-01T00:00:05 | host2 | 0.0                                                                            |
| 1970-01-01T00:00:10 | host2 | 0.0                                                                            |
| 1970-01-01T00:00:15 | host2 | 0.0                                                                            |
| 1970-01-01T00:00:20 | host2 | 0.0                                                                            |
| 1970-01-01T00:00:25 | host2 | 0.0                                                                            |
| 1970-01-01T00:00:30 | host2 | 0.0                                                                            |
| 1970-01-01T00:00:35 | host2 | 0.0                                                                            |
| 1970-01-01T00:00:40 | host2 | 0.0                                                                            |
| 1970-01-01T00:00:45 | host2 |                                                                                |
+---------------------+-------+--------------------------------------------------------------------------------+

SELECT ts, min(val) RANGE '10s', host, max(val) RANGE '10s' FROM host ALIGN '1000s' ORDER BY host, ts;

+---------------------+---------------+-------+---------------+
| ts                  | MIN(host.val) | host  | MAX(host.val) |
+---------------------+---------------+-------+---------------+
| 1970-01-01T00:00:00 | 0.0           | host1 | 0.0           |
| 1970-01-01T00:00:00 | 9.0           | host2 | 9.0           |
+---------------------+---------------+-------+---------------+

SELECT ts, host, min(val) RANGE '10s', max(val) RANGE '5s' FROM host ALIGN '5s' ORDER BY host, ts;

+---------------------+-------+---------------+---------------+
| ts                  | host  | MIN(host.val) | MAX(host.val) |
+---------------------+-------+---------------+---------------+
| 1970-01-01T00:00:00 | host1 | 0.0           | 0.0           |
| 1970-01-01T00:00:05 | host1 | 0.0           | 1.0           |
| 1970-01-01T00:00:10 | host1 | 1.0           | 2.0           |
| 1970-01-01T00:00:15 | host1 | 2.0           | 3.0           |
| 1970-01-01T00:00:20 | host1 | 3.0           | 4.0           |
| 1970-01-01T00:00:25 | host1 | 4.0           | 5.0           |
| 1970-01-01T00:00:30 | host1 | 5.0           | 6.0           |
| 1970-01-01T00:00:35 | host1 | 6.0           | 7.0           |
| 1970-01-01T00:00:40 | host1 | 7.0           | 8.0           |
| 1970-01-01T00:00:45 | host1 | 8.0           |               |
| 1970-01-01T00:00:00 | host2 | 9.0           | 9.0           |
| 1970-01-01T00:00:05 | host2 | 9.0           | 10.0          |
| 1970-01-01T00:00:10 | host2 | 10.0          | 11.0          |
| 1970-01-01T00:00:15 | host2 | 11.0          | 12.0          |
| 1970-01-01T00:00:20 | host2 | 12.0          | 13.0          |
| 1970-01-01T00:00:25 | host2 | 13.0          | 14.0          |
| 1970-01-01T00:00:30 | host2 | 14.0          | 15.0          |
| 1970-01-01T00:00:35 | host2 | 15.0          | 16.0          |
| 1970-01-01T00:00:40 | host2 | 16.0          | 17.0          |
| 1970-01-01T00:00:45 | host2 | 17.0          |               |
+---------------------+-------+---------------+---------------+

SELECT ts, host, (min(val)+max(val))/4 RANGE '10s' FROM host ALIGN '5s' ORDER BY host, ts;

+---------------------+-------+------------------------------------------+
| ts                  | host  | MIN(host.val) + MAX(host.val) / Int64(4) |
+---------------------+-------+------------------------------------------+
| 1970-01-01T00:00:00 | host1 | 0.0                                      |
| 1970-01-01T00:00:05 | host1 | 0.25                                     |
| 1970-01-01T00:00:10 | host1 | 0.75                                     |
| 1970-01-01T00:00:15 | host1 | 1.25                                     |
| 1970-01-01T00:00:20 | host1 | 1.75                                     |
| 1970-01-01T00:00:25 | host1 | 2.25                                     |
| 1970-01-01T00:00:30 | host1 | 2.75                                     |
| 1970-01-01T00:00:35 | host1 | 3.25                                     |
| 1970-01-01T00:00:40 | host1 | 3.75                                     |
| 1970-01-01T00:00:45 | host1 | 4.0                                      |
| 1970-01-01T00:00:00 | host2 | 4.5                                      |
| 1970-01-01T00:00:05 | host2 | 4.75                                     |
| 1970-01-01T00:00:10 | host2 | 5.25                                     |
| 1970-01-01T00:00:15 | host2 | 5.75                                     |
| 1970-01-01T00:00:20 | host2 | 6.25                                     |
| 1970-01-01T00:00:25 | host2 | 6.75                                     |
| 1970-01-01T00:00:30 | host2 | 7.25                                     |
| 1970-01-01T00:00:35 | host2 | 7.75                                     |
| 1970-01-01T00:00:40 | host2 | 8.25                                     |
| 1970-01-01T00:00:45 | host2 | 8.5                                      |
+---------------------+-------+------------------------------------------+

SELECT ts, host, foo FROM (SELECT ts, host, (min(val)+max(val))/4 RANGE '10s' AS foo FROM host ALIGN '5s' ORDER BY host, ts) WHERE foo > 5 ORDER BY host, ts;

+---------------------+-------+------+
| ts                  | host  | foo  |
+---------------------+-------+------+
| 1970-01-01T00:00:10 | host2 | 5.25 |
| 1970-01-01T00:00:15 | host2 | 5.75 |
| 1970-01-01T00:00:20 | host2 | 6.25 |
| 1970-01-01T00:00:25 | host2 | 6.75 |
| 1970-01-01T00:00:30 | host2 | 7.25 |
| 1970-01-01T00:00:35 | host2 | 7.75 |
| 1970-01-01T00:00:40 | host2 | 8.25 |
| 1970-01-01T00:00:45 | host2 | 8.5  |
+---------------------+-------+------+

SELECT ts, b, (min(c)+max(c))/4 RANGE '10s' FROM (SELECT ts, host AS b, val AS c FROM host WHERE val > 8.0) ALIGN '5s' BY (b) ORDER BY b, ts;

+---------------------+-------+----------------------------+
| ts                  | b     | MIN(c) + MAX(c) / Int64(4) |
+---------------------+-------+----------------------------+
| 1970-01-01T00:00:00 | host2 | 4.5                        |
| 1970-01-01T00:00:05 | host2 | 4.75                       |
| 1970-01-01T00:00:10 | host2 | 5.25                       |
| 1970-01-01T00:00:15 | host2 | 5.75                       |
| 1970-01-01T00:00:20 | host2 | 6.25                       |
| 1970-01-01T00:00:25 | host2 | 6.75                       |
| 1970-01-01T00:00:30 | host2 | 7.25                       |
| 1970-01-01T00:00:35 | host2 | 7.75                       |
| 1970-01-01T00:00:40 | host2 | 8.25                       |
| 1970-01-01T00:00:45 | host2 | 8.5                        |
+---------------------+-------+----------------------------+

-- Test Invalid cases
-- 1. error timestamp
SELECT min(val) RANGE 'not_time' FROM host ALIGN '5s';

Error: 2000(InvalidSyntax), sql parser error: not a valid duration string: not_time

SELECT min(val) RANGE '5s' FROM host ALIGN 'not_time';

Error: 2000(InvalidSyntax), sql parser error: not a valid duration string: not_time

-- 2.1 no range param
SELECT min(val) FROM host ALIGN '5s';

Error: 2000(InvalidSyntax), sql parser error: RANGE argument not found in min(val)

SELECT min(val) RANGE '10s', max(val) FROM host ALIGN '5s';

Error: 2000(InvalidSyntax), sql parser error: RANGE argument not found in max(val)

-- 2.2 no align param
SELECT min(val) RANGE '5s' FROM host;

Error: 1003(Internal), Error during planning: Illegal argument in range select query

DROP TABLE host;

Affected Rows: 0

CREATE TABLE host_sec (
ts timestamp(0) time index,
host STRING PRIMARY KEY,
val DOUBLE,
);

Affected Rows: 0

INSERT INTO TABLE host_sec VALUES
(0, 'host1', 0.0),
(5, 'host1', 1.0),
(10, 'host1', 2.0),
(15, 'host1', 3.0),
(20, 'host1', 4.0),
(25, 'host1', 5.0),
(30, 'host1', 6.0),
(35, 'host1', 7.0),
(40, 'host1', 8.0),
(0, 'host2', 9.0),
(5, 'host2', 10.0),
(10, 'host2', 11.0),
(15, 'host2', 12.0),
(20, 'host2', 13.0),
(25, 'host2', 14.0),
(30, 'host2', 15.0),
(35, 'host2', 16.0),
(40, 'host2', 17.0);

Affected Rows: 18

-- TODO(ruihang): This query returns incorrect result.
-- SELECT ts, host, min(val) RANGE '10s', max(val) RANGE '10s' FROM host_sec ALIGN '5s' ORDER BY host, ts;
DROP TABLE host_sec;

Affected Rows: 0

@@ -1,96 +0,0 @@
CREATE TABLE host (
ts timestamp(3) time index,
host STRING PRIMARY KEY,
val DOUBLE,
);

INSERT INTO TABLE host VALUES
(0, 'host1', 0.0),
(5000, 'host1', 1.0),
(10000, 'host1', 2.0),
(15000, 'host1', 3.0),
(20000, 'host1', 4.0),
(25000, 'host1', 5.0),
(30000, 'host1', 6.0),
(35000, 'host1', 7.0),
(40000, 'host1', 8.0),
(0, 'host2', 9.0),
(5000, 'host2', 10.0),
(10000, 'host2', 11.0),
(15000, 'host2', 12.0),
(20000, 'host2', 13.0),
(25000, 'host2', 14.0),
(30000, 'host2', 15.0),
(35000, 'host2', 16.0),
(40000, 'host2', 17.0);

SELECT ts, host, min(val) RANGE '10s', max(val) RANGE '10s' FROM host ALIGN '5s' ORDER BY host, ts;

SELECT ts, host, min(val / 2.0)/2 RANGE '10s', max(val / 2.0)/2 RANGE '10s' FROM host ALIGN '5s' ORDER BY host, ts;

SELECT ts, covar(val, val) RANGE '10s', host FROM host ALIGN '5s' ORDER BY host, ts;

SELECT covar(ceil(val), floor(val)) RANGE '10s', ts, host FROM host ALIGN '5s' ORDER BY host, ts;

SELECT ts, host, covar((sin(val) + cos(val))/2.0 + 1.0, 2.0) RANGE '10s' FROM host ALIGN '5s' ORDER BY host, ts;

SELECT ts, min(val) RANGE '10s', host, max(val) RANGE '10s' FROM host ALIGN '1000s' ORDER BY host, ts;

SELECT ts, host, min(val) RANGE '10s', max(val) RANGE '5s' FROM host ALIGN '5s' ORDER BY host, ts;

SELECT ts, host, (min(val)+max(val))/4 RANGE '10s' FROM host ALIGN '5s' ORDER BY host, ts;

SELECT ts, host, foo FROM (SELECT ts, host, (min(val)+max(val))/4 RANGE '10s' AS foo FROM host ALIGN '5s' ORDER BY host, ts) WHERE foo > 5 ORDER BY host, ts;

SELECT ts, b, (min(c)+max(c))/4 RANGE '10s' FROM (SELECT ts, host AS b, val AS c FROM host WHERE val > 8.0) ALIGN '5s' BY (b) ORDER BY b, ts;

-- Test Invalid cases

-- 1. error timestamp

SELECT min(val) RANGE 'not_time' FROM host ALIGN '5s';

SELECT min(val) RANGE '5s' FROM host ALIGN 'not_time';

-- 2.1 no range param

SELECT min(val) FROM host ALIGN '5s';

SELECT min(val) RANGE '10s', max(val) FROM host ALIGN '5s';

-- 2.2 no align param

SELECT min(val) RANGE '5s' FROM host;

DROP TABLE host;

CREATE TABLE host_sec (
ts timestamp(0) time index,
host STRING PRIMARY KEY,
val DOUBLE,
);

INSERT INTO TABLE host_sec VALUES
(0, 'host1', 0.0),
(5, 'host1', 1.0),
(10, 'host1', 2.0),
(15, 'host1', 3.0),
(20, 'host1', 4.0),
(25, 'host1', 5.0),
(30, 'host1', 6.0),
(35, 'host1', 7.0),
(40, 'host1', 8.0),
(0, 'host2', 9.0),
(5, 'host2', 10.0),
(10, 'host2', 11.0),
(15, 'host2', 12.0),
(20, 'host2', 13.0),
(25, 'host2', 14.0),
(30, 'host2', 15.0),
(35, 'host2', 16.0),
(40, 'host2', 17.0);

-- TODO(ruihang): This query returns incorrect result.
-- SELECT ts, host, min(val) RANGE '10s', max(val) RANGE '10s' FROM host_sec ALIGN '5s' ORDER BY host, ts;

DROP TABLE host_sec;