mirror of https://github.com/GreptimeTeam/greptimedb.git
synced 2026-01-06 13:22:57 +00:00

Compare commits: recording_ ... v0.12.1

9 Commits
| Author | SHA1 | Date |
|---|---|---|
|  | 8da5949fc5 |  |
|  | db6a63ef6c |  |
|  | f166b93b02 |  |
|  | 904d560175 |  |
|  | 765d1277ee |  |
|  | ccf42a9d97 |  |
|  | 71e2fb895f |  |
|  | c9671fd669 |  |
|  | b5efc75aab |  |
.github/CODEOWNERS (2 changes, vendored)

@@ -4,7 +4,7 @@
 
 * @GreptimeTeam/db-approver
 
-## [Module] Databse Engine
+## [Module] Database Engine
 /src/index @zhongzc
 /src/mito2 @evenyag @v0y4g3r @waynexia
 /src/query @evenyag
@@ -41,7 +41,14 @@ runs:
          username: ${{ inputs.dockerhub-image-registry-username }}
          password: ${{ inputs.dockerhub-image-registry-token }}
 
-    - name: Build and push dev-builder-ubuntu image
+    - name: Set up qemu for multi-platform builds
+      uses: docker/setup-qemu-action@v3
+      with:
+        platforms: linux/amd64,linux/arm64
+        # The latest version will lead to segmentation fault.
+        image: tonistiigi/binfmt:qemu-v7.0.0-28
+
+    - name: Build and push dev-builder-ubuntu image # Build image for amd64 and arm64 platform.
       shell: bash
       if: ${{ inputs.build-dev-builder-ubuntu == 'true' }}
       run: |

@@ -52,7 +59,7 @@ runs:
          IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
          DEV_BUILDER_IMAGE_TAG=${{ inputs.version }}
 
-    - name: Build and push dev-builder-centos image
+    - name: Build and push dev-builder-centos image # Only build image for amd64 platform.
       shell: bash
       if: ${{ inputs.build-dev-builder-centos == 'true' }}
       run: |

@@ -69,8 +76,7 @@ runs:
      run: |
        make dev-builder \
          BASE_IMAGE=android \
+         BUILDX_MULTI_PLATFORM_BUILD=amd64 \
          IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
          IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
-         DEV_BUILDER_IMAGE_TAG=${{ inputs.version }} && \
-
-       docker push ${{ inputs.dockerhub-image-registry }}/${{ inputs.dockerhub-image-namespace }}/dev-builder-android:${{ inputs.version }}
+         DEV_BUILDER_IMAGE_TAG=${{ inputs.version }}
@@ -52,7 +52,7 @@ runs:
      uses: ./.github/actions/build-greptime-binary
      with:
        base-image: ubuntu
-       features: servers/dashboard,pg_kvbackend
+       features: servers/dashboard
        cargo-profile: ${{ inputs.cargo-profile }}
        artifacts-dir: greptime-linux-${{ inputs.arch }}-${{ inputs.version }}
        version: ${{ inputs.version }}

@@ -70,7 +70,7 @@ runs:
      if: ${{ inputs.arch == 'amd64' && inputs.dev-mode == 'false' }} # Builds greptime for centos if the host machine is amd64.
      with:
        base-image: centos
-       features: servers/dashboard,pg_kvbackend
+       features: servers/dashboard
        cargo-profile: ${{ inputs.cargo-profile }}
        artifacts-dir: greptime-linux-${{ inputs.arch }}-centos-${{ inputs.version }}
        version: ${{ inputs.version }}
@@ -47,7 +47,6 @@ runs:
      shell: pwsh
      run: make test sqlness-test
      env:
-       RUSTUP_WINDOWS_PATH_ADD_BIN: 1 # Workaround for https://github.com/nextest-rs/nextest/issues/1493
        RUST_BACKTRACE: 1
        SQLNESS_OPTS: "--preserve-state"
 
@@ -64,11 +64,11 @@ inputs:
   upload-max-retry-times:
     description: Max retry times for uploading artifacts to S3
     required: false
-    default: "20"
+    default: "30"
   upload-retry-timeout:
     description: Timeout for uploading artifacts to S3
     required: false
-    default: "30" # minutes
+    default: "120" # minutes
 runs:
   using: composite
   steps:
@@ -8,15 +8,15 @@ inputs:
     default: 2
     description: "Number of Datanode replicas"
   meta-replicas:
-    default: 1
+    default: 2
     description: "Number of Metasrv replicas"
   image-registry:
     default: "docker.io"
     description: "Image registry"
   image-repository:
     default: "greptime/greptimedb"
     description: "Image repository"
   image-tag:
     default: "latest"
     description: 'Image tag'
   etcd-endpoints:

@@ -32,12 +32,12 @@ runs:
  steps:
    - name: Install GreptimeDB operator
      uses: nick-fields/retry@v3
      with:
        timeout_minutes: 3
        max_attempts: 3
        shell: bash
        command: |
          helm repo add greptime https://greptimeteam.github.io/helm-charts/
          helm repo update
          helm upgrade \
            --install \

@@ -48,10 +48,10 @@ runs:
            --wait-for-jobs
    - name: Install GreptimeDB cluster
      shell: bash
      run: |
        helm upgrade \
          --install my-greptimedb \
-         --set meta.etcdEndpoints=${{ inputs.etcd-endpoints }} \
+         --set meta.backendStorage.etcd.endpoints=${{ inputs.etcd-endpoints }} \
          --set meta.enableRegionFailover=${{ inputs.enable-region-failover }} \
          --set image.registry=${{ inputs.image-registry }} \
          --set image.repository=${{ inputs.image-repository }} \

@@ -59,7 +59,7 @@ runs:
          --set base.podTemplate.main.resources.requests.cpu=50m \
          --set base.podTemplate.main.resources.requests.memory=256Mi \
          --set base.podTemplate.main.resources.limits.cpu=2000m \
-         --set base.podTemplate.main.resources.limits.memory=2Gi \
+         --set base.podTemplate.main.resources.limits.memory=3Gi \
          --set frontend.replicas=${{ inputs.frontend-replicas }} \
          --set datanode.replicas=${{ inputs.datanode-replicas }} \
          --set meta.replicas=${{ inputs.meta-replicas }} \

@@ -72,7 +72,7 @@ runs:
    - name: Wait for GreptimeDB
      shell: bash
      run: |
        while true; do
          PHASE=$(kubectl -n my-greptimedb get gtc my-greptimedb -o jsonpath='{.status.clusterPhase}')
          if [ "$PHASE" == "Running" ]; then
            echo "Cluster is ready"

@@ -86,10 +86,10 @@ runs:
    - name: Print GreptimeDB info
      if: always()
      shell: bash
      run: |
        kubectl get all --show-labels -n my-greptimedb
    - name: Describe Nodes
      if: always()
      shell: bash
      run: |
        kubectl describe nodes
@@ -2,13 +2,14 @@ meta:
   configData: |-
     [runtime]
     global_rt_size = 4
 
     [wal]
     provider = "kafka"
     broker_endpoints = ["kafka.kafka-cluster.svc.cluster.local:9092"]
     num_topics = 3
-
+    auto_prune_interval = "30s"
+    trigger_flush_threshold = 100
 
     [datanode]
     [datanode.client]
     timeout = "120s"

@@ -21,7 +22,7 @@ datanode:
     [wal]
     provider = "kafka"
     broker_endpoints = ["kafka.kafka-cluster.svc.cluster.local:9092"]
-    linger = "2ms"
+    overwrite_entry_start_id = true
 frontend:
   configData: |-
     [runtime]
.github/actions/start-runner/action.yml (2 changes, vendored)

@@ -56,7 +56,7 @@ runs:
 
    - name: Start EC2 runner
      if: startsWith(inputs.runner, 'ec2')
-     uses: machulav/ec2-github-runner@v2
+     uses: machulav/ec2-github-runner@v2.3.8
      id: start-linux-arm64-ec2-runner
      with:
        mode: start
.github/actions/stop-runner/action.yml (2 changes, vendored)

@@ -33,7 +33,7 @@ runs:
 
    - name: Stop EC2 runner
      if: ${{ inputs.label && inputs.ec2-instance-id }}
-     uses: machulav/ec2-github-runner@v2
+     uses: machulav/ec2-github-runner@v2.3.8
      with:
        mode: stop
        label: ${{ inputs.label }}
.github/labeler.yaml (15 changes, vendored, new file)

@@ -0,0 +1,15 @@
+ci:
+  - changed-files:
+      - any-glob-to-any-file: .github/**
+
+docker:
+  - changed-files:
+      - any-glob-to-any-file: docker/**
+
+documentation:
+  - changed-files:
+      - any-glob-to-any-file: docs/**
+
+dashboard:
+  - changed-files:
+      - any-glob-to-any-file: grafana/**
.github/scripts/create-version.sh (29 changes, vendored)

@@ -8,24 +8,25 @@ set -e
 # - If it's a nightly build, the version is 'nightly-YYYYMMDD-$(git rev-parse --short HEAD)', like 'nightly-20230712-e5b243c'.
 # create_version ${GIHUB_EVENT_NAME} ${NEXT_RELEASE_VERSION} ${NIGHTLY_RELEASE_PREFIX}
 function create_version() {
-  # Read from envrionment variables.
+  # Read from environment variables.
   if [ -z "$GITHUB_EVENT_NAME" ]; then
-    echo "GITHUB_EVENT_NAME is empty"
+    echo "GITHUB_EVENT_NAME is empty" >&2
     exit 1
   fi
 
   if [ -z "$NEXT_RELEASE_VERSION" ]; then
-    echo "NEXT_RELEASE_VERSION is empty"
-    exit 1
+    echo "NEXT_RELEASE_VERSION is empty, use version from Cargo.toml" >&2
+    # NOTE: Need a `v` prefix for the version string.
+    export NEXT_RELEASE_VERSION=v$(grep '^version = ' Cargo.toml | cut -d '"' -f 2 | head -n 1)
   fi
 
   if [ -z "$NIGHTLY_RELEASE_PREFIX" ]; then
-    echo "NIGHTLY_RELEASE_PREFIX is empty"
+    echo "NIGHTLY_RELEASE_PREFIX is empty" >&2
     exit 1
   fi
 
   # Reuse $NEXT_RELEASE_VERSION to identify whether it's a nightly build.
-  # It will be like 'nigtly-20230808-7d0d8dc6'.
+  # It will be like 'nightly-20230808-7d0d8dc6'.
   if [ "$NEXT_RELEASE_VERSION" = nightly ]; then
     echo "$NIGHTLY_RELEASE_PREFIX-$(date "+%Y%m%d")-$(git rev-parse --short HEAD)"
     exit 0

@@ -35,7 +36,7 @@ function create_version() {
   # It will be like 'dev-2023080819-f0e7216c'.
   if [ "$NEXT_RELEASE_VERSION" = dev ]; then
     if [ -z "$COMMIT_SHA" ]; then
-      echo "COMMIT_SHA is empty in dev build"
+      echo "COMMIT_SHA is empty in dev build" >&2
       exit 1
     fi
     echo "dev-$(date "+%Y%m%d-%s")-$(echo "$COMMIT_SHA" | cut -c1-8)"

@@ -45,7 +46,7 @@ function create_version() {
   # Note: Only output 'version=xxx' to stdout when everything is ok, so that it can be used in GitHub Actions Outputs.
   if [ "$GITHUB_EVENT_NAME" = push ]; then
     if [ -z "$GITHUB_REF_NAME" ]; then
-      echo "GITHUB_REF_NAME is empty in push event"
+      echo "GITHUB_REF_NAME is empty in push event" >&2
       exit 1
     fi
     echo "$GITHUB_REF_NAME"

@@ -54,15 +55,15 @@ function create_version() {
   elif [ "$GITHUB_EVENT_NAME" = schedule ]; then
     echo "$NEXT_RELEASE_VERSION-$NIGHTLY_RELEASE_PREFIX-$(date "+%Y%m%d")"
   else
-    echo "Unsupported GITHUB_EVENT_NAME: $GITHUB_EVENT_NAME"
+    echo "Unsupported GITHUB_EVENT_NAME: $GITHUB_EVENT_NAME" >&2
     exit 1
   fi
 }
 
 # You can run as following examples:
-# GITHUB_EVENT_NAME=push NEXT_RELEASE_VERSION=v0.4.0 NIGHTLY_RELEASE_PREFIX=nigtly GITHUB_REF_NAME=v0.3.0 ./create-version.sh
-# GITHUB_EVENT_NAME=workflow_dispatch NEXT_RELEASE_VERSION=v0.4.0 NIGHTLY_RELEASE_PREFIX=nigtly ./create-version.sh
-# GITHUB_EVENT_NAME=schedule NEXT_RELEASE_VERSION=v0.4.0 NIGHTLY_RELEASE_PREFIX=nigtly ./create-version.sh
-# GITHUB_EVENT_NAME=schedule NEXT_RELEASE_VERSION=nightly NIGHTLY_RELEASE_PREFIX=nigtly ./create-version.sh
-# GITHUB_EVENT_NAME=workflow_dispatch COMMIT_SHA=f0e7216c4bb6acce9b29a21ec2d683be2e3f984a NEXT_RELEASE_VERSION=dev NIGHTLY_RELEASE_PREFIX=nigtly ./create-version.sh
+# GITHUB_EVENT_NAME=push NEXT_RELEASE_VERSION=v0.4.0 NIGHTLY_RELEASE_PREFIX=nightly GITHUB_REF_NAME=v0.3.0 ./create-version.sh
+# GITHUB_EVENT_NAME=workflow_dispatch NEXT_RELEASE_VERSION=v0.4.0 NIGHTLY_RELEASE_PREFIX=nightly ./create-version.sh
+# GITHUB_EVENT_NAME=schedule NEXT_RELEASE_VERSION=v0.4.0 NIGHTLY_RELEASE_PREFIX=nightly ./create-version.sh
+# GITHUB_EVENT_NAME=schedule NEXT_RELEASE_VERSION=nightly NIGHTLY_RELEASE_PREFIX=nightly ./create-version.sh
+# GITHUB_EVENT_NAME=workflow_dispatch COMMIT_SHA=f0e7216c4bb6acce9b29a21ec2d683be2e3f984a NEXT_RELEASE_VERSION=dev NIGHTLY_RELEASE_PREFIX=nightly ./create-version.sh
 create_version
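The new fallback in create-version.sh derives the release version from Cargo.toml when NEXT_RELEASE_VERSION is unset. A minimal standalone sketch of that path, with an illustrative version value:

```bash
# Reproduce the Cargo.toml fallback added above, outside of CI.
version=$(grep '^version = ' Cargo.toml | cut -d '"' -f 2 | head -n 1)  # e.g. 0.12.1
export NEXT_RELEASE_VERSION="v${version}"                               # e.g. v0.12.1
echo "$NEXT_RELEASE_VERSION"
```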
.github/scripts/deploy-greptimedb.sh (6 changes, vendored)

@@ -10,7 +10,7 @@ GREPTIMEDB_IMAGE_TAG=${GREPTIMEDB_IMAGE_TAG:-latest}
 ETCD_CHART="oci://registry-1.docker.io/bitnamicharts/etcd"
 GREPTIME_CHART="https://greptimeteam.github.io/helm-charts/"
 
-# Ceate a cluster with 1 control-plane node and 5 workers.
+# Create a cluster with 1 control-plane node and 5 workers.
 function create_kind_cluster() {
   cat <<EOF | kind create cluster --name "${CLUSTER}" --image kindest/node:"$KUBERNETES_VERSION" --config=-
 kind: Cluster

@@ -68,7 +68,7 @@ function deploy_greptimedb_cluster() {
 
   helm install "$cluster_name" greptime/greptimedb-cluster \
     --set image.tag="$GREPTIMEDB_IMAGE_TAG" \
-    --set meta.etcdEndpoints="etcd.$install_namespace:2379" \
+    --set meta.backendStorage.etcd.endpoints="etcd.$install_namespace:2379" \
     -n "$install_namespace"
 
   # Wait for greptimedb cluster to be ready.

@@ -103,7 +103,7 @@ function deploy_greptimedb_cluster_with_s3_storage() {
 
   helm install "$cluster_name" greptime/greptimedb-cluster -n "$install_namespace" \
     --set image.tag="$GREPTIMEDB_IMAGE_TAG" \
-    --set meta.etcdEndpoints="etcd.$install_namespace:2379" \
+    --set meta.backendStorage.etcd.endpoints="etcd.$install_namespace:2379" \
     --set storage.s3.bucket="$AWS_CI_TEST_BUCKET" \
     --set storage.s3.region="$AWS_REGION" \
     --set storage.s3.root="$DATA_ROOT" \
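Both call sites above move from `meta.etcdEndpoints` to the nested `meta.backendStorage.etcd.endpoints` value. Since Helm's `--set a.b.c=x` is equivalent to a nested values file, the same override can be expressed as in the sketch below; the namespace and endpoint value are illustrative:

```bash
# Values-file form of the renamed --set flag used in deploy-greptimedb.sh.
cat > /tmp/etcd-values.yaml <<'EOF'
meta:
  backendStorage:
    etcd:
      endpoints: "etcd.etcd-cluster:2379"
EOF

helm install my-greptimedb greptime/greptimedb-cluster \
  -n my-greptimedb -f /tmp/etcd-values.yaml
```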
.github/scripts/update-dev-builder-version.sh (37 changes, vendored, new executable file)

@@ -0,0 +1,37 @@
+#!/bin/bash
+
+DEV_BUILDER_IMAGE_TAG=$1
+
+update_dev_builder_version() {
+  if [ -z "$DEV_BUILDER_IMAGE_TAG" ]; then
+    echo "Error: Should specify the dev-builder image tag"
+    exit 1
+  fi
+
+  # Configure Git configs.
+  git config --global user.email greptimedb-ci@greptime.com
+  git config --global user.name greptimedb-ci
+
+  # Checkout a new branch.
+  BRANCH_NAME="ci/update-dev-builder-$(date +%Y%m%d%H%M%S)"
+  git checkout -b $BRANCH_NAME
+
+  # Update the dev-builder image tag in the Makefile.
+  sed -i "s/DEV_BUILDER_IMAGE_TAG ?=.*/DEV_BUILDER_IMAGE_TAG ?= ${DEV_BUILDER_IMAGE_TAG}/g" Makefile
+
+  # Commit the changes.
+  git add Makefile
+  git commit -m "ci: update dev-builder image tag"
+  git push origin $BRANCH_NAME
+
+  # Create a Pull Request.
+  gh pr create \
+    --title "ci: update dev-builder image tag" \
+    --body "This PR updates the dev-builder image tag" \
+    --base main \
+    --head $BRANCH_NAME \
+    --reviewer zyy17 \
+    --reviewer daviderli614
+}
+
+update_dev_builder_version
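A plausible invocation from the repository root, assuming the script's single positional argument; the tag value is hypothetical:

```bash
# The argument becomes DEV_BUILDER_IMAGE_TAG and is written into the Makefile's
# "DEV_BUILDER_IMAGE_TAG ?= ..." line before the PR is opened.
./.github/scripts/update-dev-builder-version.sh 2024-12-31-abcdef12
```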
.github/scripts/update-helm-charts-version.sh (46 changes, vendored, new executable file)

@@ -0,0 +1,46 @@
+#!/bin/bash
+
+set -e
+
+VERSION=${VERSION}
+GITHUB_TOKEN=${GITHUB_TOKEN}
+
+update_helm_charts_version() {
+  # Configure Git configs.
+  git config --global user.email update-helm-charts-version@greptime.com
+  git config --global user.name update-helm-charts-version
+
+  # Clone helm-charts repository.
+  git clone "https://x-access-token:${GITHUB_TOKEN}@github.com/GreptimeTeam/helm-charts.git"
+  cd helm-charts
+
+  # Set default remote for gh CLI
+  gh repo set-default GreptimeTeam/helm-charts
+
+  # Checkout a new branch.
+  BRANCH_NAME="chore/greptimedb-${VERSION}"
+  git checkout -b $BRANCH_NAME
+
+  # Update version.
+  make update-version CHART=greptimedb-cluster VERSION=${VERSION}
+  make update-version CHART=greptimedb-standalone VERSION=${VERSION}
+
+  # Update docs.
+  make docs
+
+  # Commit the changes.
+  git add .
+  git commit -s -m "chore: Update GreptimeDB version to ${VERSION}"
+  git push origin $BRANCH_NAME
+
+  # Create a Pull Request.
+  gh pr create \
+    --title "chore: Update GreptimeDB version to ${VERSION}" \
+    --body "This PR updates the GreptimeDB version." \
+    --base main \
+    --head $BRANCH_NAME \
+    --reviewer zyy17 \
+    --reviewer daviderli614
+}
+
+update_helm_charts_version
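This script reads its inputs from the environment rather than positional arguments, so a manual run would look roughly like the sketch below; the version is illustrative and the token only needs push access to GreptimeTeam/helm-charts:

```bash
# VERSION and GITHUB_TOKEN are consumed via the environment.
export VERSION=v0.12.1
export GITHUB_TOKEN="$(gh auth token)"
./.github/scripts/update-helm-charts-version.sh
```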
.github/scripts/update-homebrew-greptme-version.sh (42 changes, vendored, new executable file)

@@ -0,0 +1,42 @@
+#!/bin/bash
+
+set -e
+
+VERSION=${VERSION}
+GITHUB_TOKEN=${GITHUB_TOKEN}
+
+update_homebrew_greptime_version() {
+  # Configure Git configs.
+  git config --global user.email update-greptime-version@greptime.com
+  git config --global user.name update-greptime-version
+
+  # Clone helm-charts repository.
+  git clone "https://x-access-token:${GITHUB_TOKEN}@github.com/GreptimeTeam/homebrew-greptime.git"
+  cd homebrew-greptime
+
+  # Set default remote for gh CLI
+  gh repo set-default GreptimeTeam/homebrew-greptime
+
+  # Checkout a new branch.
+  BRANCH_NAME="chore/greptimedb-${VERSION}"
+  git checkout -b $BRANCH_NAME
+
+  # Update version.
+  make update-greptime-version VERSION=${VERSION}
+
+  # Commit the changes.
+  git add .
+  git commit -s -m "chore: Update GreptimeDB version to ${VERSION}"
+  git push origin $BRANCH_NAME
+
+  # Create a Pull Request.
+  gh pr create \
+    --title "chore: Update GreptimeDB version to ${VERSION}" \
+    --body "This PR updates the GreptimeDB version." \
+    --base main \
+    --head $BRANCH_NAME \
+    --reviewer zyy17 \
+    --reviewer daviderli614
+}
+
+update_homebrew_greptime_version
.github/scripts/upload-artifacts-to-s3.sh (2 changes, vendored)

@@ -41,7 +41,7 @@ function upload_artifacts() {
 # Updates the latest version information in AWS S3 if UPDATE_VERSION_INFO is true.
 function update_version_info() {
   if [ "$UPDATE_VERSION_INFO" == "true" ]; then
-    # If it's the officail release(like v1.0.0, v1.0.1, v1.0.2, etc.), update latest-version.txt.
+    # If it's the official release(like v1.0.0, v1.0.1, v1.0.2, etc.), update latest-version.txt.
    if [[ "$VERSION" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
      echo "Updating latest-version.txt"
      echo "$VERSION" > latest-version.txt
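The release-detection pattern above only matches plain `vX.Y.Z` tags, so nightly and dev builds never overwrite latest-version.txt. A small standalone check with illustrative sample values:

```bash
# Exercise the official-release regex from update_version_info on sample inputs.
for VERSION in v1.0.0 v0.12.1 nightly-20230712-e5b243c dev-20230808-f0e7216c; do
  if [[ "$VERSION" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
    echo "$VERSION -> official release, latest-version.txt would be updated"
  else
    echo "$VERSION -> not an official release, skipped"
  fi
done
```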
.github/workflows/apidoc.yml (2 changes, vendored)

@@ -14,7 +14,7 @@ name: Build API docs
 
 jobs:
   apidoc:
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v4
         with:
.github/workflows/dev-build.yml (36 changes, vendored)

@@ -16,11 +16,11 @@ on:
       description: The runner uses to build linux-amd64 artifacts
       default: ec2-c6i.4xlarge-amd64
       options:
-        - ubuntu-20.04
-        - ubuntu-20.04-8-cores
-        - ubuntu-20.04-16-cores
-        - ubuntu-20.04-32-cores
-        - ubuntu-20.04-64-cores
+        - ubuntu-22.04
+        - ubuntu-22.04-8-cores
+        - ubuntu-22.04-16-cores
+        - ubuntu-22.04-32-cores
+        - ubuntu-22.04-64-cores
         - ec2-c6i.xlarge-amd64 # 4C8G
         - ec2-c6i.2xlarge-amd64 # 8C16G
         - ec2-c6i.4xlarge-amd64 # 16C32G

@@ -55,6 +55,11 @@ on:
       description: Build and push images to DockerHub and ACR
       required: false
       default: true
+    upload_artifacts_to_s3:
+      type: boolean
+      description: Whether upload artifacts to s3
+      required: false
+      default: false
     cargo_profile:
       type: choice
       description: The cargo profile to use in building GreptimeDB.

@@ -83,7 +88,7 @@ jobs:
   allocate-runners:
     name: Allocate runners
     if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     outputs:
       linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }}
       linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }}

@@ -218,7 +223,7 @@ jobs:
       build-linux-amd64-artifacts,
       build-linux-arm64-artifacts,
     ]
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     outputs:
       build-result: ${{ steps.set-build-result.outputs.build-result }}
     steps:

@@ -239,6 +244,13 @@ jobs:
          push-latest-tag: false # Don't push the latest tag to registry.
          dev-mode: true # Only build the standard images.
 
+      - name: Echo Docker image tag to step summary
+        run: |
+          echo "## Docker Image Tag" >> $GITHUB_STEP_SUMMARY
+          echo "Image Tag: \`${{ needs.allocate-runners.outputs.version }}\`" >> $GITHUB_STEP_SUMMARY
+          echo "Full Image Name: \`docker.io/${{ vars.IMAGE_NAMESPACE }}/${{ vars.DEV_BUILD_IMAGE_NAME }}:${{ needs.allocate-runners.outputs.version }}\`" >> $GITHUB_STEP_SUMMARY
+          echo "Pull Command: \`docker pull docker.io/${{ vars.IMAGE_NAMESPACE }}/${{ vars.DEV_BUILD_IMAGE_NAME }}:${{ needs.allocate-runners.outputs.version }}\`" >> $GITHUB_STEP_SUMMARY
+
       - name: Set build result
         id: set-build-result
         run: |

@@ -251,7 +263,7 @@ jobs:
       allocate-runners,
       release-images-to-dockerhub,
     ]
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     continue-on-error: true
     steps:
       - uses: actions/checkout@v4

@@ -274,7 +286,7 @@ jobs:
          aws-cn-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
          aws-cn-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
          aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
-         upload-to-s3: false
+         upload-to-s3: ${{ inputs.upload_artifacts_to_s3 }}
          dev-mode: true # Only build the standard images(exclude centos images).
          push-latest-tag: false # Don't push the latest tag to registry.
          update-version-info: false # Don't update the version info in S3.

@@ -283,7 +295,7 @@ jobs:
     name: Stop linux-amd64 runner
     # Only run this job when the runner is allocated.
     if: ${{ always() }}
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     needs: [
       allocate-runners,
       build-linux-amd64-artifacts,

@@ -309,7 +321,7 @@ jobs:
     name: Stop linux-arm64 runner
     # Only run this job when the runner is allocated.
     if: ${{ always() }}
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     needs: [
       allocate-runners,
       build-linux-arm64-artifacts,

@@ -337,7 +349,7 @@ jobs:
     needs: [
       release-images-to-dockerhub
     ]
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     permissions:
       issues: write
 
.github/workflows/develop.yml (78 changes, vendored)

@@ -22,8 +22,9 @@ concurrency:
 
 jobs:
   check-typos-and-docs:
+    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
     name: Check typos and docs
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v4
         with:

@@ -36,7 +37,8 @@ jobs:
          || (echo "'config/config.md' is not up-to-date, please run 'make config-docs'." && exit 1)
 
   license-header-check:
-    runs-on: ubuntu-20.04
+    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
+    runs-on: ubuntu-latest
     name: Check License Header
     steps:
       - uses: actions/checkout@v4

@@ -45,11 +47,12 @@ jobs:
       - uses: korandoru/hawkeye@v5
 
   check:
+    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
     name: Check
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        os: [ ubuntu-20.04 ]
+        os: [ ubuntu-latest ]
     timeout-minutes: 60
     steps:
       - uses: actions/checkout@v4

@@ -71,8 +74,9 @@ jobs:
        run: cargo check --locked --workspace --all-targets
 
   toml:
+    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
     name: Toml Check
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     timeout-minutes: 60
     steps:
       - uses: actions/checkout@v4

@@ -85,11 +89,12 @@ jobs:
        run: taplo format --check
 
   build:
+    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
     name: Build GreptimeDB binaries
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        os: [ ubuntu-20.04 ]
+        os: [ ubuntu-latest ]
     timeout-minutes: 60
     steps:
       - uses: actions/checkout@v4

@@ -127,6 +132,7 @@ jobs:
          version: current
 
   fuzztest:
+    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
     name: Fuzz Test
     needs: build
     runs-on: ubuntu-latest

@@ -183,11 +189,13 @@ jobs:
          max-total-time: 120
 
   unstable-fuzztest:
+    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
     name: Unstable Fuzz Test
     needs: build-greptime-ci
     runs-on: ubuntu-latest
     timeout-minutes: 60
     strategy:
+      fail-fast: false
       matrix:
         target: [ "unstable_fuzz_create_table_standalone" ]
     steps:

@@ -215,12 +223,12 @@ jobs:
        run: |
          sudo apt update && sudo apt install -y libfuzzer-14-dev
          cargo install cargo-fuzz cargo-gc-bin --force
-      - name: Download pre-built binariy
+      - name: Download pre-built binary
        uses: actions/download-artifact@v4
        with:
          name: bin
          path: .
-      - name: Unzip bianry
+      - name: Unzip binary
        run: |
          tar -xvf ./bin.tar.gz
          rm ./bin.tar.gz

@@ -242,13 +250,19 @@ jobs:
          name: unstable-fuzz-logs
          path: /tmp/unstable-greptime/
          retention-days: 3
+      - name: Describe pods
+        if: failure()
+        shell: bash
+        run: |
+          kubectl describe pod -n my-greptimedb
 
   build-greptime-ci:
+    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
     name: Build GreptimeDB binary (profile-CI)
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        os: [ ubuntu-20.04 ]
+        os: [ ubuntu-latest ]
     timeout-minutes: 60
     steps:
       - uses: actions/checkout@v4

@@ -267,7 +281,7 @@ jobs:
      - name: Install cargo-gc-bin
        shell: bash
        run: cargo install cargo-gc-bin --force
-      - name: Build greptime bianry
+      - name: Build greptime binary
        shell: bash
        # `cargo gc` will invoke `cargo build` with specified args
        run: cargo gc --profile ci -- --bin greptime --features pg_kvbackend

@@ -285,11 +299,13 @@ jobs:
          version: current
 
   distributed-fuzztest:
+    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
     name: Fuzz Test (Distributed, ${{ matrix.mode.name }}, ${{ matrix.target }})
     runs-on: ubuntu-latest
     needs: build-greptime-ci
     timeout-minutes: 60
     strategy:
+      fail-fast: false
       matrix:
         target: [ "fuzz_create_table", "fuzz_alter_table", "fuzz_create_database", "fuzz_create_logical_table", "fuzz_alter_logical_table", "fuzz_insert", "fuzz_insert_logical_table" ]
         mode:

@@ -319,9 +335,9 @@ jobs:
        name: Setup Minio
        uses: ./.github/actions/setup-minio
      - if: matrix.mode.kafka
-       name: Setup Kafka cluser
+       name: Setup Kafka cluster
        uses: ./.github/actions/setup-kafka-cluster
-     - name: Setup Etcd cluser
+     - name: Setup Etcd cluster
        uses: ./.github/actions/setup-etcd-cluster
      # Prepares for fuzz tests
      - uses: arduino/setup-protoc@v3

@@ -394,6 +410,11 @@ jobs:
        shell: bash
        run: |
          kubectl describe nodes
+     - name: Describe pod
+       if: failure()
+       shell: bash
+       run: |
+         kubectl describe pod -n my-greptimedb
      - name: Export kind logs
        if: failure()
        shell: bash

@@ -416,11 +437,13 @@ jobs:
          docker system prune -f
 
   distributed-fuzztest-with-chaos:
+    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
     name: Fuzz Test with Chaos (Distributed, ${{ matrix.mode.name }}, ${{ matrix.target }})
     runs-on: ubuntu-latest
     needs: build-greptime-ci
     timeout-minutes: 60
     strategy:
+      fail-fast: false
       matrix:
         target: ["fuzz_migrate_mito_regions", "fuzz_migrate_metric_regions", "fuzz_failover_mito_regions", "fuzz_failover_metric_regions"]
         mode:

@@ -465,9 +488,9 @@ jobs:
        name: Setup Minio
        uses: ./.github/actions/setup-minio
      - if: matrix.mode.kafka
-       name: Setup Kafka cluser
+       name: Setup Kafka cluster
        uses: ./.github/actions/setup-kafka-cluster
-     - name: Setup Etcd cluser
+     - name: Setup Etcd cluster
        uses: ./.github/actions/setup-etcd-cluster
      # Prepares for fuzz tests
      - uses: arduino/setup-protoc@v3

@@ -541,6 +564,11 @@ jobs:
        shell: bash
        run: |
          kubectl describe nodes
+     - name: Describe pods
+       if: failure()
+       shell: bash
+       run: |
+         kubectl describe pod -n my-greptimedb
      - name: Export kind logs
        if: failure()
        shell: bash

@@ -563,12 +591,14 @@ jobs:
          docker system prune -f
 
   sqlness:
+    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
     name: Sqlness Test (${{ matrix.mode.name }})
     needs: build
     runs-on: ${{ matrix.os }}
     strategy:
+      fail-fast: false
       matrix:
-        os: [ ubuntu-20.04 ]
+        os: [ ubuntu-latest ]
         mode:
           - name: "Basic"
             opts: ""

@@ -576,7 +606,7 @@ jobs:
           - name: "Remote WAL"
             opts: "-w kafka -k 127.0.0.1:9092"
             kafka: true
-          - name: "Pg Kvbackend"
+          - name: "PostgreSQL KvBackend"
             opts: "--setup-pg"
             kafka: false
     timeout-minutes: 60

@@ -606,8 +636,9 @@ jobs:
          retention-days: 3
 
   fmt:
+    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
     name: Rustfmt
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     timeout-minutes: 60
     steps:
       - uses: actions/checkout@v4

@@ -623,8 +654,9 @@ jobs:
        run: make fmt-check
 
   clippy:
+    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
     name: Clippy
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     timeout-minutes: 60
     steps:
       - uses: actions/checkout@v4

@@ -648,6 +680,7 @@ jobs:
        run: make clippy
 
   conflict-check:
+    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
     name: Check for conflict
     runs-on: ubuntu-latest
     steps:

@@ -658,7 +691,7 @@ jobs:
        uses: olivernybroe/action-conflict-finder@v4.0
 
   test:
-    if: github.event_name != 'merge_group'
+    if: ${{ github.repository == 'GreptimeTeam/greptimedb' && github.event_name != 'merge_group' }}
     runs-on: ubuntu-22.04-arm
     timeout-minutes: 60
     needs: [conflict-check, clippy, fmt]

@@ -704,13 +737,14 @@ jobs:
       GT_MINIO_ENDPOINT_URL: http://127.0.0.1:9000
       GT_ETCD_ENDPOINTS: http://127.0.0.1:2379
       GT_POSTGRES_ENDPOINTS: postgres://greptimedb:admin@127.0.0.1:5432/postgres
+      GT_MYSQL_ENDPOINTS: mysql://greptimedb:admin@127.0.0.1:3306/mysql
       GT_KAFKA_ENDPOINTS: 127.0.0.1:9092
       GT_KAFKA_SASL_ENDPOINTS: 127.0.0.1:9093
       UNITTEST_LOG_DIR: "__unittest_logs"
 
   coverage:
-    if: github.event_name == 'merge_group'
-    runs-on: ubuntu-20.04-8-cores
+    if: ${{ github.repository == 'GreptimeTeam/greptimedb' && github.event_name == 'merge_group' }}
+    runs-on: ubuntu-22.04-8-cores
     timeout-minutes: 60
     steps:
       - uses: actions/checkout@v4

@@ -755,6 +789,7 @@ jobs:
       GT_MINIO_ENDPOINT_URL: http://127.0.0.1:9000
       GT_ETCD_ENDPOINTS: http://127.0.0.1:2379
       GT_POSTGRES_ENDPOINTS: postgres://greptimedb:admin@127.0.0.1:5432/postgres
+      GT_MYSQL_ENDPOINTS: mysql://greptimedb:admin@127.0.0.1:3306/mysql
       GT_KAFKA_ENDPOINTS: 127.0.0.1:9092
       GT_KAFKA_SASL_ENDPOINTS: 127.0.0.1:9093
       UNITTEST_LOG_DIR: "__unittest_logs"

@@ -768,9 +803,10 @@ jobs:
          verbose: true
 
 # compat:
+#   if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
 #   name: Compatibility Test
 #   needs: build
-#   runs-on: ubuntu-20.04
+#   runs-on: ubuntu-22.04
 #   timeout-minutes: 60
 #   steps:
 #     - uses: actions/checkout@v4
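The CI build jobs above rely on cargo-gc-bin; per the workflow's own comment, `cargo gc` wraps `cargo build` with the given arguments, so the "Build greptime binary" step can be approximated locally with the sketch below:

```bash
# Rough local equivalent of the CI build step in develop.yml.
cargo install cargo-gc-bin --force
cargo gc --profile ci -- --bin greptime --features pg_kvbackend
```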
.github/workflows/docbot.yml (2 changes, vendored)

@@ -9,7 +9,7 @@ concurrency:
 
 jobs:
   docbot:
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     permissions:
       pull-requests: write
       contents: read
.github/workflows/docs.yml (16 changes, vendored)

@@ -31,7 +31,7 @@ name: CI
 jobs:
   typos:
     name: Spell Check with Typos
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v4
         with:

@@ -39,7 +39,7 @@ jobs:
       - uses: crate-ci/typos@master
 
   license-header-check:
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     name: Check License Header
     steps:
       - uses: actions/checkout@v4

@@ -49,29 +49,29 @@ jobs:
 
   check:
     name: Check
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     steps:
       - run: 'echo "No action required"'
 
   fmt:
     name: Rustfmt
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     steps:
       - run: 'echo "No action required"'
 
   clippy:
     name: Clippy
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     steps:
       - run: 'echo "No action required"'
 
   coverage:
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     steps:
       - run: 'echo "No action required"'
 
   test:
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     steps:
       - run: 'echo "No action required"'
 

@@ -80,7 +80,7 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        os: [ ubuntu-20.04 ]
+        os: [ ubuntu-latest ]
         mode:
           - name: "Basic"
           - name: "Remote WAL"
.github/workflows/nightly-build.yml (22 changes, vendored)

@@ -14,11 +14,11 @@ on:
       description: The runner uses to build linux-amd64 artifacts
       default: ec2-c6i.4xlarge-amd64
       options:
-        - ubuntu-20.04
-        - ubuntu-20.04-8-cores
-        - ubuntu-20.04-16-cores
-        - ubuntu-20.04-32-cores
-        - ubuntu-20.04-64-cores
+        - ubuntu-22.04
+        - ubuntu-22.04-8-cores
+        - ubuntu-22.04-16-cores
+        - ubuntu-22.04-32-cores
+        - ubuntu-22.04-64-cores
         - ec2-c6i.xlarge-amd64 # 4C8G
         - ec2-c6i.2xlarge-amd64 # 8C16G
         - ec2-c6i.4xlarge-amd64 # 16C32G

@@ -70,7 +70,7 @@ jobs:
   allocate-runners:
     name: Allocate runners
     if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     outputs:
       linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }}
       linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }}

@@ -182,7 +182,7 @@ jobs:
       build-linux-amd64-artifacts,
       build-linux-arm64-artifacts,
     ]
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     outputs:
       nightly-build-result: ${{ steps.set-nightly-build-result.outputs.nightly-build-result }}
     steps:

@@ -214,7 +214,7 @@ jobs:
       allocate-runners,
       release-images-to-dockerhub,
     ]
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     # When we push to ACR, it's easy to fail due to some unknown network issues.
     # However, we don't want to fail the whole workflow because of this.
     # The ACR have daily sync with DockerHub, so don't worry about the image not being updated.

@@ -249,7 +249,7 @@ jobs:
     name: Stop linux-amd64 runner
     # Only run this job when the runner is allocated.
     if: ${{ always() }}
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     needs: [
       allocate-runners,
       build-linux-amd64-artifacts,

@@ -275,7 +275,7 @@ jobs:
     name: Stop linux-arm64 runner
     # Only run this job when the runner is allocated.
     if: ${{ always() }}
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     needs: [
       allocate-runners,
       build-linux-arm64-artifacts,

@@ -303,7 +303,7 @@ jobs:
     needs: [
       release-images-to-dockerhub
     ]
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     permissions:
       issues: write
     env:
.github/workflows/nightly-ci.yml (17 changes, vendored)

@@ -13,7 +13,7 @@ jobs:
   sqlness-test:
     name: Run sqlness test
     if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
-    runs-on: ubuntu-22.04
+    runs-on: ubuntu-latest
     steps:
       - name: Checkout
         uses: actions/checkout@v4

@@ -107,7 +107,6 @@ jobs:
       CARGO_BUILD_RUSTFLAGS: "-C linker=lld-link"
       RUST_BACKTRACE: 1
       CARGO_INCREMENTAL: 0
-      RUSTUP_WINDOWS_PATH_ADD_BIN: 1 # Workaround for https://github.com/nextest-rs/nextest/issues/1493
       GT_S3_BUCKET: ${{ vars.AWS_CI_TEST_BUCKET }}
       GT_S3_ACCESS_KEY_ID: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
       GT_S3_ACCESS_KEY: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}

@@ -118,22 +117,22 @@ jobs:
     name: Run clean build on Linux
     runs-on: ubuntu-latest
     if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
-    timeout-minutes: 60
+    timeout-minutes: 45
     steps:
       - uses: actions/checkout@v4
         with:
           fetch-depth: 0
           persist-credentials: false
-      - uses: cachix/install-nix-action@v27
-        with:
-          nix_path: nixpkgs=channel:nixos-24.11
-      - run: nix develop --command cargo build
+      - uses: cachix/install-nix-action@v31
+      - run: nix develop --command cargo check --bin greptime
+        env:
+          CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=mold"
 
   check-status:
     name: Check status
     needs: [sqlness-test, sqlness-windows, test-on-windows]
     if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     outputs:
       check-result: ${{ steps.set-check-result.outputs.check-result }}
     steps:

@@ -146,7 +145,7 @@ jobs:
     if: ${{ github.repository == 'GreptimeTeam/greptimedb' && always() }} # Not requiring successful dependent jobs, always run.
     name: Send notification to Greptime team
     needs: [check-status]
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     env:
       SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
     steps:
42 .github/workflows/pr-labeling.yaml vendored Normal file
@@ -0,0 +1,42 @@
+name: 'PR Labeling'
+
+on:
+pull_request_target:
+types:
+- opened
+- synchronize
+- reopened
+
+permissions:
+contents: read
+pull-requests: write
+issues: write
+
+jobs:
+labeler:
+runs-on: ubuntu-latest
+steps:
+- name: Checkout sources
+uses: actions/checkout@v4
+
+- uses: actions/labeler@v5
+with:
+configuration-path: ".github/labeler.yaml"
+repo-token: "${{ secrets.GITHUB_TOKEN }}"
+
+size-label:
+runs-on: ubuntu-latest
+steps:
+- uses: pascalgn/size-label-action@v0.5.5
+env:
+GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
+with:
+sizes: >
+{
+"0": "XS",
+"100": "S",
+"300": "M",
+"1000": "L",
+"1500": "XL",
+"2000": "XXL"
+}
@@ -24,12 +24,20 @@ on:
 description: Release dev-builder-android image
 required: false
 default: false
+update_dev_builder_image_tag:
+type: boolean
+description: Update the DEV_BUILDER_IMAGE_TAG in Makefile and create a PR
+required: false
+default: false
+
 jobs:
 release-dev-builder-images:
 name: Release dev builder images
-if: ${{ inputs.release_dev_builder_ubuntu_image || inputs.release_dev_builder_centos_image || inputs.release_dev_builder_android_image }} # Only manually trigger this job.
-runs-on: ubuntu-20.04-16-cores
+# The jobs are triggered by the following events:
+# 1. Manually triggered workflow_dispatch event
+# 2. Push event when the PR that modifies the `rust-toolchain.toml` or `docker/dev-builder/**` is merged to main
+if: ${{ github.event_name == 'push' || inputs.release_dev_builder_ubuntu_image || inputs.release_dev_builder_centos_image || inputs.release_dev_builder_android_image }}
+runs-on: ubuntu-latest
 outputs:
 version: ${{ steps.set-version.outputs.version }}
 steps:
@@ -57,13 +65,13 @@ jobs:
 version: ${{ env.VERSION }}
 dockerhub-image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
 dockerhub-image-registry-token: ${{ secrets.DOCKERHUB_TOKEN }}
-build-dev-builder-ubuntu: ${{ inputs.release_dev_builder_ubuntu_image }}
-build-dev-builder-centos: ${{ inputs.release_dev_builder_centos_image }}
-build-dev-builder-android: ${{ inputs.release_dev_builder_android_image }}
+build-dev-builder-ubuntu: ${{ inputs.release_dev_builder_ubuntu_image || github.event_name == 'push' }}
+build-dev-builder-centos: ${{ inputs.release_dev_builder_centos_image || github.event_name == 'push' }}
+build-dev-builder-android: ${{ inputs.release_dev_builder_android_image || github.event_name == 'push' }}

 release-dev-builder-images-ecr:
 name: Release dev builder images to AWS ECR
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
 needs: [
 release-dev-builder-images
 ]
@@ -85,7 +93,7 @@ jobs:

 - name: Push dev-builder-ubuntu image
 shell: bash
-if: ${{ inputs.release_dev_builder_ubuntu_image }}
+if: ${{ inputs.release_dev_builder_ubuntu_image || github.event_name == 'push' }}
 env:
 IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
 IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
@@ -106,7 +114,7 @@ jobs:

 - name: Push dev-builder-centos image
 shell: bash
-if: ${{ inputs.release_dev_builder_centos_image }}
+if: ${{ inputs.release_dev_builder_centos_image || github.event_name == 'push' }}
 env:
 IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
 IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
@@ -127,7 +135,7 @@ jobs:

 - name: Push dev-builder-android image
 shell: bash
-if: ${{ inputs.release_dev_builder_android_image }}
+if: ${{ inputs.release_dev_builder_android_image || github.event_name == 'push' }}
 env:
 IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
 IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
@@ -148,7 +156,7 @@ jobs:

 release-dev-builder-images-cn: # Note: Be careful issue: https://github.com/containers/skopeo/issues/1874 and we decide to use the latest stable skopeo container.
 name: Release dev builder images to CN region
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
 needs: [
 release-dev-builder-images
 ]
@@ -162,7 +170,7 @@ jobs:

 - name: Push dev-builder-ubuntu image
 shell: bash
-if: ${{ inputs.release_dev_builder_ubuntu_image }}
+if: ${{ inputs.release_dev_builder_ubuntu_image || github.event_name == 'push' }}
 env:
 IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
 IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
@@ -176,7 +184,7 @@ jobs:

 - name: Push dev-builder-centos image
 shell: bash
-if: ${{ inputs.release_dev_builder_centos_image }}
+if: ${{ inputs.release_dev_builder_centos_image || github.event_name == 'push' }}
 env:
 IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
 IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
@@ -190,7 +198,7 @@ jobs:

 - name: Push dev-builder-android image
 shell: bash
-if: ${{ inputs.release_dev_builder_android_image }}
+if: ${{ inputs.release_dev_builder_android_image || github.event_name == 'push' }}
 env:
 IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
 IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
@@ -201,3 +209,24 @@ jobs:
 quay.io/skopeo/stable:latest \
 copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-android:$IMAGE_VERSION \
 docker://$ACR_IMAGE_REGISTRY/$IMAGE_NAMESPACE/dev-builder-android:$IMAGE_VERSION
+
+update-dev-builder-image-tag:
+name: Update dev-builder image tag
+runs-on: ubuntu-latest
+permissions:
+contents: write
+pull-requests: write
+if: ${{ github.event_name == 'push' || inputs.update_dev_builder_image_tag }}
+needs: [
+release-dev-builder-images
+]
+steps:
+- name: Checkout repository
+uses: actions/checkout@v4
+
+- name: Update dev-builder image tag
+shell: bash
+env:
+GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+run: |
+./.github/scripts/update-dev-builder-version.sh ${{ needs.release-dev-builder-images.outputs.version }}
94 .github/workflows/release.yml vendored
@@ -18,11 +18,11 @@ on:
 description: The runner uses to build linux-amd64 artifacts
 default: ec2-c6i.4xlarge-amd64
 options:
-- ubuntu-20.04
-- ubuntu-20.04-8-cores
-- ubuntu-20.04-16-cores
-- ubuntu-20.04-32-cores
-- ubuntu-20.04-64-cores
+- ubuntu-22.04
+- ubuntu-22.04-8-cores
+- ubuntu-22.04-16-cores
+- ubuntu-22.04-32-cores
+- ubuntu-22.04-64-cores
 - ec2-c6i.xlarge-amd64 # 4C8G
 - ec2-c6i.2xlarge-amd64 # 8C16G
 - ec2-c6i.4xlarge-amd64 # 16C32G
@@ -88,16 +88,14 @@ env:
 # Controls whether to run tests, include unit-test, integration-test and sqlness.
 DISABLE_RUN_TESTS: ${{ inputs.skip_test || vars.DEFAULT_SKIP_TEST }}

-# The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nigthly-20230313;
+# The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nightly-20230313;
 NIGHTLY_RELEASE_PREFIX: nightly
-# Note: The NEXT_RELEASE_VERSION should be modified manually by every formal release.
-NEXT_RELEASE_VERSION: v0.12.0

 jobs:
 allocate-runners:
 name: Allocate runners
 if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
 outputs:
 linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }}
 linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }}
@@ -126,7 +124,7 @@ jobs:

 # The create-version will create a global variable named 'version' in the global workflows.
 # - If it's a tag push release, the version is the tag name(${{ github.ref_name }});
-# - If it's a scheduled release, the version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-$buildTime', like v0.2.0-nigthly-20230313;
+# - If it's a scheduled release, the version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-$buildTime', like v0.2.0-nightly-20230313;
 # - If it's a manual release, the version is '${{ env.NEXT_RELEASE_VERSION }}-<short-git-sha>-YYYYMMDDSS', like v0.2.0-e5b243c-2023071245;
 - name: Create version
 id: create-version
@@ -135,7 +133,6 @@ jobs:
 env:
 GITHUB_EVENT_NAME: ${{ github.event_name }}
 GITHUB_REF_NAME: ${{ github.ref_name }}
-NEXT_RELEASE_VERSION: ${{ env.NEXT_RELEASE_VERSION }}
 NIGHTLY_RELEASE_PREFIX: ${{ env.NIGHTLY_RELEASE_PREFIX }}

 - name: Allocate linux-amd64 runner
@@ -299,7 +296,7 @@ jobs:
 build-linux-amd64-artifacts,
 build-linux-arm64-artifacts,
 ]
-runs-on: ubuntu-2004-16-cores
+runs-on: ubuntu-latest
 outputs:
 build-image-result: ${{ steps.set-build-image-result.outputs.build-image-result }}
 steps:
@@ -317,7 +314,7 @@ jobs:
 image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
 image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }}
 version: ${{ needs.allocate-runners.outputs.version }}
-push-latest-tag: true
+push-latest-tag: ${{ github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' }}

 - name: Set build image result
 id: set-build-image-result
@@ -335,7 +332,7 @@ jobs:
 build-windows-artifacts,
 release-images-to-dockerhub,
 ]
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
 # When we push to ACR, it's easy to fail due to some unknown network issues.
 # However, we don't want to fail the whole workflow because of this.
 # The ACR have daily sync with DockerHub, so don't worry about the image not being updated.
@@ -364,7 +361,7 @@ jobs:
 dev-mode: false
 upload-to-s3: true
 update-version-info: true
-push-latest-tag: true
+push-latest-tag: ${{ github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' }}

 publish-github-release:
 name: Create GitHub release and upload artifacts
@@ -377,7 +374,7 @@ jobs:
 build-windows-artifacts,
 release-images-to-dockerhub,
 ]
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
 steps:
 - uses: actions/checkout@v4
 with:
@@ -391,12 +388,12 @@ jobs:

 ### Stop runners ###
 # It's very necessary to split the job of releasing runners into 'stop-linux-amd64-runner' and 'stop-linux-arm64-runner'.
-# Because we can terminate the specified EC2 instance immediately after the job is finished without uncessary waiting.
+# Because we can terminate the specified EC2 instance immediately after the job is finished without unnecessary waiting.
 stop-linux-amd64-runner: # It's always run as the last job in the workflow to make sure that the runner is released.
 name: Stop linux-amd64 runner
 # Only run this job when the runner is allocated.
 if: ${{ always() }}
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
 needs: [
 allocate-runners,
 build-linux-amd64-artifacts,
@@ -422,7 +419,7 @@ jobs:
 name: Stop linux-arm64 runner
 # Only run this job when the runner is allocated.
 if: ${{ always() }}
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
 needs: [
 allocate-runners,
 build-linux-arm64-artifacts,
@@ -444,11 +441,11 @@ jobs:
 aws-region: ${{ vars.EC2_RUNNER_REGION }}
 github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}

-bump-doc-version:
-name: Bump doc version
+bump-downstream-repo-versions:
+name: Bump downstream repo versions
 if: ${{ github.event_name == 'push' || github.event_name == 'schedule' }}
-needs: [allocate-runners]
-runs-on: ubuntu-20.04
+needs: [allocate-runners, publish-github-release]
+runs-on: ubuntu-latest
 # Permission reference: https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs
 permissions:
 issues: write # Allows the action to create issues for cyborg.
@@ -459,13 +456,58 @@ jobs:
 fetch-depth: 0
 persist-credentials: false
 - uses: ./.github/actions/setup-cyborg
-- name: Bump doc version
+- name: Bump downstream repo versions
 working-directory: cyborg
-run: pnpm tsx bin/bump-doc-version.ts
+run: pnpm tsx bin/bump-versions.ts
 env:
+TARGET_REPOS: website,docs,demo
 VERSION: ${{ needs.allocate-runners.outputs.version }}
 GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+WEBSITE_REPO_TOKEN: ${{ secrets.WEBSITE_REPO_TOKEN }}
 DOCS_REPO_TOKEN: ${{ secrets.DOCS_REPO_TOKEN }}
+DEMO_REPO_TOKEN: ${{ secrets.DEMO_REPO_TOKEN }}
+
+bump-helm-charts-version:
+name: Bump helm charts version
+if: ${{ github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' }}
+needs: [allocate-runners, publish-github-release]
+runs-on: ubuntu-latest
+permissions:
+contents: write
+pull-requests: write
+steps:
+- name: Checkout repository
+uses: actions/checkout@v4
+with:
+fetch-depth: 0
+
+- name: Bump helm charts version
+env:
+GITHUB_TOKEN: ${{ secrets.HELM_CHARTS_REPO_TOKEN }}
+VERSION: ${{ needs.allocate-runners.outputs.version }}
+run: |
+./.github/scripts/update-helm-charts-version.sh
+
+bump-homebrew-greptime-version:
+name: Bump homebrew greptime version
+if: ${{ github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' }}
+needs: [allocate-runners, publish-github-release]
+runs-on: ubuntu-latest
+permissions:
+contents: write
+pull-requests: write
+steps:
+- name: Checkout repository
+uses: actions/checkout@v4
+with:
+fetch-depth: 0
+
+- name: Bump homebrew greptime version
+env:
+GITHUB_TOKEN: ${{ secrets.HOMEBREW_GREPTIME_REPO_TOKEN }}
+VERSION: ${{ needs.allocate-runners.outputs.version }}
+run: |
+./.github/scripts/update-homebrew-greptme-version.sh
+
 notification:
 if: ${{ github.repository == 'GreptimeTeam/greptimedb' && (github.event_name == 'push' || github.event_name == 'schedule') && always() }}
@@ -475,7 +517,7 @@ jobs:
 build-macos-artifacts,
 build-windows-artifacts,
 ]
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
 # Permission reference: https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs
 permissions:
 issues: write # Allows the action to create issues for cyborg.
5 .github/workflows/semantic-pull-request.yml vendored
@@ -13,7 +13,10 @@ concurrency:

 jobs:
 check:
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
+permissions:
+pull-requests: write # Add permissions to modify PRs
+issues: write
 timeout-minutes: 10
 steps:
 - uses: actions/checkout@v4
7 .gitignore vendored
@@ -54,3 +54,10 @@ tests-fuzz/corpus/
 # Nix
 .direnv
 .envrc
+
+## default data home
+greptimedb_data
+
+# github
+!/.github
+
142 Cargo.lock generated
Every workspace crate in Cargo.lock is bumped from version "0.12.0" to "0.12.1": api, auth, cache, catalog, cli, client, cmd, common-base, common-catalog, common-config, common-datasource, common-decimal, common-error, common-frontend, common-function, common-greptimedb-telemetry, common-grpc, common-grpc-expr, common-macro, common-mem-prof, common-meta, common-options, common-plugins, common-pprof, common-procedure, common-procedure-test, common-query, common-recordbatch, common-runtime, common-telemetry, common-test-util, common-time, common-version, common-wal, datanode, datatypes, file-engine, flow, frontend, index, log-query, log-store, meta-client, meta-srv, metric-engine, mito2, object-store, operator, partition, pipeline, plugins, promql, puffin, query, servers, session, sql, sqlness-runner, store-api, substrait, table, tests-fuzz, and tests-integration. The internal dependency entries on "substrait 0.12.0" in cli, client, cmd, datanode, flow, operator, query, and tests-integration are updated to "substrait 0.12.1" accordingly.
@@ -67,7 +67,7 @@ members = [
 resolver = "2"

 [workspace.package]
-version = "0.12.0"
+version = "0.12.1"
 edition = "2021"
 license = "Apache-2.0"

@@ -319,6 +319,7 @@
 | `selector` | String | `round_robin` | Datanode selector type.<br/>- `round_robin` (default value)<br/>- `lease_based`<br/>- `load_based`<br/>For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector". |
 | `use_memory_store` | Bool | `false` | Store data in memory. |
 | `enable_region_failover` | Bool | `false` | Whether to enable region failover.<br/>This feature is only available on GreptimeDB running on cluster mode and<br/>- Using Remote WAL<br/>- Using shared storage (e.g., s3). |
+| `node_max_idle_time` | String | `24hours` | Max allowed idle time before removing node info from metasrv memory. |
 | `enable_telemetry` | Bool | `true` | Whether to enable greptimedb telemetry. Enabled by default. |
 | `runtime` | -- | -- | The runtime options. |
 | `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
@@ -231,6 +231,7 @@ overwrite_entry_start_id = false
 # secret_access_key = "123456"
 # endpoint = "https://s3.amazonaws.com"
 # region = "us-west-2"
+# enable_virtual_host_style = false

 # Example of using Oss as the storage.
 # [storage]
@@ -50,6 +50,9 @@ use_memory_store = false
 ## - Using shared storage (e.g., s3).
 enable_region_failover = false

+## Max allowed idle time before removing node info from metasrv memory.
+node_max_idle_time = "24hours"
+
 ## Whether to enable greptimedb telemetry. Enabled by default.
 #+ enable_telemetry = true

@@ -318,6 +318,7 @@ retry_delay = "500ms"
 # secret_access_key = "123456"
 # endpoint = "https://s3.amazonaws.com"
 # region = "us-west-2"
+# enable_virtual_host_style = false

 # Example of using Oss as the storage.
 # [storage]
@@ -16,6 +16,7 @@

 mod client;
 pub mod client_manager;
+#[cfg(feature = "testing")]
 mod database;
 pub mod error;
 pub mod flow;
@@ -33,6 +34,7 @@ pub use common_recordbatch::{RecordBatches, SendableRecordBatchStream};
 use snafu::OptionExt;

 pub use self::client::Client;
+#[cfg(feature = "testing")]
 pub use self::database::Database;
 pub use self::error::{Error, Result};
 use crate::error::{IllegalDatabaseResponseSnafu, ServerSnafu};
@@ -32,7 +32,7 @@ use common_meta::key::TableMetadataManager;
 use common_telemetry::info;
 use common_telemetry::logging::TracingOptions;
 use common_version::{short_version, version};
-use flow::{FlownodeBuilder, FlownodeInstance, FrontendClient, FrontendInvoker};
+use flow::{FlownodeBuilder, FlownodeInstance, FrontendInvoker};
 use meta_client::{MetaClientOptions, MetaClientType};
 use servers::Mode;
 use snafu::{OptionExt, ResultExt};
@@ -317,8 +317,6 @@ impl StartCommand {
 Arc::new(executor),
 );

-let frontend_client = FrontendClient::from_meta_client(meta_client.clone());
-
 let flow_metadata_manager = Arc::new(FlowMetadataManager::new(cached_meta_backend.clone()));
 let flownode_builder = FlownodeBuilder::new(
 opts,
@@ -326,7 +324,6 @@ impl StartCommand {
 table_metadata_manager,
 catalog_manager.clone(),
 flow_metadata_manager,
-Arc::new(frontend_client),
 )
 .with_heartbeat_task(heartbeat_task);

@@ -54,10 +54,7 @@ use datanode::config::{DatanodeOptions, ProcedureConfig, RegionEngineConfig, Sto
 use datanode::datanode::{Datanode, DatanodeBuilder};
 use datanode::region_server::RegionServer;
 use file_engine::config::EngineConfig as FileEngineConfig;
-use flow::{
-FlowConfig, FlowWorkerManager, FlownodeBuilder, FlownodeOptions, FrontendClient,
-FrontendInvoker,
-};
+use flow::{FlowConfig, FlowWorkerManager, FlownodeBuilder, FlownodeOptions, FrontendInvoker};
 use frontend::frontend::FrontendOptions;
 use frontend::instance::builder::FrontendBuilder;
 use frontend::instance::{FrontendInstance, Instance as FeInstance, StandaloneDatanodeManager};
@@ -536,16 +533,12 @@ impl StartCommand {
 flow: opts.flow.clone(),
 ..Default::default()
 };

-let fe_server_addr = fe_opts.grpc.bind_addr.clone();
-let frontend_client = FrontendClient::from_static_grpc_addr(fe_server_addr);
 let flow_builder = FlownodeBuilder::new(
 flownode_options,
 plugins.clone(),
 table_metadata_manager.clone(),
 catalog_manager.clone(),
 flow_metadata_manager.clone(),
-Arc::new(frontend_client),
 );
 let flownode = Arc::new(
 flow_builder
@@ -57,12 +57,10 @@ pub trait ClusterInfo {
 }

 /// The key of [NodeInfo] in the storage. The format is `__meta_cluster_node_info-{cluster_id}-{role}-{node_id}`.
-///
-/// This key cannot be used to describe the `Metasrv` because the `Metasrv` does not have
-/// a `cluster_id`, it serves multiple clusters.
 #[derive(Debug, Clone, Copy, Eq, Hash, PartialEq, Serialize, Deserialize)]
 pub struct NodeInfoKey {
     /// The cluster id.
+    // todo(hl): remove cluster_id as it is not assigned anywhere.
     pub cluster_id: ClusterId,
     /// The role of the node. It can be `[Role::Datanode]` or `[Role::Frontend]`.
     pub role: Role,
@@ -232,8 +230,8 @@ impl TryFrom<Vec<u8>> for NodeInfoKey {
     }
 }

-impl From<NodeInfoKey> for Vec<u8> {
-    fn from(key: NodeInfoKey) -> Self {
+impl From<&NodeInfoKey> for Vec<u8> {
+    fn from(key: &NodeInfoKey) -> Self {
         format!(
             "{}-{}-{}-{}",
             CLUSTER_NODE_INFO_PREFIX,
@@ -315,7 +313,7 @@ mod tests {
             node_id: 2,
         };

-        let key_bytes: Vec<u8> = key.into();
+        let key_bytes: Vec<u8> = (&key).into();
         let new_key: NodeInfoKey = key_bytes.try_into().unwrap();

         assert_eq!(1, new_key.cluster_id);
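Note on the `From<&NodeInfoKey>` change above: taking a reference lets callers serialize a key without giving up ownership, which the new node expiry cleanup needs when it both deletes and logs the same key. A minimal standalone sketch of the pattern, using a toy type rather than the actual NodeInfoKey definition:

    struct Key {
        role: u8,
        node_id: u64,
    }

    // Borrowing conversion: the caller keeps the key after producing bytes.
    impl From<&Key> for Vec<u8> {
        fn from(key: &Key) -> Self {
            format!("prefix-{}-{}", key.role, key.node_id).into_bytes()
        }
    }

    fn main() {
        let key = Key { role: 0, node_id: 2 };
        let bytes: Vec<u8> = (&key).into(); // key is only borrowed here
        println!("{} bytes for node {}", bytes.len(), key.node_id); // key still usable
    }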
@@ -343,7 +343,6 @@ pub enum FlowType {
 impl FlowType {
     pub const RECORDING_RULE: &str = "recording_rule";
     pub const STREAMING: &str = "streaming";
-    pub const FLOW_TYPE_KEY: &str = "flow_type";
 }

 impl Default for FlowType {
@@ -399,8 +398,7 @@ impl From<&CreateFlowData> for CreateRequest {
         };

         let flow_type = value.flow_type.unwrap_or_default().to_string();
-        req.flow_options
-            .insert(FlowType::FLOW_TYPE_KEY.to_string(), flow_type);
+        req.flow_options.insert("flow_type".to_string(), flow_type);
         req
     }
 }
@@ -432,7 +430,7 @@ impl From<&CreateFlowData> for (FlowInfoValue, Vec<(FlowPartitionId, FlowRouteVa
             .collect::<Vec<_>>();

         let flow_type = value.flow_type.unwrap_or_default().to_string();
-        options.insert(FlowType::FLOW_TYPE_KEY.to_string(), flow_type);
+        options.insert("flow_type".to_string(), flow_type);

         let flow_info = FlowInfoValue {
             source_table_ids: value.source_table_ids.clone(),
@@ -34,6 +34,7 @@ pub mod kv_backend;
 pub mod leadership_notifier;
 pub mod lock_key;
 pub mod metrics;
+pub mod node_expiry_listener;
 pub mod node_manager;
 pub mod peer;
 pub mod range_stream;
src/common/meta/src/node_expiry_listener.rs (new file, 152 additions)
@@ -0,0 +1,152 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::sync::Mutex;
+use std::time::Duration;
+
+use common_telemetry::{debug, error, info, warn};
+use tokio::task::JoinHandle;
+use tokio::time::{interval, MissedTickBehavior};
+
+use crate::cluster::{NodeInfo, NodeInfoKey};
+use crate::error;
+use crate::kv_backend::ResettableKvBackendRef;
+use crate::leadership_notifier::LeadershipChangeListener;
+use crate::rpc::store::RangeRequest;
+use crate::rpc::KeyValue;
+
+/// [NodeExpiryListener] periodically checks all node info in memory and removes
+/// expired node info to prevent memory leak.
+pub struct NodeExpiryListener {
+    handle: Mutex<Option<JoinHandle<()>>>,
+    max_idle_time: Duration,
+    in_memory: ResettableKvBackendRef,
+}
+
+impl Drop for NodeExpiryListener {
+    fn drop(&mut self) {
+        self.stop();
+    }
+}
+
+impl NodeExpiryListener {
+    pub fn new(max_idle_time: Duration, in_memory: ResettableKvBackendRef) -> Self {
+        Self {
+            handle: Mutex::new(None),
+            max_idle_time,
+            in_memory,
+        }
+    }
+
+    async fn start(&self) {
+        let mut handle = self.handle.lock().unwrap();
+        if handle.is_none() {
+            let in_memory = self.in_memory.clone();
+
+            let max_idle_time = self.max_idle_time;
+            let ticker_loop = tokio::spawn(async move {
+                // Run clean task every minute.
+                let mut interval = interval(Duration::from_secs(60));
+                interval.set_missed_tick_behavior(MissedTickBehavior::Skip);
+                loop {
+                    interval.tick().await;
+                    if let Err(e) = Self::clean_expired_nodes(&in_memory, max_idle_time).await {
+                        error!(e; "Failed to clean expired node");
+                    }
+                }
+            });
+            *handle = Some(ticker_loop);
+        }
+    }
+
+    fn stop(&self) {
+        if let Some(handle) = self.handle.lock().unwrap().take() {
+            handle.abort();
+            info!("Node expiry listener stopped")
+        }
+    }
+
+    /// Cleans expired nodes from memory.
+    async fn clean_expired_nodes(
+        in_memory: &ResettableKvBackendRef,
+        max_idle_time: Duration,
+    ) -> error::Result<()> {
+        let node_keys = Self::list_expired_nodes(in_memory, max_idle_time).await?;
+        for key in node_keys {
+            let key_bytes: Vec<u8> = (&key).into();
+            if let Err(e) = in_memory.delete(&key_bytes, false).await {
+                warn!(e; "Failed to delete expired node: {:?}", key_bytes);
+            } else {
+                debug!("Deleted expired node key: {:?}", key);
+            }
+        }
+        Ok(())
+    }
+
+    /// Lists expired nodes that have been inactive more than `max_idle_time`.
+    async fn list_expired_nodes(
+        in_memory: &ResettableKvBackendRef,
+        max_idle_time: Duration,
+    ) -> error::Result<impl Iterator<Item = NodeInfoKey>> {
+        let prefix = NodeInfoKey::key_prefix_with_cluster_id(0);
+        let req = RangeRequest::new().with_prefix(prefix);
+        let current_time_millis = common_time::util::current_time_millis();
+        let resp = in_memory.range(req).await?;
+        Ok(resp
+            .kvs
+            .into_iter()
+            .filter_map(move |KeyValue { key, value }| {
+                let Ok(info) = NodeInfo::try_from(value).inspect_err(|e| {
+                    warn!(e; "Unrecognized node info value");
+                }) else {
+                    return None;
+                };
+                if (current_time_millis - info.last_activity_ts) > max_idle_time.as_millis() as i64
+                {
+                    NodeInfoKey::try_from(key)
+                        .inspect_err(|e| {
+                            warn!(e; "Unrecognized node info key: {:?}", info.peer);
+                        })
+                        .ok()
+                        .inspect(|node_key| {
+                            debug!("Found expired node: {:?}", node_key);
+                        })
+                } else {
+                    None
+                }
+            }))
+    }
+}
+
+#[async_trait::async_trait]
+impl LeadershipChangeListener for NodeExpiryListener {
+    fn name(&self) -> &str {
+        "NodeExpiryListener"
+    }
+
+    async fn on_leader_start(&self) -> error::Result<()> {
+        self.start().await;
+        info!(
+            "On leader start, node expiry listener started with max idle time: {:?}",
+            self.max_idle_time
+        );
+        Ok(())
+    }
+
+    async fn on_leader_stop(&self) -> error::Result<()> {
+        self.stop();
+        info!("On leader stop, node expiry listener stopped");
+        Ok(())
+    }
+}
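The heart of the new listener above is a plain idle-time check against `last_activity_ts` (in milliseconds) before the in-memory key is deleted. A self-contained sketch of just that check, with hypothetical values that are not taken from the diff:

    use std::time::Duration;

    /// Returns true when a node has been inactive for longer than `max_idle_time`.
    fn is_expired(current_time_millis: i64, last_activity_ts: i64, max_idle_time: Duration) -> bool {
        (current_time_millis - last_activity_ts) > max_idle_time.as_millis() as i64
    }

    fn main() {
        let max_idle_time = Duration::from_secs(24 * 60 * 60); // hypothetical 24h threshold
        let now = 1_700_000_000_000i64; // hypothetical "current" epoch millis
        let last_seen = now - 2 * 24 * 60 * 60 * 1000; // last heartbeat two days ago
        assert!(is_expired(now, last_seen, max_idle_time));
    }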
@@ -171,6 +171,10 @@ pub struct S3Config {
     pub secret_access_key: SecretString,
     pub endpoint: Option<String>,
     pub region: Option<String>,
+    /// Enable virtual host style so that opendal will send API requests in virtual host style instead of path style.
+    /// By default, opendal will send API to https://s3.us-east-1.amazonaws.com/bucket_name
+    /// Enabled, opendal will send API to https://bucket_name.s3.us-east-1.amazonaws.com
+    pub enable_virtual_host_style: bool,
     #[serde(flatten)]
     pub cache: ObjectStorageCacheConfig,
     pub http_client: HttpClientConfig,
@@ -185,6 +189,7 @@ impl PartialEq for S3Config {
             && self.secret_access_key.expose_secret() == other.secret_access_key.expose_secret()
             && self.endpoint == other.endpoint
             && self.region == other.region
+            && self.enable_virtual_host_style == other.enable_virtual_host_style
             && self.cache == other.cache
             && self.http_client == other.http_client
     }
@@ -289,6 +294,7 @@ impl Default for S3Config {
             root: String::default(),
             access_key_id: SecretString::from(String::default()),
             secret_access_key: SecretString::from(String::default()),
+            enable_virtual_host_style: false,
             endpoint: Option::default(),
             region: Option::default(),
             cache: ObjectStorageCacheConfig::default(),
@@ -41,10 +41,13 @@ pub(crate) async fn new_s3_object_store(s3_config: &S3Config) -> Result<ObjectSt

     if s3_config.endpoint.is_some() {
         builder = builder.endpoint(s3_config.endpoint.as_ref().unwrap());
-    };
+    }
     if s3_config.region.is_some() {
         builder = builder.region(s3_config.region.as_ref().unwrap());
-    };
+    }
+    if s3_config.enable_virtual_host_style {
+        builder = builder.enable_virtual_host_style();
+    }

     Ok(ObjectStore::new(builder)
         .context(error::InitBackendSnafu)?
@@ -32,5 +32,5 @@ pub mod types;
 pub mod value;
 pub mod vectors;

-pub use arrow;
+pub use arrow::{self, compute};
 pub use error::{Error, Result};
@@ -49,13 +49,12 @@ pub(crate) use crate::adapter::node_context::FlownodeContext;
 use crate::adapter::refill::RefillTask;
 use crate::adapter::table_source::ManagedTableSource;
 use crate::adapter::util::relation_desc_to_column_schemas_with_fallback;
-pub(crate) use crate::adapter::worker::{create_worker, WorkerHandle};
+pub(crate) use crate::adapter::worker::{create_worker, Worker, WorkerHandle};
 use crate::compute::ErrCollector;
 use crate::df_optimizer::sql_to_flow_plan;
 use crate::error::{EvalSnafu, ExternalSnafu, InternalSnafu, InvalidQuerySnafu, UnexpectedSnafu};
 use crate::expr::Batch;
 use crate::metrics::{METRIC_FLOW_INSERT_ELAPSED, METRIC_FLOW_ROWS, METRIC_FLOW_RUN_INTERVAL_MS};
-use crate::recording_rules::RecordingRuleEngine;
 use crate::repr::{self, DiffRow, RelationDesc, Row, BATCH_SIZE};

 mod flownode_impl;
@@ -172,8 +171,6 @@ pub struct FlowWorkerManager {
     flush_lock: RwLock<()>,
     /// receive a oneshot sender to send state size report
     state_report_handler: RwLock<Option<StateReportHandler>>,
-    /// engine for recording rule
-    rule_engine: RecordingRuleEngine,
 }

 /// Building FlownodeManager
@@ -188,7 +185,6 @@ impl FlowWorkerManager {
         node_id: Option<u32>,
         query_engine: Arc<dyn QueryEngine>,
         table_meta: TableMetadataManagerRef,
-        rule_engine: RecordingRuleEngine,
     ) -> Self {
         let srv_map = ManagedTableSource::new(
             table_meta.table_info_manager().clone(),
@@ -211,7 +207,6 @@ impl FlowWorkerManager {
             node_id,
             flush_lock: RwLock::new(()),
             state_report_handler: RwLock::new(None),
-            rule_engine,
         }
     }

@@ -220,6 +215,25 @@ impl FlowWorkerManager {
         self
     }

+    /// Create a flownode manager with one worker
+    pub fn new_with_workers<'s>(
+        node_id: Option<u32>,
+        query_engine: Arc<dyn QueryEngine>,
+        table_meta: TableMetadataManagerRef,
+        num_workers: usize,
+    ) -> (Self, Vec<Worker<'s>>) {
+        let mut zelf = Self::new(node_id, query_engine, table_meta);
+
+        let workers: Vec<_> = (0..num_workers)
+            .map(|_| {
+                let (handle, worker) = create_worker();
+                zelf.add_worker_handle(handle);
+                worker
+            })
+            .collect();
+        (zelf, workers)
+    }
+
     /// add a worker handler to manager, meaning this corresponding worker is under it's manage
     pub fn add_worker_handle(&mut self, handle: WorkerHandle) {
         self.worker_handles.push(handle);
@@ -737,11 +751,7 @@ pub struct CreateFlowArgs {
 /// Create&Remove flow
 impl FlowWorkerManager {
     /// remove a flow by it's id
-    #[allow(unreachable_code)]
     pub async fn remove_flow(&self, flow_id: FlowId) -> Result<(), Error> {
-        // TODO(discord9): reroute some back to streaming engine later
-        return self.rule_engine.remove_flow(flow_id).await;
-
         for handle in self.worker_handles.iter() {
             if handle.contains_flow(flow_id).await? {
                 handle.remove_flow(flow_id).await?;
@@ -757,10 +767,8 @@ impl FlowWorkerManager {
     /// steps to create task:
     /// 1. parse query into typed plan(and optional parse expire_after expr)
    /// 2. render source/sink with output table id and used input table id
-    #[allow(clippy::too_many_arguments, unreachable_code)]
+    #[allow(clippy::too_many_arguments)]
     pub async fn create_flow(&self, args: CreateFlowArgs) -> Result<Option<FlowId>, Error> {
-        // TODO(discord9): reroute some back to streaming engine later
-        return self.rule_engine.create_flow(args).await;
         let CreateFlowArgs {
             flow_id,
             sink_table_name,
@@ -153,10 +153,7 @@ impl Flownode for FlowWorkerManager {
         }
     }

-    #[allow(unreachable_code, unused)]
     async fn handle_inserts(&self, request: InsertRequests) -> Result<FlowResponse> {
-        return Ok(Default::default());
-
         // using try_read to ensure two things:
         // 1. flush wouldn't happen until inserts before it is inserted
         // 2. inserts happening concurrently with flush wouldn't be block by flush
@@ -16,7 +16,6 @@

 use std::any::Any;

-use arrow_schema::ArrowError;
 use common_error::ext::BoxedError;
 use common_error::{define_into_tonic_status, from_err_code_msg_to_header};
 use common_macro::stack_trace_debug;
@@ -157,15 +156,6 @@ pub enum Error {
         location: Location,
     },

-    #[snafu(display("Arrow error: {raw:?} in context: {context}"))]
-    Arrow {
-        #[snafu(source)]
-        raw: ArrowError,
-        context: String,
-        #[snafu(implicit)]
-        location: Location,
-    },
-
     #[snafu(display("Datafusion error: {raw:?} in context: {context}"))]
     Datafusion {
         #[snafu(source)]
@@ -240,7 +230,6 @@ impl ErrorExt for Error {
         match self {
             Self::Eval { .. }
             | Self::JoinTask { .. }
-            | Self::Arrow { .. }
             | Self::Datafusion { .. }
             | Self::InsertIntoFlow { .. } => StatusCode::Internal,
             Self::FlowAlreadyExist { .. } => StatusCode::TableAlreadyExists,
@@ -238,7 +238,6 @@ mod test {

         for (sql, current, expected) in &testcases {
             let plan = sql_to_substrait(engine.clone(), sql).await;
-
             let mut ctx = create_test_ctx();
             let flow_plan = TypedPlan::from_substrait_plan(&mut ctx, &plan)
                 .await
@@ -14,6 +14,7 @@

 //! Send heartbeat from flownode to metasrv

+use std::sync::atomic::{AtomicBool, Ordering};
 use std::sync::Arc;

 use api::v1::meta::{HeartbeatRequest, Peer};
@@ -24,7 +25,7 @@ use common_meta::heartbeat::handler::{
 use common_meta::heartbeat::mailbox::{HeartbeatMailbox, MailboxRef, OutgoingMessage};
 use common_meta::heartbeat::utils::outgoing_message_to_mailbox_message;
 use common_meta::key::flow::flow_state::FlowStat;
-use common_telemetry::{debug, error, info};
+use common_telemetry::{debug, error, info, warn};
 use greptime_proto::v1::meta::NodeInfo;
 use meta_client::client::{HeartbeatSender, HeartbeatStream, MetaClient};
 use servers::addrs;
@@ -65,6 +66,7 @@ pub struct HeartbeatTask {
     report_interval: Duration,
     retry_interval: Duration,
     resp_handler_executor: HeartbeatResponseHandlerExecutorRef,
+    running: Arc<AtomicBool>,
     query_stat_size: Option<SizeReportSender>,
 }

@@ -87,11 +89,25 @@ impl HeartbeatTask {
             report_interval: heartbeat_opts.interval,
             retry_interval: heartbeat_opts.retry_interval,
             resp_handler_executor,
+            running: Arc::new(AtomicBool::new(false)),
             query_stat_size: None,
         }
     }

     pub async fn start(&self) -> Result<(), Error> {
+        if self
+            .running
+            .compare_exchange(false, true, Ordering::AcqRel, Ordering::Acquire)
+            .is_err()
+        {
+            warn!("Heartbeat task started multiple times");
+            return Ok(());
+        }
+
+        self.create_streams().await
+    }
+
+    async fn create_streams(&self) -> Result<(), Error> {
         info!("Start to establish the heartbeat connection to metasrv.");
         let (req_sender, resp_stream) = self
             .meta_client
@@ -114,6 +130,13 @@ impl HeartbeatTask {

     pub fn shutdown(&self) {
         info!("Close heartbeat task for flownode");
+        if self
+            .running
+            .compare_exchange(true, false, Ordering::AcqRel, Ordering::Acquire)
+            .is_err()
+        {
+            warn!("Call close heartbeat task multiple times");
+        }
     }

     fn new_heartbeat_request(
@@ -258,7 +281,7 @@ impl HeartbeatTask {

             info!("Try to re-establish the heartbeat connection to metasrv.");

-            if self.start().await.is_ok() {
+            if self.create_streams().await.is_ok() {
                 break;
             }
         }
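The heartbeat changes above guard `start`/`shutdown` with an `AtomicBool` so that repeated calls are harmless. A minimal sketch of that compare-exchange idempotency pattern in isolation (the names are illustrative, not the actual HeartbeatTask):

    use std::sync::atomic::{AtomicBool, Ordering};

    struct Task {
        running: AtomicBool,
    }

    impl Task {
        fn start(&self) {
            // Only the first caller flips false -> true; later callers just warn.
            if self
                .running
                .compare_exchange(false, true, Ordering::AcqRel, Ordering::Acquire)
                .is_err()
            {
                eprintln!("task started multiple times");
                return;
            }
            // ... establish streams here ...
        }

        fn shutdown(&self) {
            // Symmetric guard: flipping true -> false fails if the task is not running.
            if self
                .running
                .compare_exchange(true, false, Ordering::AcqRel, Ordering::Acquire)
                .is_err()
            {
                eprintln!("shutdown called while not running");
            }
        }
    }

    fn main() {
        let task = Task { running: AtomicBool::new(false) };
        task.start();
        task.start(); // second call is a no-op with a warning
        task.shutdown();
    }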
@@ -33,7 +33,6 @@ mod expr;
 pub mod heartbeat;
 mod metrics;
 mod plan;
-mod recording_rules;
 mod repr;
 mod server;
 mod transform;
@@ -44,5 +43,4 @@ mod test_utils;

 pub use adapter::{FlowConfig, FlowWorkerManager, FlowWorkerManagerRef, FlownodeOptions};
 pub use error::{Error, Result};
-pub use recording_rules::FrontendClient;
 pub use server::{FlownodeBuilder, FlownodeInstance, FlownodeServer, FrontendInvoker};
@@ -28,32 +28,6 @@ lazy_static! {
         &["table_id"]
     )
     .unwrap();
-    pub static ref METRIC_FLOW_RULE_ENGINE_QUERY_TIME: HistogramVec = register_histogram_vec!(
-        "greptime_flow_rule_engine_query_time",
-        "flow rule engine query time",
-        &["flow_id"],
-        vec![
-            0.0,
-            1.,
-            3.,
-            5.,
-            10.,
-            20.,
-            30.,
-            60.,
-            2. * 60.,
-            5. * 60.,
-            10. * 60.
-        ]
-    )
-    .unwrap();
-    pub static ref METRIC_FLOW_RULE_ENGINE_SLOW_QUERY: HistogramVec = register_histogram_vec!(
-        "greptime_flow_rule_engine_slow_query",
-        "flow rule engine slow query",
-        &["flow_id", "sql", "peer"],
-        vec![60., 2. * 60., 3. * 60., 5. * 60., 10. * 60.]
-    )
-    .unwrap();
     pub static ref METRIC_FLOW_RUN_INTERVAL_MS: IntGauge =
         register_int_gauge!("greptime_flow_run_interval_ms", "flow run interval in ms").unwrap();
     pub static ref METRIC_FLOW_ROWS: IntCounterVec = register_int_counter_vec!(
@@ -1,744 +0,0 @@
-// Copyright 2023 Greptime Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//! Run flow as recording rule which is time-window-aware normal query triggered every tick set by user
-
-mod engine;
-mod frontend_client;
-
-use std::collections::HashSet;
-use std::sync::Arc;
-
-use common_error::ext::BoxedError;
-use common_recordbatch::DfRecordBatch;
-use common_time::timestamp::TimeUnit;
-use common_time::Timestamp;
-use datafusion::error::Result as DfResult;
-use datafusion::logical_expr::Expr;
-use datafusion::physical_planner::{DefaultPhysicalPlanner, PhysicalPlanner};
-use datafusion::prelude::SessionContext;
-use datafusion::sql::unparser::Unparser;
-use datafusion_common::tree_node::{Transformed, TreeNode, TreeNodeRecursion, TreeNodeRewriter};
-use datafusion_common::{Column, DFSchema, TableReference};
-use datafusion_expr::LogicalPlan;
-use datafusion_physical_expr::PhysicalExprRef;
-use datatypes::prelude::{ConcreteDataType, DataType};
-use datatypes::value::Value;
-use datatypes::vectors::{
-    TimestampMicrosecondVector, TimestampMillisecondVector, TimestampNanosecondVector,
-    TimestampSecondVector, Vector,
-};
-pub use engine::RecordingRuleEngine;
-pub use frontend_client::FrontendClient;
-use query::parser::QueryLanguageParser;
-use query::QueryEngineRef;
-use session::context::QueryContextRef;
-use snafu::{ensure, OptionExt, ResultExt};
-
-use crate::df_optimizer::apply_df_optimizer;
-use crate::error::{ArrowSnafu, DatafusionSnafu, DatatypesSnafu, ExternalSnafu, UnexpectedSnafu};
-use crate::Error;
-
-/// Convert sql to datafusion logical plan
-pub async fn sql_to_df_plan(
-    query_ctx: QueryContextRef,
-    engine: QueryEngineRef,
-    sql: &str,
-    optimize: bool,
-) -> Result<LogicalPlan, Error> {
-    let stmt = QueryLanguageParser::parse_sql(sql, &query_ctx)
-        .map_err(BoxedError::new)
-        .context(ExternalSnafu)?;
-    let plan = engine
-        .planner()
-        .plan(&stmt, query_ctx)
-        .await
-        .map_err(BoxedError::new)
-        .context(ExternalSnafu)?;
-    let plan = if optimize {
-        apply_df_optimizer(plan).await?
-    } else {
-        plan
-    };
-    Ok(plan)
-}
-
-/// Find nearest lower bound for time `current` in given `plan` for the time window expr.
-/// i.e. for time window expr being `date_bin(INTERVAL '5 minutes', ts) as time_window` and `current="2021-07-01 00:01:01.000"`,
-/// return `Some("2021-07-01 00:00:00.000")`
-/// if `plan` doesn't contain a `TIME INDEX` column, return `None`
-///
-/// Time window expr is a expr that:
-/// 1. ref only to a time index column
-/// 2. is monotonic increasing
-/// 3. show up in GROUP BY clause
-///
-/// note this plan should only contain one TableScan
-pub async fn find_plan_time_window_bound(
-    plan: &LogicalPlan,
-    current: Timestamp,
-    query_ctx: QueryContextRef,
-    engine: QueryEngineRef,
-) -> Result<(String, Option<Timestamp>, Option<Timestamp>), Error> {
-    // TODO(discord9): find the expr that do time window
-    let catalog_man = engine.engine_state().catalog_manager();
-
-    let mut table_name = None;
-    // first find the table source in the logical plan
-    plan.apply(|plan| {
-        let LogicalPlan::TableScan(table_scan) = plan else {
-            return Ok(TreeNodeRecursion::Continue);
-        };
-        table_name = Some(table_scan.table_name.clone());
-        Ok(TreeNodeRecursion::Stop)
-    })
-    .with_context(|_| DatafusionSnafu {
-        context: format!("Can't find table source in plan {plan:?}"),
-    })?;
-    let Some(table_name) = table_name else {
-        UnexpectedSnafu {
-            reason: format!("Can't find table source in plan {plan:?}"),
-        }
-        .fail()?
-    };
-
-    let current_schema = query_ctx.current_schema();
-
-    let catalog_name = table_name.catalog().unwrap_or(query_ctx.current_catalog());
-    let schema_name = table_name.schema().unwrap_or(&current_schema);
-    let table_name = table_name.table();
-
-    let Some(table_ref) = catalog_man
-        .table(catalog_name, schema_name, table_name, Some(&query_ctx))
-        .await
-        .map_err(BoxedError::new)
-        .context(ExternalSnafu)?
-    else {
-        UnexpectedSnafu {
-            reason: format!(
-                "Can't find table {table_name:?} in catalog {catalog_name:?}/{schema_name:?}"
-            ),
-        }
-        .fail()?
-    };
-
-    let schema = &table_ref.table_info().meta.schema;
-
-    let ts_index = schema.timestamp_column().context(UnexpectedSnafu {
-        reason: format!("Can't find timestamp column in table {table_name:?}"),
-    })?;
-
-    let ts_col_name = ts_index.name.clone();
-
-    let expected_time_unit = ts_index.data_type.as_timestamp().with_context(|| UnexpectedSnafu {
-        reason: format!(
-            "Expected timestamp column {ts_col_name:?} in table {table_name:?} to be timestamp, but got {ts_index:?}"
-        ),
-    })?.unit();
-
-    let ts_columns: HashSet<_> = HashSet::from_iter(vec![
-        format!("{catalog_name}.{schema_name}.{table_name}.{ts_col_name}"),
-        format!("{schema_name}.{table_name}.{ts_col_name}"),
-        format!("{table_name}.{ts_col_name}"),
-        format!("{ts_col_name}"),
-    ]);
-    let ts_columns: HashSet<_> = ts_columns
-        .into_iter()
-        .map(Column::from_qualified_name)
-        .collect();
-
-    let ts_columns_ref: HashSet<&Column> = ts_columns.iter().collect();
-
-    // find the time window expr which refers to the time index column
-    let mut time_window_expr: Option<Expr> = None;
-    let find_time_window_expr = |plan: &LogicalPlan| {
-        let LogicalPlan::Aggregate(aggregate) = plan else {
-            return Ok(TreeNodeRecursion::Continue);
-        };
-
-        for group_expr in &aggregate.group_expr {
-            let refs = group_expr.column_refs();
-            if refs.len() != 1 {
-                continue;
-            }
-            let ref_col = refs.iter().next().unwrap();
-            if ts_columns_ref.contains(ref_col) {
-                time_window_expr = Some(group_expr.clone());
-                break;
-            }
-        }
-
-        Ok(TreeNodeRecursion::Stop)
-    };
-    plan.apply(find_time_window_expr)
-        .with_context(|_| DatafusionSnafu {
-            context: format!("Can't find time window expr in plan {plan:?}"),
-        })?;
-
-    let arrow_schema = Arc::new(arrow_schema::Schema::new(vec![arrow_schema::Field::new(
-        ts_col_name.clone(),
-        ts_index.data_type.as_arrow_type(),
-        false,
-    )]));
-
-    let df_schema = DFSchema::from_field_specific_qualified_schema(
-        vec![Some(TableReference::bare(table_name))],
-        &arrow_schema,
-    )
-    .with_context(|_e| DatafusionSnafu {
-        context: format!("Failed to create DFSchema from arrow schema {arrow_schema:?}"),
-    })?;
-
-    // cast current to ts_index's type
-    let new_current = current
-        .convert_to(expected_time_unit)
-        .with_context(|| UnexpectedSnafu {
-            reason: format!("Failed to cast current timestamp {current:?} to {expected_time_unit}"),
-        })?;
-
-    // if no time_window_expr is found, return None
-    if let Some(time_window_expr) = time_window_expr {
-        let lower_bound =
-            find_expr_time_window_lower_bound(&time_window_expr, &df_schema, new_current)?;
-        let upper_bound =
-            find_expr_time_window_upper_bound(&time_window_expr, &df_schema, new_current)?;
-        Ok((ts_col_name, lower_bound, upper_bound))
-    } else {
-        Ok((ts_col_name, None, None))
-    }
-}
-
-/// Find the lower bound of time window in given `expr` and `current` timestamp.
-///
-/// i.e. for `current="2021-07-01 00:01:01.000"` and `expr=date_bin(INTERVAL '5 minutes', ts) as time_window` and `ts_col=ts`,
-/// return `Some("2021-07-01 00:00:00.000")` since it's the lower bound
-/// of current time window given the current timestamp
-///
-/// if return None, meaning this time window have no lower bound
-fn find_expr_time_window_lower_bound(
-    expr: &Expr,
-    df_schema: &DFSchema,
-    current: Timestamp,
-) -> Result<Option<Timestamp>, Error> {
-    use std::cmp::Ordering;
-
-    let phy_planner = DefaultPhysicalPlanner::default();
-
-    let phy_expr: PhysicalExprRef = phy_planner
-        .create_physical_expr(expr, df_schema, &SessionContext::new().state())
-        .with_context(|_e| DatafusionSnafu {
-            context: format!(
-                "Failed to create physical expression from {expr:?} using {df_schema:?}"
-            ),
-        })?;
-
-    let cur_time_window = eval_ts_to_ts(&phy_expr, df_schema, current)?;
-    if cur_time_window == current {
-        return Ok(Some(current));
-    }
-
-    // search to find the lower bound
-    let mut offset: i64 = 1;
-    let lower_bound;
-    let mut upper_bound = Some(current);
-    // first expontial probe to found a range for binary search
-    loop {
-        let Some(next_val) = current.value().checked_sub(offset) else {
-            // no lower bound
-            return Ok(None);
-        };
-
-        let prev_time_probe = common_time::Timestamp::new(next_val, current.unit());
-
-        let prev_time_window = eval_ts_to_ts(&phy_expr, df_schema, prev_time_probe)?;
-
-        match prev_time_window.cmp(&cur_time_window) {
-            Ordering::Less => {
-                lower_bound = Some(prev_time_probe);
-                break;
-            }
-            Ordering::Equal => {
-                upper_bound = Some(prev_time_probe);
-            }
-            Ordering::Greater => {
-                UnexpectedSnafu {
-                    reason: format!(
-                        "Unsupported time window expression, expect monotonic increasing for time window expression {expr:?}"
-                    ),
-                }
-                .fail()?
-            }
-        }
-
-        let Some(new_offset) = offset.checked_mul(2) else {
-            // no lower bound
-            return Ok(None);
-        };
-        offset = new_offset;
-    }
-
-    // binary search for the exact lower bound
-
-    ensure!(lower_bound.map(|v|v.unit())==upper_bound.map(|v|v.unit()), UnexpectedSnafu{
-        reason: format!(" unit mismatch for time window expression {expr:?}, found {lower_bound:?} and {upper_bound:?}"),
-    });
-
-    let input_time_unit = lower_bound
-        .context(UnexpectedSnafu {
-            reason: "should have lower bound",
-        })?
-        .unit();
-
-    let mut low = lower_bound
-        .context(UnexpectedSnafu {
-            reason: "should have lower bound",
-        })?
-        .value();
-    let mut high = upper_bound
-        .context(UnexpectedSnafu {
-            reason: "should have upper bound",
-        })?
-        .value();
-
-    while low < high {
-        let mid = (low + high) / 2;
-        let mid_probe = common_time::Timestamp::new(mid, input_time_unit);
-        let mid_time_window = eval_ts_to_ts(&phy_expr, df_schema, mid_probe)?;
-
-        match mid_time_window.cmp(&cur_time_window) {
-            Ordering::Less => low = mid + 1,
-            Ordering::Equal => high = mid,
-            Ordering::Greater => UnexpectedSnafu {
-                reason: format!("Binary search failed for time window expression {expr:?}"),
-            }
-            .fail()?,
-        }
-    }
-
-    let final_lower_bound_for_time_window = common_time::Timestamp::new(low, input_time_unit);
-
-    Ok(Some(final_lower_bound_for_time_window))
-}
-
-/// Find the upper bound for time window expression
-fn find_expr_time_window_upper_bound(
-    expr: &Expr,
-    df_schema: &DFSchema,
-    current: Timestamp,
-) -> Result<Option<Timestamp>, Error> {
-    use std::cmp::Ordering;
-
-    let phy_planner = DefaultPhysicalPlanner::default();
-
-    let phy_expr: PhysicalExprRef = phy_planner
-        .create_physical_expr(expr, df_schema, &SessionContext::new().state())
-        .with_context(|_e| DatafusionSnafu {
-            context: format!(
-                "Failed to create physical expression from {expr:?} using {df_schema:?}"
-            ),
-        })?;
-
-    let cur_time_window = eval_ts_to_ts(&phy_expr, df_schema, current)?;
-
-    // search to find the lower bound
-    let mut offset: i64 = 1;
-    let mut lower_bound = Some(current);
-    let upper_bound;
-    // first expontial probe to found a range for binary search
-    loop {
-        let Some(next_val) = current.value().checked_add(offset) else {
-            // no upper bound if overflow
-            return Ok(None);
-        };
-
-        let next_time_probe = common_time::Timestamp::new(next_val, current.unit());
-
-        let next_time_window = eval_ts_to_ts(&phy_expr, df_schema, next_time_probe)?;
-
-        match next_time_window.cmp(&cur_time_window) {
-            Ordering::Less => {UnexpectedSnafu {
-                reason: format!(
-                    "Unsupported time window expression, expect monotonic increasing for time window expression {expr:?}"
-                ),
-            }
-            .fail()?
-            }
-            Ordering::Equal => {
-                lower_bound = Some(next_time_probe);
-            }
-            Ordering::Greater => {
-                upper_bound = Some(next_time_probe);
-                break
-            }
-        }
-
-        let Some(new_offset) = offset.checked_mul(2) else {
-            // no upper bound if overflow
-            return Ok(None);
-        };
-        offset = new_offset;
-    }
-
-    // binary search for the exact upper bound
-
-    ensure!(lower_bound.map(|v|v.unit())==upper_bound.map(|v|v.unit()), UnexpectedSnafu{
-        reason: format!(" unit mismatch for time window expression {expr:?}, found {lower_bound:?} and {upper_bound:?}"),
-    });
-
-    let output_unit = upper_bound
-        .context(UnexpectedSnafu {
-            reason: "should have lower bound",
-        })?
-        .unit();
-
-    let mut low = lower_bound
-        .context(UnexpectedSnafu {
-            reason: "should have lower bound",
-        })?
-        .value();
-    let mut high = upper_bound
-        .context(UnexpectedSnafu {
-            reason: "should have upper bound",
-        })?
-        .value();
-    while low < high {
-        let mid = (low + high) / 2;
-        let mid_probe = common_time::Timestamp::new(mid, output_unit);
-        let mid_time_window = eval_ts_to_ts(&phy_expr, df_schema, mid_probe)?;
-
-        match mid_time_window.cmp(&cur_time_window) {
-            Ordering::Less => UnexpectedSnafu {
-                reason: format!("Binary search failed for time window expression {expr:?}"),
-            }
-            .fail()?,
-            Ordering::Equal => low = mid + 1,
-            Ordering::Greater => high = mid,
-        }
-    }
-
-    let final_upper_bound_for_time_window = common_time::Timestamp::new(high, output_unit);
-
-    Ok(Some(final_upper_bound_for_time_window))
-}
-
-fn eval_ts_to_ts(
-    phy: &PhysicalExprRef,
-    df_schema: &DFSchema,
-    input_value: Timestamp,
-) -> Result<Timestamp, Error> {
-    let ts_vector = match input_value.unit() {
-        TimeUnit::Second => {
-            TimestampSecondVector::from_vec(vec![input_value.value()]).to_arrow_array()
-        }
-        TimeUnit::Millisecond => {
-            TimestampMillisecondVector::from_vec(vec![input_value.value()]).to_arrow_array()
-        }
-        TimeUnit::Microsecond => {
-            TimestampMicrosecondVector::from_vec(vec![input_value.value()]).to_arrow_array()
-        }
-        TimeUnit::Nanosecond => {
-            TimestampNanosecondVector::from_vec(vec![input_value.value()]).to_arrow_array()
-        }
-    };
-
-    let rb = DfRecordBatch::try_new(df_schema.inner().clone(), vec![ts_vector.clone()])
-        .with_context(|_| ArrowSnafu {
-            context: format!("Failed to create record batch from {df_schema:?} and {ts_vector:?}"),
-        })?;
-
-    let eval_res = phy.evaluate(&rb).with_context(|_| DatafusionSnafu {
-        context: format!("Failed to evaluate physical expression {phy:?} on {rb:?}"),
-    })?;
-
-    let val = match eval_res {
-        datafusion_expr::ColumnarValue::Array(array) => {
-            let ty = array.data_type();
-            let ty = ConcreteDataType::from_arrow_type(ty);
-            let time_unit = if let ConcreteDataType::Timestamp(ty) = ty {
-                ty.unit()
-            } else {
-                return UnexpectedSnafu {
-                    reason: format!("Physical expression {phy:?} evaluated to non-timestamp type"),
-                }
-                .fail();
-            };
-
-            match time_unit {
-                TimeUnit::Second => TimestampSecondVector::try_from_arrow_array(array.clone())
-                    .with_context(|_| DatatypesSnafu {
-                        extra: format!("Failed to create vector from arrow array {array:?}"),
-                    })?
-                    .get(0),
-                TimeUnit::Millisecond => {
-                    TimestampMillisecondVector::try_from_arrow_array(array.clone())
-                        .with_context(|_| DatatypesSnafu {
-                            extra: format!("Failed to create vector from arrow array {array:?}"),
-                        })?
-                        .get(0)
-                }
-                TimeUnit::Microsecond => {
-                    TimestampMicrosecondVector::try_from_arrow_array(array.clone())
-                        .with_context(|_| DatatypesSnafu {
-                            extra: format!("Failed to create vector from arrow array {array:?}"),
-                        })?
-                        .get(0)
-                }
-                TimeUnit::Nanosecond => {
-                    TimestampNanosecondVector::try_from_arrow_array(array.clone())
-                        .with_context(|_| DatatypesSnafu {
-                            extra: format!("Failed to create vector from arrow array {array:?}"),
-                        })?
-                        .get(0)
-                }
-            }
-        }
-        datafusion_expr::ColumnarValue::Scalar(scalar) => Value::try_from(scalar.clone())
-            .with_context(|_| DatatypesSnafu {
-                extra: format!("Failed to convert scalar {scalar:?} to value"),
-            })?,
-    };
-
-    if let Value::Timestamp(ts) = val {
-        Ok(ts)
-    } else {
-        UnexpectedSnafu {
-            reason: format!("Expected timestamp in expression {phy:?} but got {val:?}"),
-        }
-        .fail()?
-    }
-}
-
-// TODO(discord9): a method to found out the precise time window
-
-/// Find out the `Filter` Node corresponding to outermost `WHERE` and add a new filter expr to it
-#[derive(Debug)]
-pub struct AddFilterRewriter {
-    extra_filter: Expr,
-    is_rewritten: bool,
-}
-
-impl AddFilterRewriter {
-    fn new(filter: Expr) -> Self {
-        Self {
-            extra_filter: filter,
-            is_rewritten: false,
-        }
-    }
-}
-
-impl TreeNodeRewriter for AddFilterRewriter {
-    type Node = LogicalPlan;
-    fn f_down(&mut self, node: Self::Node) -> DfResult<Transformed<Self::Node>> {
-        if self.is_rewritten {
-            return Ok(Transformed::no(node));
-        }
-        match node {
-            LogicalPlan::Filter(mut filter) if !filter.having => {
-                filter.predicate = filter.predicate.and(self.extra_filter.clone());
-                self.is_rewritten = true;
-                Ok(Transformed::yes(LogicalPlan::Filter(filter)))
-            }
-            LogicalPlan::TableScan(_) => {
-                // add a new filter
-                let filter =
-                    datafusion_expr::Filter::try_new(self.extra_filter.clone(), Arc::new(node))?;
-                self.is_rewritten = true;
-                Ok(Transformed::yes(LogicalPlan::Filter(filter)))
-            }
-            _ => Ok(Transformed::no(node)),
-        }
-    }
-}
-
-fn df_plan_to_sql(plan: &LogicalPlan) -> Result<String, Error> {
-    let unparser = Unparser::default();
-    let sql = unparser
-        .plan_to_sql(plan)
-        .with_context(|_e| DatafusionSnafu {
-            context: format!("Failed to unparse logical plan {plan:?}"),
-        })?;
-    Ok(sql.to_string())
-}
-
-#[cfg(test)]
-mod test {
-    use datafusion_common::tree_node::TreeNode;
-    use pretty_assertions::assert_eq;
-    use session::context::QueryContext;
-
-    use super::{sql_to_df_plan, *};
-    use crate::recording_rules::{df_plan_to_sql, AddFilterRewriter};
-    use crate::test_utils::create_test_query_engine;
-
-    #[tokio::test]
-    async fn test_add_filter() {
-        let testcases = vec![
-            (
-                "SELECT number FROM numbers_with_ts GROUP BY number","SELECT numbers_with_ts.number FROM numbers_with_ts WHERE (number > 4) GROUP BY numbers_with_ts.number"
-            ),
-            (
-                "SELECT number FROM numbers_with_ts WHERE number < 2 OR number >10",
-                "SELECT numbers_with_ts.number FROM numbers_with_ts WHERE (((numbers_with_ts.number < 2) OR (numbers_with_ts.number > 10)) AND (number > 4))"
-            ),
-            (
-                "SELECT date_bin('5 minutes', ts) as time_window FROM numbers_with_ts GROUP BY time_window",
-                "SELECT date_bin('5 minutes', numbers_with_ts.ts) AS time_window FROM numbers_with_ts WHERE (number > 4) GROUP BY date_bin('5 minutes', numbers_with_ts.ts)"
-            )
-        ];
-        use datafusion_expr::{col, lit};
-        let query_engine = create_test_query_engine();
-        let ctx = QueryContext::arc();
-
-        for (before, after) in testcases {
-            let sql = before;
-            let plan = sql_to_df_plan(ctx.clone(), query_engine.clone(), sql, false)
-                .await
-                .unwrap();
-
-            let mut add_filter = AddFilterRewriter::new(col("number").gt(lit(4u32)));
-            let plan = plan.rewrite(&mut add_filter).unwrap().data;
-            let new_sql = df_plan_to_sql(&plan).unwrap();
-            assert_eq!(after, new_sql);
-        }
-    }
-
-    #[tokio::test]
-    async fn test_plan_time_window_lower_bound() {
-        use datafusion_expr::{col, lit};
-        let query_engine = create_test_query_engine();
-        let ctx = QueryContext::arc();
-
-        let testcases = [
-            // same alias is not same column
-            (
-                "SELECT arrow_cast(date_bin(INTERVAL '1 MINS', numbers_with_ts.ts), 'Timestamp(Second, None)') AS ts FROM numbers_with_ts GROUP BY ts;",
-                Timestamp::new(1740394109, TimeUnit::Second),
-                (
-                    "ts".to_string(),
-                    Some(Timestamp::new(1740394109000, TimeUnit::Millisecond)),
-                    Some(Timestamp::new(1740394109001, TimeUnit::Millisecond)),
-                ),
-                "SELECT arrow_cast(date_bin(INTERVAL '1 MINS', numbers_with_ts.ts), 'Timestamp(Second, None)') AS ts FROM numbers_with_ts WHERE ((ts >= CAST('2025-02-24 10:48:29' AS TIMESTAMP)) AND (ts <= CAST('2025-02-24 10:48:29.001' AS TIMESTAMP))) GROUP BY numbers_with_ts.ts"
-            ),
-            // complex time window index
-            (
-                "SELECT arrow_cast(date_bin(INTERVAL '1 MINS', numbers_with_ts.ts), 'Timestamp(Second, None)') AS time_window FROM numbers_with_ts GROUP BY time_window;",
-                Timestamp::new(1740394109, TimeUnit::Second),
-                (
-                    "ts".to_string(),
-                    Some(Timestamp::new(1740394080, TimeUnit::Second)),
-                    Some(Timestamp::new(1740394140, TimeUnit::Second)),
-                ),
-                "SELECT arrow_cast(date_bin(INTERVAL '1 MINS', numbers_with_ts.ts), 'Timestamp(Second, None)') AS time_window FROM numbers_with_ts WHERE ((ts >= CAST('2025-02-24 10:48:00' AS TIMESTAMP)) AND (ts <= CAST('2025-02-24 10:49:00' AS TIMESTAMP))) GROUP BY arrow_cast(date_bin(INTERVAL '1 MINS', numbers_with_ts.ts), 'Timestamp(Second, None)')"
-            ),
-            // no time index
-            (
-                "SELECT date_bin('5 minutes', ts) FROM numbers_with_ts;",
-                Timestamp::new(23, TimeUnit::Millisecond),
-                ("ts".to_string(), None, None),
-                "SELECT date_bin('5 minutes', ts) FROM numbers_with_ts;"
-            ),
-            // time index
-            (
-                "SELECT date_bin('5 minutes', ts) as time_window FROM numbers_with_ts GROUP BY time_window;",
-                Timestamp::new(23, TimeUnit::Nanosecond),
-                (
-                    "ts".to_string(),
-                    Some(Timestamp::new(0, TimeUnit::Millisecond)),
-                    Some(Timestamp::new(300000, TimeUnit::Millisecond)),
-                ),
-                "SELECT date_bin('5 minutes', numbers_with_ts.ts) AS time_window FROM numbers_with_ts WHERE ((ts >= CAST('1970-01-01 00:00:00' AS TIMESTAMP)) AND (ts <= CAST('1970-01-01 00:05:00' AS TIMESTAMP))) GROUP BY date_bin('5 minutes', numbers_with_ts.ts)"
-            ),
-            // on spot
-            (
-                "SELECT date_bin('5 minutes', ts) as time_window FROM numbers_with_ts GROUP BY time_window;",
-                Timestamp::new(0, TimeUnit::Nanosecond),
-                (
-                    "ts".to_string(),
-                    Some(Timestamp::new(0, TimeUnit::Millisecond)),
-                    Some(Timestamp::new(300000, TimeUnit::Millisecond)),
-                ),
-                "SELECT date_bin('5 minutes', numbers_with_ts.ts) AS time_window FROM numbers_with_ts WHERE ((ts >= CAST('1970-01-01 00:00:00' AS TIMESTAMP)) AND (ts <= CAST('1970-01-01 00:05:00' AS TIMESTAMP))) GROUP BY date_bin('5 minutes', numbers_with_ts.ts)"
-            ),
-            // different time unit
-            (
-                "SELECT date_bin('5 minutes', ts) as time_window FROM numbers_with_ts GROUP BY time_window;",
-                Timestamp::new(23_000_000, TimeUnit::Nanosecond),
-                (
-                    "ts".to_string(),
-                    Some(Timestamp::new(0, TimeUnit::Millisecond)),
-                    Some(Timestamp::new(300000, TimeUnit::Millisecond)),
-                ),
-                "SELECT date_bin('5 minutes', numbers_with_ts.ts) AS time_window FROM numbers_with_ts WHERE ((ts >= CAST('1970-01-01 00:00:00' AS TIMESTAMP)) AND (ts <= CAST('1970-01-01 00:05:00' AS TIMESTAMP))) GROUP BY date_bin('5 minutes', numbers_with_ts.ts)"
-            ),
-            // time index with other fields
-            (
-                "SELECT sum(number) as sum_up, date_bin('5 minutes', ts) as time_window FROM numbers_with_ts GROUP BY time_window;",
-                Timestamp::new(23, TimeUnit::Millisecond),
-                (
-                    "ts".to_string(),
-                    Some(Timestamp::new(0, TimeUnit::Millisecond)),
-                    Some(Timestamp::new(300000, TimeUnit::Millisecond)),
-                ),
-                "SELECT sum(numbers_with_ts.number) AS sum_up, date_bin('5 minutes', numbers_with_ts.ts) AS time_window FROM numbers_with_ts WHERE ((ts >= CAST('1970-01-01 00:00:00' AS TIMESTAMP)) AND (ts <= CAST('1970-01-01 00:05:00' AS TIMESTAMP))) GROUP BY date_bin('5 minutes', numbers_with_ts.ts)"
-            ),
-            // time index with other pks
-            (
-                "SELECT number, date_bin('5 minutes', ts) as time_window FROM numbers_with_ts GROUP BY time_window, number;",
-                Timestamp::new(23, TimeUnit::Millisecond),
-                (
-                    "ts".to_string(),
-                    Some(Timestamp::new(0, TimeUnit::Millisecond)),
-                    Some(Timestamp::new(300000, TimeUnit::Millisecond)),
-                ),
-                "SELECT numbers_with_ts.number, date_bin('5 minutes', numbers_with_ts.ts) AS time_window FROM numbers_with_ts WHERE ((ts >= CAST('1970-01-01 00:00:00' AS TIMESTAMP)) AND (ts <= CAST('1970-01-01 00:05:00' AS TIMESTAMP))) GROUP BY date_bin('5 minutes', numbers_with_ts.ts), numbers_with_ts.number"
-            ),
-        ];
-
-        for (sql, current, expected, unparsed) in testcases {
-            let plan = sql_to_df_plan(ctx.clone(), query_engine.clone(), sql, true)
-                .await
-                .unwrap();
-
-            let real =
-                find_plan_time_window_bound(&plan, current, ctx.clone(), query_engine.clone())
-                    .await
-                    .unwrap();
-            assert_eq!(expected, real);
-
-            let plan = sql_to_df_plan(ctx.clone(), query_engine.clone(), sql, false)
-                .await
-                .unwrap();
-            let (col_name, lower, upper) = real;
-            let new_sql = if lower.is_some() {
-                let to_df_literal = |value| {
-                    let value = Value::from(value);
-
-                    value.try_to_scalar_value(&value.data_type()).unwrap()
-                };
-                let lower = to_df_literal(lower.unwrap());
-                let upper = to_df_literal(upper.unwrap());
-                let expr = col(&col_name)
-                    .gt_eq(lit(lower))
-                    .and(col(&col_name).lt_eq(lit(upper)));
-                let mut add_filter = AddFilterRewriter::new(expr);
-                let plan = plan.rewrite(&mut add_filter).unwrap().data;
-                df_plan_to_sql(&plan).unwrap()
-            } else {
-                sql.to_string()
-            };
-            assert_eq!(unparsed, new_sql);
-        }
-    }
-}
@@ -1,407 +0,0 @@
|
|||||||
// Copyright 2023 Greptime Team
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
use std::collections::BTreeMap;
|
|
||||||
use std::sync::Arc;
|
|
||||||
use std::time::{Duration, SystemTime, UNIX_EPOCH};
|
|
||||||
|
|
||||||
use common_meta::ddl::create_flow::FlowType;
|
|
||||||
use common_telemetry::tracing::warn;
|
|
||||||
use common_telemetry::{debug, info};
|
|
||||||
use common_time::Timestamp;
|
|
||||||
use datafusion_common::tree_node::TreeNode;
|
|
||||||
use datatypes::value::Value;
|
|
||||||
use query::QueryEngineRef;
|
|
||||||
use session::context::QueryContextRef;
|
|
||||||
use snafu::{ensure, ResultExt};
|
|
||||||
use tokio::sync::oneshot::error::TryRecvError;
|
|
||||||
use tokio::sync::{oneshot, RwLock};
|
|
||||||
use tokio::time::Instant;
|
|
||||||
|
|
||||||
use super::frontend_client::FrontendClient;
|
|
||||||
use super::{df_plan_to_sql, AddFilterRewriter};
|
|
||||||
use crate::adapter::{CreateFlowArgs, FlowId};
|
|
||||||
use crate::error::{DatafusionSnafu, DatatypesSnafu, FlowAlreadyExistSnafu, UnexpectedSnafu};
|
|
||||||
use crate::metrics::{METRIC_FLOW_RULE_ENGINE_QUERY_TIME, METRIC_FLOW_RULE_ENGINE_SLOW_QUERY};
|
|
||||||
use crate::recording_rules::{find_plan_time_window_bound, sql_to_df_plan};
|
|
||||||
use crate::Error;
|
|
||||||
|
|
||||||
/// TODO(discord9): make those constants configurable
|
|
||||||
/// The default rule engine query timeout is 10 minutes
|
|
||||||
pub const DEFAULT_RULE_ENGINE_QUERY_TIMEOUT: Duration = Duration::from_secs(10 * 60);
|
|
||||||
|
|
||||||
/// will output a warn log for any query that runs for more that 1 minutes, and also every 1 minutes when that query is still running
|
|
||||||
pub const SLOW_QUERY_THRESHOLD: Duration = Duration::from_secs(60);
|
|
||||||
|
|
||||||
/// TODO(discord9): determine how to configure refresh rate
|
|
||||||
pub struct RecordingRuleEngine {
|
|
||||||
tasks: RwLock<BTreeMap<FlowId, RecordingRuleTask>>,
|
|
||||||
shutdown_txs: RwLock<BTreeMap<FlowId, oneshot::Sender<()>>>,
|
|
||||||
frontend_client: Arc<FrontendClient>,
|
|
||||||
engine: QueryEngineRef,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl RecordingRuleEngine {
|
|
||||||
pub fn new(frontend_client: Arc<FrontendClient>, engine: QueryEngineRef) -> Self {
|
|
||||||
Self {
|
|
||||||
tasks: Default::default(),
|
|
||||||
shutdown_txs: Default::default(),
|
|
||||||
frontend_client,
|
|
||||||
engine,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
const MIN_REFRESH_DURATION: Duration = Duration::new(5, 0);
|
|
||||||
|
|
||||||
impl RecordingRuleEngine {
|
|
||||||
pub async fn create_flow(&self, args: CreateFlowArgs) -> Result<Option<FlowId>, Error> {
|
|
||||||
let CreateFlowArgs {
|
|
||||||
flow_id,
|
|
||||||
sink_table_name,
|
|
||||||
source_table_ids: _,
|
|
||||||
create_if_not_exists,
|
|
||||||
or_replace,
|
|
||||||
expire_after,
|
|
||||||
comment: _,
|
|
||||||
sql,
|
|
||||||
flow_options,
|
|
||||||
query_ctx,
|
|
||||||
} = args;
|
|
||||||
|
|
||||||
// or replace logic
|
|
||||||
{
|
|
||||||
let is_exist = self.tasks.read().await.contains_key(&flow_id);
|
|
||||||
match (create_if_not_exists, or_replace, is_exist) {
|
|
||||||
// if replace, ignore that old flow exists
|
|
||||||
(_, true, true) => {
|
|
||||||
info!("Replacing flow with id={}", flow_id);
|
|
||||||
}
|
|
||||||
(false, false, true) => FlowAlreadyExistSnafu { id: flow_id }.fail()?,
|
|
||||||
// already exists, and not replace, return None
|
|
||||||
(true, false, true) => {
|
|
||||||
info!("Flow with id={} already exists, do nothing", flow_id);
|
|
||||||
return Ok(None);
|
|
||||||
}
|
|
||||||
|
|
||||||
// continue as normal
|
|
||||||
(_, _, false) => (),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
let flow_type = flow_options.get(FlowType::FLOW_TYPE_KEY);
|
|
||||||
|
|
||||||
ensure!(
|
|
||||||
flow_type == Some(&FlowType::RecordingRule.to_string()) || flow_type.is_none(),
|
|
||||||
UnexpectedSnafu {
|
|
||||||
reason: format!("Flow type is not RecordingRule nor None, got {flow_type:?}")
|
|
||||||
}
|
|
||||||
);
|
|
||||||
|
|
||||||
let Some(query_ctx) = query_ctx else {
|
|
||||||
UnexpectedSnafu {
|
|
||||||
reason: "Query context is None".to_string(),
|
|
||||||
}
|
|
||||||
.fail()?
|
|
||||||
};
|
|
||||||
|
|
||||||
let (tx, rx) = oneshot::channel();
|
|
||||||
let task = RecordingRuleTask::new(
|
|
||||||
flow_id,
|
|
||||||
&sql,
|
|
||||||
expire_after,
|
|
||||||
sink_table_name,
|
|
||||||
Arc::new(query_ctx),
|
|
||||||
rx,
|
|
||||||
);
|
|
||||||
|
|
||||||
let task_inner = task.clone();
|
|
||||||
let engine = self.engine.clone();
|
|
||||||
let frontend = self.frontend_client.clone();
|
|
||||||
|
|
||||||
// TODO(discord9): also save handle & use time wheel or what for better
|
|
||||||
let _handle = common_runtime::spawn_global(async move {
|
|
||||||
match task_inner.start_executing(engine, frontend).await {
|
|
||||||
Ok(()) => info!("Flow {} shutdown", task_inner.flow_id),
|
|
||||||
Err(err) => common_telemetry::error!(
|
|
||||||
"Flow {} encounter unrecoverable error: {err:?}",
|
|
||||||
task_inner.flow_id
|
|
||||||
),
|
|
||||||
}
|
|
||||||
});
|
|
||||||
|
|
||||||
// TODO(discord9): deal with replace logic
|
|
||||||
let replaced_old_task_opt = self.tasks.write().await.insert(flow_id, task);
|
|
||||||
drop(replaced_old_task_opt);
|
|
||||||
|
|
||||||
self.shutdown_txs.write().await.insert(flow_id, tx);
|
|
||||||
|
|
||||||
Ok(Some(flow_id))
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn remove_flow(&self, flow_id: FlowId) -> Result<(), Error> {
|
|
||||||
if self.tasks.write().await.remove(&flow_id).is_none() {
|
|
||||||
warn!("Flow {flow_id} not found in tasks")
|
|
||||||
}
|
|
||||||
let Some(tx) = self.shutdown_txs.write().await.remove(&flow_id) else {
|
|
||||||
UnexpectedSnafu {
|
|
||||||
reason: format!("Can't found shutdown tx for flow {flow_id}"),
|
|
||||||
}
|
|
||||||
.fail()?
|
|
||||||
};
|
|
||||||
if tx.send(()).is_err() {
|
|
||||||
warn!("Fail to shutdown flow {flow_id} due to receiver already dropped, maybe flow {flow_id} is already dropped?")
|
|
||||||
}
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Clone)]
|
|
||||||
pub struct RecordingRuleTask {
|
|
||||||
flow_id: FlowId,
|
|
||||||
query: String,
|
|
||||||
/// in seconds
|
|
||||||
expire_after: Option<i64>,
|
|
||||||
sink_table_name: [String; 3],
|
|
||||||
state: Arc<RwLock<RecordingRuleState>>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl RecordingRuleTask {
|
|
||||||
pub fn new(
|
|
||||||
flow_id: FlowId,
|
|
||||||
query: &str,
|
|
||||||
expire_after: Option<i64>,
|
|
||||||
sink_table_name: [String; 3],
|
|
||||||
query_ctx: QueryContextRef,
|
|
||||||
shutdown_rx: oneshot::Receiver<()>,
|
|
||||||
) -> Self {
|
|
||||||
Self {
|
|
||||||
flow_id,
|
|
||||||
query: query.to_string(),
|
|
||||||
expire_after,
|
|
||||||
sink_table_name,
|
|
||||||
state: Arc::new(RwLock::new(RecordingRuleState::new(query_ctx, shutdown_rx))),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
impl RecordingRuleTask {
|
|
||||||
/// This should be called in a new tokio task
|
|
||||||
pub async fn start_executing(
|
|
||||||
&self,
|
|
||||||
engine: QueryEngineRef,
|
|
||||||
frontend_client: Arc<FrontendClient>,
|
|
||||||
) -> Result<(), Error> {
|
|
||||||
// only the first query doesn't need an upper bound
|
|
||||||
let mut is_first = true;
|
|
||||||
|
|
||||||
loop {
|
|
||||||
// FIXME(discord9): verify that queries with an upper bound also work
|
|
||||||
let new_query = self
|
|
||||||
.gen_query_with_time_window(engine.clone(), false)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
let insert_into = format!(
|
|
||||||
"INSERT INTO {}.{}.{} {}",
|
|
||||||
self.sink_table_name[0],
|
|
||||||
self.sink_table_name[1],
|
|
||||||
self.sink_table_name[2],
|
|
||||||
new_query
|
|
||||||
);
|
|
||||||
|
|
||||||
if is_first {
|
|
||||||
is_first = false;
|
|
||||||
}
|
|
||||||
|
|
||||||
let instant = Instant::now();
|
|
||||||
let flow_id = self.flow_id;
|
|
||||||
let db_client = frontend_client.get_database_client().await?;
|
|
||||||
let peer_addr = db_client.peer.addr;
|
|
||||||
debug!(
|
|
||||||
"Executing flow {flow_id}(expire_after={:?} secs) on {:?} with query {}",
|
|
||||||
self.expire_after, peer_addr, &insert_into
|
|
||||||
);
|
|
||||||
|
|
||||||
let timer = METRIC_FLOW_RULE_ENGINE_QUERY_TIME
|
|
||||||
.with_label_values(&[flow_id.to_string().as_str()])
|
|
||||||
.start_timer();
|
|
||||||
|
|
||||||
let res = db_client.database.sql(&insert_into).await;
|
|
||||||
drop(timer);
|
|
||||||
|
|
||||||
let elapsed = instant.elapsed();
|
|
||||||
if let Ok(res1) = &res {
|
|
||||||
debug!(
|
|
||||||
"Flow {flow_id} executed, result: {res1:?}, elapsed: {:?}",
|
|
||||||
elapsed
|
|
||||||
);
|
|
||||||
} else if let Err(res) = &res {
|
|
||||||
warn!(
|
|
||||||
"Failed to execute Flow {flow_id} on frontend {}, result: {res:?}, elapsed: {:?} with query: {}",
|
|
||||||
peer_addr, elapsed, &insert_into
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
// record slow query
|
|
||||||
if elapsed >= SLOW_QUERY_THRESHOLD {
|
|
||||||
warn!(
|
|
||||||
"Flow {flow_id} on frontend {} executed for {:?} before complete, query: {}",
|
|
||||||
peer_addr, elapsed, &insert_into
|
|
||||||
);
|
|
||||||
METRIC_FLOW_RULE_ENGINE_SLOW_QUERY
|
|
||||||
.with_label_values(&[flow_id.to_string().as_str(), &insert_into, &peer_addr])
|
|
||||||
.observe(elapsed.as_secs_f64());
|
|
||||||
}
|
|
||||||
|
|
||||||
self.state
|
|
||||||
.write()
|
|
||||||
.await
|
|
||||||
.after_query_exec(elapsed, res.is_ok());
|
|
||||||
|
|
||||||
let sleep_until = {
|
|
||||||
let mut state = self.state.write().await;
|
|
||||||
match state.shutdown_rx.try_recv() {
|
|
||||||
Ok(()) => break Ok(()),
|
|
||||||
Err(TryRecvError::Closed) => {
|
|
||||||
warn!("Unexpected shutdown flow {flow_id}, shutdown anyway");
|
|
||||||
break Ok(());
|
|
||||||
}
|
|
||||||
Err(TryRecvError::Empty) => (),
|
|
||||||
}
|
|
||||||
state.get_next_start_query_time(None)
|
|
||||||
};
|
|
||||||
tokio::time::sleep_until(sleep_until).await;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn gen_query_with_time_window(
|
|
||||||
&self,
|
|
||||||
engine: QueryEngineRef,
|
|
||||||
need_upper_bound: bool,
|
|
||||||
) -> Result<String, Error> {
|
|
||||||
let query_ctx = self.state.read().await.query_ctx.clone();
|
|
||||||
let start = SystemTime::now();
|
|
||||||
let since_the_epoch = start
|
|
||||||
.duration_since(UNIX_EPOCH)
|
|
||||||
.expect("Time went backwards");
|
|
||||||
let low_bound = self
|
|
||||||
.expire_after
|
|
||||||
.map(|e| since_the_epoch.as_secs() - e as u64);
|
|
||||||
|
|
||||||
let Some(low_bound) = low_bound else {
|
|
||||||
return Ok(self.query.clone());
|
|
||||||
};
|
|
||||||
|
|
||||||
let low_bound = Timestamp::new_second(low_bound as i64);
|
|
||||||
|
|
||||||
let plan = sql_to_df_plan(query_ctx.clone(), engine.clone(), &self.query, true).await?;
|
|
||||||
|
|
||||||
let (col_name, lower, upper) =
|
|
||||||
find_plan_time_window_bound(&plan, low_bound, query_ctx.clone(), engine.clone())
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
let new_sql = {
|
|
||||||
let to_df_literal = |value| -> Result<_, Error> {
|
|
||||||
let value = Value::from(value);
|
|
||||||
let value = value
|
|
||||||
.try_to_scalar_value(&value.data_type())
|
|
||||||
.with_context(|_| DatatypesSnafu {
|
|
||||||
extra: format!("Failed to convert to scalar value: {}", value),
|
|
||||||
})?;
|
|
||||||
Ok(value)
|
|
||||||
};
|
|
||||||
let lower = lower.map(to_df_literal).transpose()?;
|
|
||||||
let upper = upper.map(to_df_literal).transpose()?.and_then(|u| {
|
|
||||||
if need_upper_bound {
|
|
||||||
Some(u)
|
|
||||||
} else {
|
|
||||||
None
|
|
||||||
}
|
|
||||||
});
|
|
||||||
let expr = {
|
|
||||||
use datafusion_expr::{col, lit};
|
|
||||||
match (lower, upper) {
|
|
||||||
(Some(l), Some(u)) => col(&col_name)
|
|
||||||
.gt_eq(lit(l))
|
|
||||||
.and(col(&col_name).lt_eq(lit(u))),
|
|
||||||
(Some(l), None) => col(&col_name).gt_eq(lit(l)),
|
|
||||||
(None, Some(u)) => col(&col_name).lt(lit(u)),
|
|
||||||
// no time window, direct return
|
|
||||||
(None, None) => return Ok(self.query.clone()),
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
let mut add_filter = AddFilterRewriter::new(expr);
|
|
||||||
// use an unoptimized plan for clearer unparsing
|
|
||||||
let plan =
|
|
||||||
sql_to_df_plan(query_ctx.clone(), engine.clone(), &self.query, false).await?;
|
|
||||||
let plan = plan
|
|
||||||
.clone()
|
|
||||||
.rewrite(&mut add_filter)
|
|
||||||
.with_context(|_| DatafusionSnafu {
|
|
||||||
context: format!("Failed to rewrite plan {plan:?}"),
|
|
||||||
})?
|
|
||||||
.data;
|
|
||||||
df_plan_to_sql(&plan)?
|
|
||||||
};
|
|
||||||
|
|
||||||
Ok(new_sql)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug)]
|
|
||||||
pub struct RecordingRuleState {
|
|
||||||
query_ctx: QueryContextRef,
|
|
||||||
/// last query complete time
|
|
||||||
last_update_time: Instant,
|
|
||||||
/// duration of the last query
|
|
||||||
last_query_duration: Duration,
|
|
||||||
exec_state: ExecState,
|
|
||||||
shutdown_rx: oneshot::Receiver<()>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl RecordingRuleState {
|
|
||||||
pub fn new(query_ctx: QueryContextRef, shutdown_rx: oneshot::Receiver<()>) -> Self {
|
|
||||||
Self {
|
|
||||||
query_ctx,
|
|
||||||
last_update_time: Instant::now(),
|
|
||||||
last_query_duration: Duration::from_secs(0),
|
|
||||||
exec_state: ExecState::Idle,
|
|
||||||
shutdown_rx,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// called after last query is done
|
|
||||||
/// `is_succ` indicates whether the last query was successful
|
|
||||||
pub fn after_query_exec(&mut self, elapsed: Duration, _is_succ: bool) {
|
|
||||||
self.exec_state = ExecState::Idle;
|
|
||||||
self.last_query_duration = elapsed;
|
|
||||||
self.last_update_time = Instant::now();
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Wait for at least `last_query_duration` and at most `max_timeout` before starting the next query
|
|
||||||
pub fn get_next_start_query_time(&self, max_timeout: Option<Duration>) -> Instant {
|
|
||||||
let next_duration = max_timeout
|
|
||||||
.unwrap_or(self.last_query_duration)
|
|
||||||
.min(self.last_query_duration);
|
|
||||||
let next_duration = next_duration.max(MIN_REFRESH_DURATION);
|
|
||||||
|
|
||||||
self.last_update_time + next_duration
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Clone)]
|
|
||||||
enum ExecState {
|
|
||||||
Idle,
|
|
||||||
Executing,
|
|
||||||
}
|
|
||||||
@@ -1,150 +0,0 @@
|
|||||||
// Copyright 2023 Greptime Team
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
//! Frontend client to run a flow as a recording rule, i.e. a time-window-aware normal query triggered on every tick configured by the user
|
|
||||||
|
|
||||||
use std::sync::Arc;
|
|
||||||
|
|
||||||
use client::{Client, Database, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
|
||||||
use common_error::ext::BoxedError;
|
|
||||||
use common_grpc::channel_manager::{ChannelConfig, ChannelManager};
|
|
||||||
use common_meta::cluster::{NodeInfo, NodeInfoKey, Role};
|
|
||||||
use common_meta::peer::Peer;
|
|
||||||
use common_meta::rpc::store::RangeRequest;
|
|
||||||
use meta_client::client::MetaClient;
|
|
||||||
use snafu::ResultExt;
|
|
||||||
|
|
||||||
use crate::error::{ExternalSnafu, UnexpectedSnafu};
|
|
||||||
use crate::recording_rules::engine::DEFAULT_RULE_ENGINE_QUERY_TIMEOUT;
|
|
||||||
use crate::Error;
|
|
||||||
|
|
||||||
fn default_channel_mgr() -> ChannelManager {
|
|
||||||
let cfg = ChannelConfig::new().timeout(DEFAULT_RULE_ENGINE_QUERY_TIMEOUT);
|
|
||||||
ChannelManager::with_config(cfg)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn client_from_urls(addrs: Vec<String>) -> Client {
|
|
||||||
Client::with_manager_and_urls(default_channel_mgr(), addrs)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// A simple frontend client able to execute sql using grpc protocol
|
|
||||||
#[derive(Debug)]
|
|
||||||
pub enum FrontendClient {
|
|
||||||
Distributed {
|
|
||||||
meta_client: Arc<MetaClient>,
|
|
||||||
},
|
|
||||||
Standalone {
|
|
||||||
/// For the sake of simplicity, still use gRPC even in standalone mode.
/// Note that the clients here should all be lazy, so they can wait until the frontend is booted before making a connection.
/// TODO(discord9): avoid using gRPC under standalone mode
|
|
||||||
database_client: DatabaseWithPeer,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Clone)]
|
|
||||||
pub struct DatabaseWithPeer {
|
|
||||||
pub database: Database,
|
|
||||||
pub peer: Peer,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl DatabaseWithPeer {
|
|
||||||
fn new(database: Database, peer: Peer) -> Self {
|
|
||||||
Self { database, peer }
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl FrontendClient {
|
|
||||||
pub fn from_meta_client(meta_client: Arc<MetaClient>) -> Self {
|
|
||||||
Self::Distributed { meta_client }
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn from_static_grpc_addr(addr: String) -> Self {
|
|
||||||
let peer = Peer {
|
|
||||||
id: 0,
|
|
||||||
addr: addr.clone(),
|
|
||||||
};
|
|
||||||
|
|
||||||
let client = client_from_urls(vec![addr]);
|
|
||||||
let database = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client);
|
|
||||||
Self::Standalone {
|
|
||||||
database_client: DatabaseWithPeer::new(database, peer),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl FrontendClient {
|
|
||||||
async fn scan_for_frontend(&self) -> Result<Vec<(NodeInfoKey, NodeInfo)>, Error> {
|
|
||||||
let Self::Distributed { meta_client, .. } = self else {
|
|
||||||
return Ok(vec![]);
|
|
||||||
};
|
|
||||||
let cluster_client = meta_client
|
|
||||||
.cluster_client()
|
|
||||||
.map_err(BoxedError::new)
|
|
||||||
.context(ExternalSnafu)?;
|
|
||||||
let cluster_id = meta_client.id().0;
|
|
||||||
let prefix = NodeInfoKey::key_prefix_with_role(cluster_id, Role::Frontend);
|
|
||||||
let req = RangeRequest::new().with_prefix(prefix);
|
|
||||||
let resp = cluster_client
|
|
||||||
.range(req)
|
|
||||||
.await
|
|
||||||
.map_err(BoxedError::new)
|
|
||||||
.context(ExternalSnafu)?;
|
|
||||||
let mut res = Vec::with_capacity(resp.kvs.len());
|
|
||||||
for kv in resp.kvs {
|
|
||||||
let key = NodeInfoKey::try_from(kv.key)
|
|
||||||
.map_err(BoxedError::new)
|
|
||||||
.context(ExternalSnafu)?;
|
|
||||||
|
|
||||||
let val = NodeInfo::try_from(kv.value)
|
|
||||||
.map_err(BoxedError::new)
|
|
||||||
.context(ExternalSnafu)?;
|
|
||||||
res.push((key, val));
|
|
||||||
}
|
|
||||||
Ok(res)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Get the database with max `last_activity_ts`
|
|
||||||
async fn get_last_active_frontend(&self) -> Result<DatabaseWithPeer, Error> {
|
|
||||||
if let Self::Standalone { database_client } = self {
|
|
||||||
return Ok(database_client.clone());
|
|
||||||
}
|
|
||||||
|
|
||||||
let frontends = self.scan_for_frontend().await?;
|
|
||||||
let mut last_activity_ts = i64::MIN;
|
|
||||||
let mut peer = None;
|
|
||||||
for (_key, val) in frontends.iter() {
|
|
||||||
if val.last_activity_ts > last_activity_ts {
|
|
||||||
last_activity_ts = val.last_activity_ts;
|
|
||||||
peer = Some(val.peer.clone());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
let Some(peer) = peer else {
|
|
||||||
UnexpectedSnafu {
|
|
||||||
reason: format!("No frontend available: {:?}", frontends),
|
|
||||||
}
|
|
||||||
.fail()?
|
|
||||||
};
|
|
||||||
let client = client_from_urls(vec![peer.addr.clone()]);
|
|
||||||
let database = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client);
|
|
||||||
Ok(DatabaseWithPeer::new(database, peer))
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Get a database client, and possibly update it before returning.
|
|
||||||
pub async fn get_database_client(&self) -> Result<DatabaseWithPeer, Error> {
|
|
||||||
match self {
|
|
||||||
Self::Standalone { database_client } => Ok(database_client.clone()),
|
|
||||||
Self::Distributed { meta_client: _ } => self.get_last_active_frontend().await,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -57,7 +57,6 @@ use crate::error::{
 };
 use crate::heartbeat::HeartbeatTask;
 use crate::metrics::{METRIC_FLOW_PROCESSING_TIME, METRIC_FLOW_ROWS};
-use crate::recording_rules::{FrontendClient, RecordingRuleEngine};
 use crate::transform::register_function_to_query_engine;
 use crate::utils::{SizeReportSender, StateReportHandler};
 use crate::{Error, FlowWorkerManager, FlownodeOptions};
@@ -246,7 +245,6 @@ impl FlownodeInstance {
         self.server.shutdown().await.context(ShutdownServerSnafu)?;
 
         if let Some(task) = &self.heartbeat_task {
-            info!("Close heartbeat task for flownode");
             task.shutdown();
         }
 
@@ -273,8 +271,6 @@ pub struct FlownodeBuilder {
     heartbeat_task: Option<HeartbeatTask>,
     /// receive a oneshot sender to send state size report
    state_report_handler: Option<StateReportHandler>,
-    /// Client to send sql to frontend
-    frontend_client: Arc<FrontendClient>,
 }
 
 impl FlownodeBuilder {
@@ -285,7 +281,6 @@ impl FlownodeBuilder {
         table_meta: TableMetadataManagerRef,
         catalog_manager: CatalogManagerRef,
         flow_metadata_manager: FlowMetadataManagerRef,
-        frontend_client: Arc<FrontendClient>,
     ) -> Self {
         Self {
             opts,
@@ -295,7 +290,6 @@ impl FlownodeBuilder {
             flow_metadata_manager,
             heartbeat_task: None,
             state_report_handler: None,
-            frontend_client,
         }
     }
 
@@ -453,10 +447,7 @@ impl FlownodeBuilder {
 
         let node_id = self.opts.node_id.map(|id| id as u32);
 
-        let rule_engine =
-            RecordingRuleEngine::new(self.frontend_client.clone(), query_engine.clone());
-
-        let mut man = FlowWorkerManager::new(node_id, query_engine, table_meta, rule_engine);
+        let mut man = FlowWorkerManager::new(node_id, query_engine, table_meta);
         for worker_id in 0..num_workers {
             let (tx, rx) = oneshot::channel();
 
@@ -86,8 +86,7 @@ pub fn create_test_query_engine() -> Arc<dyn QueryEngine> {
 
     let schema = vec![
         datatypes::schema::ColumnSchema::new("number", CDT::uint32_datatype(), false),
-        datatypes::schema::ColumnSchema::new("ts", CDT::timestamp_millisecond_datatype(), false)
-            .with_time_index(true),
+        datatypes::schema::ColumnSchema::new("ts", CDT::timestamp_millisecond_datatype(), false),
     ];
     let mut columns = vec![];
     let numbers = (1..=10).collect_vec();
@@ -112,7 +112,6 @@ impl MetaClientBuilder {
             .enable_store()
             .enable_heartbeat()
             .enable_procedure()
-            .enable_access_cluster_info()
     }
 
     pub fn enable_heartbeat(self) -> Self {
@@ -157,7 +157,7 @@ fn extract_base_info(request: &HeartbeatRequest) -> Option<(NodeInfoKey, Peer, P
 }
 
 async fn put_into_memory_store(ctx: &mut Context, key: NodeInfoKey, value: NodeInfo) -> Result<()> {
-    let key = key.into();
+    let key = (&key).into();
     let value = value.try_into().context(InvalidClusterInfoFormatSnafu)?;
     let put_req = PutRequest {
         key,
@@ -32,6 +32,7 @@ use common_meta::kv_backend::{KvBackendRef, ResettableKvBackend, ResettableKvBac
 use common_meta::leadership_notifier::{
     LeadershipChangeNotifier, LeadershipChangeNotifierCustomizerRef,
 };
+use common_meta::node_expiry_listener::NodeExpiryListener;
 use common_meta::peer::Peer;
 use common_meta::region_keeper::MemoryRegionKeeperRef;
 use common_meta::wal_options_allocator::WalOptionsAllocatorRef;
@@ -151,6 +152,8 @@ pub struct MetasrvOptions {
     #[cfg(feature = "pg_kvbackend")]
     /// Lock id for meta kv election. Only effective when using pg_kvbackend.
     pub meta_election_lock_id: u64,
+    #[serde(with = "humantime_serde")]
+    pub node_max_idle_time: Duration,
 }
 
 const DEFAULT_METASRV_ADDR_PORT: &str = "3002";
@@ -192,6 +195,7 @@ impl Default for MetasrvOptions {
             meta_table_name: DEFAULT_META_TABLE_NAME.to_string(),
             #[cfg(feature = "pg_kvbackend")]
             meta_election_lock_id: DEFAULT_META_ELECTION_LOCK_ID,
+            node_max_idle_time: Duration::from_secs(24 * 60 * 60),
         }
     }
 }
@@ -442,6 +446,10 @@ impl Metasrv {
         leadership_change_notifier.add_listener(self.wal_options_allocator.clone());
         leadership_change_notifier
             .add_listener(Arc::new(ProcedureManagerListenerAdapter(procedure_manager)));
+        leadership_change_notifier.add_listener(Arc::new(NodeExpiryListener::new(
+            self.options.node_max_idle_time,
+            self.in_memory.clone(),
+        )));
         if let Some(region_supervisor_ticker) = &self.region_supervisor_ticker {
             leadership_change_notifier.add_listener(region_supervisor_ticker.clone() as _);
         }
@@ -68,13 +68,15 @@ impl heartbeat_server::Heartbeat for Metasrv {
|
|||||||
};
|
};
|
||||||
|
|
||||||
if pusher_id.is_none() {
|
if pusher_id.is_none() {
|
||||||
pusher_id = register_pusher(&handler_group, header, tx.clone()).await;
|
pusher_id =
|
||||||
|
Some(register_pusher(&handler_group, header, tx.clone()).await);
|
||||||
}
|
}
|
||||||
if let Some(k) = &pusher_id {
|
if let Some(k) = &pusher_id {
|
||||||
METRIC_META_HEARTBEAT_RECV.with_label_values(&[&k.to_string()]);
|
METRIC_META_HEARTBEAT_RECV.with_label_values(&[&k.to_string()]);
|
||||||
} else {
|
} else {
|
||||||
METRIC_META_HEARTBEAT_RECV.with_label_values(&["none"]);
|
METRIC_META_HEARTBEAT_RECV.with_label_values(&["none"]);
|
||||||
}
|
}
|
||||||
|
|
||||||
let res = handler_group
|
let res = handler_group
|
||||||
.handle(req, ctx.clone())
|
.handle(req, ctx.clone())
|
||||||
.await
|
.await
|
||||||
@@ -173,13 +175,13 @@ async fn register_pusher(
|
|||||||
handler_group: &HeartbeatHandlerGroup,
|
handler_group: &HeartbeatHandlerGroup,
|
||||||
header: &RequestHeader,
|
header: &RequestHeader,
|
||||||
sender: Sender<std::result::Result<HeartbeatResponse, tonic::Status>>,
|
sender: Sender<std::result::Result<HeartbeatResponse, tonic::Status>>,
|
||||||
) -> Option<PusherId> {
|
) -> PusherId {
|
||||||
let role = header.role();
|
let role = header.role();
|
||||||
let id = get_node_id(header);
|
let id = get_node_id(header);
|
||||||
let pusher_id = PusherId::new(role, id);
|
let pusher_id = PusherId::new(role, id);
|
||||||
let pusher = Pusher::new(sender, header);
|
let pusher = Pusher::new(sender, header);
|
||||||
handler_group.register_pusher(pusher_id, pusher).await;
|
handler_group.register_pusher(pusher_id, pusher).await;
|
||||||
Some(pusher_id)
|
pusher_id
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
|
|||||||
@@ -68,7 +68,6 @@ pub struct Inserter {
     catalog_manager: CatalogManagerRef,
     partition_manager: PartitionRuleManagerRef,
     node_manager: NodeManagerRef,
-    #[allow(unused)]
     table_flownode_set_cache: TableFlownodeSetCacheRef,
 }
 
@@ -336,11 +335,9 @@ impl Inserter {
|
|||||||
|
|
||||||
let InstantAndNormalInsertRequests {
|
let InstantAndNormalInsertRequests {
|
||||||
normal_requests,
|
normal_requests,
|
||||||
instant_requests: _,
|
instant_requests,
|
||||||
} = requests;
|
} = requests;
|
||||||
|
|
||||||
// TODO(discord9): mirror some
|
|
||||||
/*
|
|
||||||
// Mirror requests for source table to flownode asynchronously
|
// Mirror requests for source table to flownode asynchronously
|
||||||
let flow_mirror_task = FlowMirrorTask::new(
|
let flow_mirror_task = FlowMirrorTask::new(
|
||||||
&self.table_flownode_set_cache,
|
&self.table_flownode_set_cache,
|
||||||
@@ -350,7 +347,7 @@ impl Inserter {
|
|||||||
.chain(instant_requests.requests.iter()),
|
.chain(instant_requests.requests.iter()),
|
||||||
)
|
)
|
||||||
.await?;
|
.await?;
|
||||||
flow_mirror_task.detach(self.node_manager.clone())?;*/
|
flow_mirror_task.detach(self.node_manager.clone())?;
|
||||||
|
|
||||||
// Write requests to datanode and wait for response
|
// Write requests to datanode and wait for response
|
||||||
let write_tasks = self
|
let write_tasks = self
|
||||||
@@ -820,14 +817,12 @@ struct CreateAlterTableResult {
|
|||||||
table_infos: HashMap<TableId, Arc<TableInfo>>,
|
table_infos: HashMap<TableId, Arc<TableInfo>>,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[allow(unused)]
|
|
||||||
struct FlowMirrorTask {
|
struct FlowMirrorTask {
|
||||||
requests: HashMap<Peer, RegionInsertRequests>,
|
requests: HashMap<Peer, RegionInsertRequests>,
|
||||||
num_rows: usize,
|
num_rows: usize,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl FlowMirrorTask {
|
impl FlowMirrorTask {
|
||||||
#[allow(unused)]
|
|
||||||
async fn new(
|
async fn new(
|
||||||
cache: &TableFlownodeSetCacheRef,
|
cache: &TableFlownodeSetCacheRef,
|
||||||
requests: impl Iterator<Item = &RegionInsertRequest>,
|
requests: impl Iterator<Item = &RegionInsertRequest>,
|
||||||
@@ -901,7 +896,6 @@ impl FlowMirrorTask {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
#[allow(unused)]
|
|
||||||
fn detach(self, node_manager: NodeManagerRef) -> Result<()> {
|
fn detach(self, node_manager: NodeManagerRef) -> Result<()> {
|
||||||
crate::metrics::DIST_MIRROR_PENDING_ROW_COUNT.add(self.num_rows as i64);
|
crate::metrics::DIST_MIRROR_PENDING_ROW_COUNT.add(self.num_rows as i64);
|
||||||
for (peer, inserts) in self.requests {
|
for (peer, inserts) in self.requests {
|
||||||
|
|||||||
@@ -583,7 +583,8 @@ impl HistogramFoldStream {
                 .expect("field column should not be nullable");
             counters.push(counter);
         }
-        let result = Self::evaluate_row(self.quantile, &bucket, &counters)?;
+        // ignore invalid data
+        let result = Self::evaluate_row(self.quantile, &bucket, &counters).unwrap_or(f64::NAN);
         self.output_buffer[self.field_column_index].push_value_ref(ValueRef::from(result));
         cursor += bucket_num;
         remaining_rows -= bucket_num;
@@ -672,7 +673,7 @@ impl HistogramFoldStream {
         if bucket.len() <= 1 {
             return Ok(f64::NAN);
         }
-        if *bucket.last().unwrap() != f64::INFINITY {
+        if bucket.last().unwrap().is_finite() {
             return Err(DataFusionError::Execution(
                 "last bucket should be +Inf".to_string(),
             ));
@@ -692,8 +693,8 @@ impl HistogramFoldStream {
         }
 
         // check input value
-        debug_assert!(bucket.windows(2).all(|w| w[0] <= w[1]));
-        debug_assert!(counter.windows(2).all(|w| w[0] <= w[1]));
+        debug_assert!(bucket.windows(2).all(|w| w[0] <= w[1]), "{bucket:?}");
+        debug_assert!(counter.windows(2).all(|w| w[0] <= w[1]), "{counter:?}");
 
         let total = *counter.last().unwrap();
         let expected_pos = total * quantile;
@@ -21,6 +21,7 @@ mod idelta;
 mod predict_linear;
 mod quantile;
 mod resets;
+mod round;
 #[cfg(test)]
 mod test_util;
 
@@ -39,6 +40,7 @@ pub use idelta::IDelta;
 pub use predict_linear::PredictLinear;
 pub use quantile::QuantileOverTime;
 pub use resets::Resets;
+pub use round::Round;
 
 pub(crate) fn extract_array(columnar_value: &ColumnarValue) -> Result<ArrayRef, DataFusionError> {
     if let ColumnarValue::Array(array) = columnar_value {
105
src/promql/src/functions/round.rs
Normal file
@@ -0,0 +1,105 @@
|
|||||||
|
// Copyright 2023 Greptime Team
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use datafusion::error::DataFusionError;
|
||||||
|
use datafusion_expr::{create_udf, ColumnarValue, ScalarUDF, Volatility};
|
||||||
|
use datatypes::arrow::array::AsArray;
|
||||||
|
use datatypes::arrow::datatypes::{DataType, Float64Type};
|
||||||
|
use datatypes::compute;
|
||||||
|
|
||||||
|
use crate::functions::extract_array;
|
||||||
|
|
||||||
|
pub struct Round {
|
||||||
|
nearest: f64,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Round {
|
||||||
|
fn new(nearest: f64) -> Self {
|
||||||
|
Self { nearest }
|
||||||
|
}
|
||||||
|
|
||||||
|
pub const fn name() -> &'static str {
|
||||||
|
"prom_round"
|
||||||
|
}
|
||||||
|
|
||||||
|
fn input_type() -> Vec<DataType> {
|
||||||
|
vec![DataType::Float64]
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn return_type() -> DataType {
|
||||||
|
DataType::Float64
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn scalar_udf(nearest: f64) -> ScalarUDF {
|
||||||
|
create_udf(
|
||||||
|
Self::name(),
|
||||||
|
Self::input_type(),
|
||||||
|
Self::return_type(),
|
||||||
|
Volatility::Immutable,
|
||||||
|
Arc::new(move |input: &_| Self::new(nearest).calc(input)) as _,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn calc(&self, input: &[ColumnarValue]) -> Result<ColumnarValue, DataFusionError> {
|
||||||
|
assert_eq!(input.len(), 1);
|
||||||
|
|
||||||
|
let value_array = extract_array(&input[0])?;
|
||||||
|
|
||||||
|
if self.nearest == 0.0 {
|
||||||
|
let values = value_array.as_primitive::<Float64Type>();
|
||||||
|
let result = compute::unary::<_, _, Float64Type>(values, |a| a.round());
|
||||||
|
Ok(ColumnarValue::Array(Arc::new(result) as _))
|
||||||
|
} else {
|
||||||
|
let values = value_array.as_primitive::<Float64Type>();
|
||||||
|
let nearest = self.nearest;
|
||||||
|
let result =
|
||||||
|
compute::unary::<_, _, Float64Type>(values, |a| ((a / nearest).round() * nearest));
|
||||||
|
Ok(ColumnarValue::Array(Arc::new(result) as _))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod tests {
|
||||||
|
use datatypes::arrow::array::Float64Array;
|
||||||
|
|
||||||
|
use super::*;
|
||||||
|
|
||||||
|
fn test_round_f64(value: Vec<f64>, nearest: f64, expected: Vec<f64>) {
|
||||||
|
let round_udf = Round::scalar_udf(nearest);
|
||||||
|
let input = vec![ColumnarValue::Array(Arc::new(Float64Array::from(value)))];
|
||||||
|
let result = round_udf.invoke_batch(&input, 1).unwrap();
|
||||||
|
let result_array = extract_array(&result).unwrap();
|
||||||
|
assert_eq!(result_array.len(), 1);
|
||||||
|
assert_eq!(
|
||||||
|
result_array.as_primitive::<Float64Type>().values(),
|
||||||
|
&expected
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_round() {
|
||||||
|
test_round_f64(vec![123.456], 0.001, vec![123.456]);
|
||||||
|
test_round_f64(vec![123.456], 0.01, vec![123.46000000000001]);
|
||||||
|
test_round_f64(vec![123.456], 0.1, vec![123.5]);
|
||||||
|
test_round_f64(vec![123.456], 0.0, vec![123.0]);
|
||||||
|
test_round_f64(vec![123.456], 1.0, vec![123.0]);
|
||||||
|
test_round_f64(vec![123.456], 10.0, vec![120.0]);
|
||||||
|
test_round_f64(vec![123.456], 100.0, vec![100.0]);
|
||||||
|
test_round_f64(vec![123.456], 105.0, vec![105.0]);
|
||||||
|
test_round_f64(vec![123.456], 1000.0, vec![0.0]);
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -52,7 +52,7 @@ use promql::extension_plan::{
 use promql::functions::{
     AbsentOverTime, AvgOverTime, Changes, CountOverTime, Delta, Deriv, HoltWinters, IDelta,
     Increase, LastOverTime, MaxOverTime, MinOverTime, PredictLinear, PresentOverTime,
-    QuantileOverTime, Rate, Resets, StddevOverTime, StdvarOverTime, SumOverTime,
+    QuantileOverTime, Rate, Resets, Round, StddevOverTime, StdvarOverTime, SumOverTime,
 };
 use promql_parser::label::{MatchOp, Matcher, Matchers, METRIC_NAME};
 use promql_parser::parser::token::TokenType;
@@ -200,10 +200,9 @@ impl PromPlanner {
|
|||||||
PromExpr::Paren(ParenExpr { expr }) => {
|
PromExpr::Paren(ParenExpr { expr }) => {
|
||||||
self.prom_expr_to_plan(expr, session_state).await?
|
self.prom_expr_to_plan(expr, session_state).await?
|
||||||
}
|
}
|
||||||
PromExpr::Subquery(SubqueryExpr { .. }) => UnsupportedExprSnafu {
|
PromExpr::Subquery(expr) => {
|
||||||
name: "Prom Subquery",
|
self.prom_subquery_expr_to_plan(session_state, expr).await?
|
||||||
}
|
}
|
||||||
.fail()?,
|
|
||||||
PromExpr::NumberLiteral(lit) => self.prom_number_lit_to_plan(lit)?,
|
PromExpr::NumberLiteral(lit) => self.prom_number_lit_to_plan(lit)?,
|
||||||
PromExpr::StringLiteral(lit) => self.prom_string_lit_to_plan(lit)?,
|
PromExpr::StringLiteral(lit) => self.prom_string_lit_to_plan(lit)?,
|
||||||
PromExpr::VectorSelector(selector) => {
|
PromExpr::VectorSelector(selector) => {
|
||||||
@@ -218,6 +217,48 @@ impl PromPlanner {
|
|||||||
Ok(res)
|
Ok(res)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
async fn prom_subquery_expr_to_plan(
|
||||||
|
&mut self,
|
||||||
|
session_state: &SessionState,
|
||||||
|
subquery_expr: &SubqueryExpr,
|
||||||
|
) -> Result<LogicalPlan> {
|
||||||
|
let SubqueryExpr {
|
||||||
|
expr, range, step, ..
|
||||||
|
} = subquery_expr;
|
||||||
|
|
||||||
|
let current_interval = self.ctx.interval;
|
||||||
|
if let Some(step) = step {
|
||||||
|
self.ctx.interval = step.as_millis() as _;
|
||||||
|
}
|
||||||
|
let current_start = self.ctx.start;
|
||||||
|
self.ctx.start -= range.as_millis() as i64 - self.ctx.interval;
|
||||||
|
let input = self.prom_expr_to_plan(expr, session_state).await?;
|
||||||
|
self.ctx.interval = current_interval;
|
||||||
|
self.ctx.start = current_start;
|
||||||
|
|
||||||
|
ensure!(!range.is_zero(), ZeroRangeSelectorSnafu);
|
||||||
|
let range_ms = range.as_millis() as _;
|
||||||
|
self.ctx.range = Some(range_ms);
|
||||||
|
|
||||||
|
let manipulate = RangeManipulate::new(
|
||||||
|
self.ctx.start,
|
||||||
|
self.ctx.end,
|
||||||
|
self.ctx.interval,
|
||||||
|
range_ms,
|
||||||
|
self.ctx
|
||||||
|
.time_index_column
|
||||||
|
.clone()
|
||||||
|
.expect("time index should be set in `setup_context`"),
|
||||||
|
self.ctx.field_columns.clone(),
|
||||||
|
input,
|
||||||
|
)
|
||||||
|
.context(DataFusionPlanningSnafu)?;
|
||||||
|
|
||||||
|
Ok(LogicalPlan::Extension(Extension {
|
||||||
|
node: Arc::new(manipulate),
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
|
||||||
async fn prom_aggr_expr_to_plan(
|
async fn prom_aggr_expr_to_plan(
|
||||||
&mut self,
|
&mut self,
|
||||||
session_state: &SessionState,
|
session_state: &SessionState,
|
||||||
@@ -441,6 +482,7 @@ impl PromPlanner {
             // if left plan or right plan tag is empty, means case like `scalar(...) + host` or `host + scalar(...)`
             // under this case we only join on time index
             left_context.tag_columns.is_empty() || right_context.tag_columns.is_empty(),
+            modifier,
         )?;
         let join_plan_schema = join_plan.schema().clone();
 
@@ -1468,6 +1510,20 @@ impl PromPlanner {
|
|||||||
|
|
||||||
ScalarFunc::GeneratedExpr
|
ScalarFunc::GeneratedExpr
|
||||||
}
|
}
|
||||||
|
"round" => {
|
||||||
|
let nearest = match other_input_exprs.pop_front() {
|
||||||
|
Some(DfExpr::Literal(ScalarValue::Float64(Some(t)))) => t,
|
||||||
|
Some(DfExpr::Literal(ScalarValue::Int64(Some(t)))) => t as f64,
|
||||||
|
None => 0.0,
|
||||||
|
other => UnexpectedPlanExprSnafu {
|
||||||
|
desc: format!("expected f64 literal as t, but found {:?}", other),
|
||||||
|
}
|
||||||
|
.fail()?,
|
||||||
|
};
|
||||||
|
|
||||||
|
ScalarFunc::DataFusionUdf(Arc::new(Round::scalar_udf(nearest)))
|
||||||
|
}
|
||||||
|
|
||||||
_ => {
|
_ => {
|
||||||
if let Some(f) = session_state.scalar_functions().get(func.name) {
|
if let Some(f) = session_state.scalar_functions().get(func.name) {
|
||||||
ScalarFunc::DataFusionBuiltin(f.clone())
|
ScalarFunc::DataFusionBuiltin(f.clone())
|
||||||
@@ -1674,7 +1730,7 @@ impl PromPlanner {
         ensure!(
             !src_labels.is_empty(),
             FunctionInvalidArgumentSnafu {
-                fn_name: "label_join",
+                fn_name: "label_join"
             }
         );
 
@@ -2121,24 +2177,49 @@ impl PromPlanner {
|
|||||||
left_time_index_column: Option<String>,
|
left_time_index_column: Option<String>,
|
||||||
right_time_index_column: Option<String>,
|
right_time_index_column: Option<String>,
|
||||||
only_join_time_index: bool,
|
only_join_time_index: bool,
|
||||||
|
modifier: &Option<BinModifier>,
|
||||||
) -> Result<LogicalPlan> {
|
) -> Result<LogicalPlan> {
|
||||||
let mut left_tag_columns = if only_join_time_index {
|
let mut left_tag_columns = if only_join_time_index {
|
||||||
vec![]
|
BTreeSet::new()
|
||||||
} else {
|
} else {
|
||||||
self.ctx
|
self.ctx
|
||||||
.tag_columns
|
.tag_columns
|
||||||
.iter()
|
.iter()
|
||||||
.map(Column::from_name)
|
.cloned()
|
||||||
.collect::<Vec<_>>()
|
.collect::<BTreeSet<_>>()
|
||||||
};
|
};
|
||||||
let mut right_tag_columns = left_tag_columns.clone();
|
let mut right_tag_columns = left_tag_columns.clone();
|
||||||
|
|
||||||
|
// apply modifier
|
||||||
|
if let Some(modifier) = modifier {
|
||||||
|
// apply label modifier
|
||||||
|
if let Some(matching) = &modifier.matching {
|
||||||
|
match matching {
|
||||||
|
// keeps columns mentioned in `on`
|
||||||
|
LabelModifier::Include(on) => {
|
||||||
|
let mask = on.labels.iter().cloned().collect::<BTreeSet<_>>();
|
||||||
|
left_tag_columns = left_tag_columns.intersection(&mask).cloned().collect();
|
||||||
|
right_tag_columns =
|
||||||
|
right_tag_columns.intersection(&mask).cloned().collect();
|
||||||
|
}
|
||||||
|
// removes columns mentioned in `ignoring`
|
||||||
|
LabelModifier::Exclude(ignoring) => {
|
||||||
|
// doesn't check existence of label
|
||||||
|
for label in &ignoring.labels {
|
||||||
|
let _ = left_tag_columns.remove(label);
|
||||||
|
let _ = right_tag_columns.remove(label);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// push time index column if it exists
|
// push time index column if it exists
|
||||||
if let (Some(left_time_index_column), Some(right_time_index_column)) =
|
if let (Some(left_time_index_column), Some(right_time_index_column)) =
|
||||||
(left_time_index_column, right_time_index_column)
|
(left_time_index_column, right_time_index_column)
|
||||||
{
|
{
|
||||||
left_tag_columns.push(Column::from_name(left_time_index_column));
|
left_tag_columns.insert(left_time_index_column);
|
||||||
right_tag_columns.push(Column::from_name(right_time_index_column));
|
right_tag_columns.insert(right_time_index_column);
|
||||||
}
|
}
|
||||||
|
|
||||||
let right = LogicalPlanBuilder::from(right)
|
let right = LogicalPlanBuilder::from(right)
|
||||||
@@ -2154,7 +2235,16 @@ impl PromPlanner {
|
|||||||
.join(
|
.join(
|
||||||
right,
|
right,
|
||||||
JoinType::Inner,
|
JoinType::Inner,
|
||||||
(left_tag_columns, right_tag_columns),
|
(
|
||||||
|
left_tag_columns
|
||||||
|
.into_iter()
|
||||||
|
.map(Column::from_name)
|
||||||
|
.collect::<Vec<_>>(),
|
||||||
|
right_tag_columns
|
||||||
|
.into_iter()
|
||||||
|
.map(Column::from_name)
|
||||||
|
.collect::<Vec<_>>(),
|
||||||
|
),
|
||||||
None,
|
None,
|
||||||
)
|
)
|
||||||
.context(DataFusionPlanningSnafu)?
|
.context(DataFusionPlanningSnafu)?
|
||||||
@@ -3340,6 +3430,59 @@ mod test {
|
|||||||
indie_query_plan_compare(query, expected).await;
|
indie_query_plan_compare(query, expected).await;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn test_hash_join() {
|
||||||
|
let mut eval_stmt = EvalStmt {
|
||||||
|
expr: PromExpr::NumberLiteral(NumberLiteral { val: 1.0 }),
|
||||||
|
start: UNIX_EPOCH,
|
||||||
|
end: UNIX_EPOCH
|
||||||
|
.checked_add(Duration::from_secs(100_000))
|
||||||
|
.unwrap(),
|
||||||
|
interval: Duration::from_secs(5),
|
||||||
|
lookback_delta: Duration::from_secs(1),
|
||||||
|
};
|
||||||
|
|
||||||
|
let case = r#"http_server_requests_seconds_sum{uri="/accounts/login"} / ignoring(kubernetes_pod_name,kubernetes_namespace) http_server_requests_seconds_count{uri="/accounts/login"}"#;
|
||||||
|
|
||||||
|
let prom_expr = parser::parse(case).unwrap();
|
||||||
|
eval_stmt.expr = prom_expr;
|
||||||
|
let table_provider = build_test_table_provider_with_fields(
|
||||||
|
&[
|
||||||
|
(
|
||||||
|
DEFAULT_SCHEMA_NAME.to_string(),
|
||||||
|
"http_server_requests_seconds_sum".to_string(),
|
||||||
|
),
|
||||||
|
(
|
||||||
|
DEFAULT_SCHEMA_NAME.to_string(),
|
||||||
|
"http_server_requests_seconds_count".to_string(),
|
||||||
|
),
|
||||||
|
],
|
||||||
|
&["uri", "kubernetes_namespace", "kubernetes_pod_name"],
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
// Should be ok
|
||||||
|
let plan = PromPlanner::stmt_to_plan(table_provider, &eval_stmt, &build_session_state())
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
let expected = r#"Projection: http_server_requests_seconds_count.uri, http_server_requests_seconds_count.kubernetes_namespace, http_server_requests_seconds_count.kubernetes_pod_name, http_server_requests_seconds_count.greptime_timestamp, http_server_requests_seconds_sum.greptime_value / http_server_requests_seconds_count.greptime_value AS http_server_requests_seconds_sum.greptime_value / http_server_requests_seconds_count.greptime_value
|
||||||
|
Inner Join: http_server_requests_seconds_sum.greptime_timestamp = http_server_requests_seconds_count.greptime_timestamp, http_server_requests_seconds_sum.uri = http_server_requests_seconds_count.uri
|
||||||
|
SubqueryAlias: http_server_requests_seconds_sum
|
||||||
|
PromInstantManipulate: range=[0..100000000], lookback=[1000], interval=[5000], time index=[greptime_timestamp]
|
||||||
|
PromSeriesNormalize: offset=[0], time index=[greptime_timestamp], filter NaN: [false]
|
||||||
|
PromSeriesDivide: tags=["uri", "kubernetes_namespace", "kubernetes_pod_name"]
|
||||||
|
Sort: http_server_requests_seconds_sum.uri DESC NULLS LAST, http_server_requests_seconds_sum.kubernetes_namespace DESC NULLS LAST, http_server_requests_seconds_sum.kubernetes_pod_name DESC NULLS LAST, http_server_requests_seconds_sum.greptime_timestamp DESC NULLS LAST
|
||||||
|
Filter: http_server_requests_seconds_sum.uri = Utf8("/accounts/login") AND http_server_requests_seconds_sum.greptime_timestamp >= TimestampMillisecond(-1000, None) AND http_server_requests_seconds_sum.greptime_timestamp <= TimestampMillisecond(100001000, None)
|
||||||
|
TableScan: http_server_requests_seconds_sum
|
||||||
|
SubqueryAlias: http_server_requests_seconds_count
|
||||||
|
PromInstantManipulate: range=[0..100000000], lookback=[1000], interval=[5000], time index=[greptime_timestamp]
|
||||||
|
PromSeriesNormalize: offset=[0], time index=[greptime_timestamp], filter NaN: [false]
|
||||||
|
PromSeriesDivide: tags=["uri", "kubernetes_namespace", "kubernetes_pod_name"]
|
||||||
|
Sort: http_server_requests_seconds_count.uri DESC NULLS LAST, http_server_requests_seconds_count.kubernetes_namespace DESC NULLS LAST, http_server_requests_seconds_count.kubernetes_pod_name DESC NULLS LAST, http_server_requests_seconds_count.greptime_timestamp DESC NULLS LAST
|
||||||
|
Filter: http_server_requests_seconds_count.uri = Utf8("/accounts/login") AND http_server_requests_seconds_count.greptime_timestamp >= TimestampMillisecond(-1000, None) AND http_server_requests_seconds_count.greptime_timestamp <= TimestampMillisecond(100001000, None)
|
||||||
|
TableScan: http_server_requests_seconds_count"#;
|
||||||
|
assert_eq!(plan.to_string(), expected);
|
||||||
|
}
|
||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn test_nested_histogram_quantile() {
|
async fn test_nested_histogram_quantile() {
|
||||||
let mut eval_stmt = EvalStmt {
|
let mut eval_stmt = EvalStmt {
|
||||||
|
|||||||
@@ -40,7 +40,7 @@ use common_procedure::options::ProcedureConfig;
 use common_procedure::ProcedureManagerRef;
 use common_wal::config::{DatanodeWalConfig, MetasrvWalConfig};
 use datanode::datanode::DatanodeBuilder;
-use flow::{FlownodeBuilder, FrontendClient};
+use flow::FlownodeBuilder;
 use frontend::instance::builder::FrontendBuilder;
 use frontend::instance::{FrontendInstance, Instance, StandaloneDatanodeManager};
 use meta_srv::metasrv::{FLOW_ID_SEQ, TABLE_ID_SEQ};
@@ -164,15 +164,12 @@ impl GreptimeDbStandaloneBuilder {
             Some(procedure_manager.clone()),
         );
 
-        let fe_server_addr = opts.frontend_options().grpc.bind_addr.clone();
-        let frontend_client = FrontendClient::from_static_grpc_addr(fe_server_addr);
         let flow_builder = FlownodeBuilder::new(
             Default::default(),
             plugins.clone(),
             table_metadata_manager.clone(),
             catalog_manager.clone(),
             flow_metadata_manager.clone(),
-            Arc::new(frontend_client),
         );
         let flownode = Arc::new(flow_builder.build().await.unwrap());
 
@@ -1070,6 +1070,7 @@ fn drop_lines_with_inconsistent_results(input: String) -> String {
"root =",
"endpoint =",
"region =",
"enable_virtual_host_style =",
"cache_path =",
"cache_capacity =",
"sas_token =",

81
tests/cases/standalone/common/promql/round_fn.result
Normal file
@@ -0,0 +1,81 @@
create table cache_hit (
ts timestamp time index,
job string,
greptime_value double,
primary key (job)
);

Affected Rows: 0

insert into cache_hit values
(3000, "read", 123.45),
(3000, "write", 234.567),
(4000, "read", 345.678),
(4000, "write", 456.789);

Affected Rows: 4

-- SQLNESS SORT_RESULT 3 1
tql eval (3, 4, '1s') round(cache_hit, 0.01);

+---------------------+----------------------------+-------+
| ts | prom_round(greptime_value) | job |
+---------------------+----------------------------+-------+
| 1970-01-01T00:00:03 | 123.45 | read |
| 1970-01-01T00:00:03 | 234.57 | write |
| 1970-01-01T00:00:04 | 345.68 | read |
| 1970-01-01T00:00:04 | 456.79 | write |
+---------------------+----------------------------+-------+

-- SQLNESS SORT_RESULT 3 1
tql eval (3, 4, '1s') round(cache_hit, 0.1);

+---------------------+----------------------------+-------+
| ts | prom_round(greptime_value) | job |
+---------------------+----------------------------+-------+
| 1970-01-01T00:00:03 | 123.5 | read |
| 1970-01-01T00:00:03 | 234.60000000000002 | write |
| 1970-01-01T00:00:04 | 345.70000000000005 | read |
| 1970-01-01T00:00:04 | 456.8 | write |
+---------------------+----------------------------+-------+

-- SQLNESS SORT_RESULT 3 1
tql eval (3, 4, '1s') round(cache_hit, 1.0);

+---------------------+----------------------------+-------+
| ts | prom_round(greptime_value) | job |
+---------------------+----------------------------+-------+
| 1970-01-01T00:00:03 | 123.0 | read |
| 1970-01-01T00:00:03 | 235.0 | write |
| 1970-01-01T00:00:04 | 346.0 | read |
| 1970-01-01T00:00:04 | 457.0 | write |
+---------------------+----------------------------+-------+

-- SQLNESS SORT_RESULT 3 1
tql eval (3, 4, '1s') round(cache_hit);

+---------------------+----------------------------+-------+
| ts | prom_round(greptime_value) | job |
+---------------------+----------------------------+-------+
| 1970-01-01T00:00:03 | 123.0 | read |
| 1970-01-01T00:00:03 | 235.0 | write |
| 1970-01-01T00:00:04 | 346.0 | read |
| 1970-01-01T00:00:04 | 457.0 | write |
+---------------------+----------------------------+-------+

-- SQLNESS SORT_RESULT 3 1
tql eval (3, 4, '1s') round(cache_hit, 10.0);

+---------------------+----------------------------+-------+
| ts | prom_round(greptime_value) | job |
+---------------------+----------------------------+-------+
| 1970-01-01T00:00:03 | 120.0 | read |
| 1970-01-01T00:00:03 | 230.0 | write |
| 1970-01-01T00:00:04 | 350.0 | read |
| 1970-01-01T00:00:04 | 460.0 | write |
+---------------------+----------------------------+-------+

drop table cache_hit;

Affected Rows: 0

30
tests/cases/standalone/common/promql/round_fn.sql
Normal file
@@ -0,0 +1,30 @@
create table cache_hit (
ts timestamp time index,
job string,
greptime_value double,
primary key (job)
);

insert into cache_hit values
(3000, "read", 123.45),
(3000, "write", 234.567),
(4000, "read", 345.678),
(4000, "write", 456.789);

-- SQLNESS SORT_RESULT 3 1
tql eval (3, 4, '1s') round(cache_hit, 0.01);

-- SQLNESS SORT_RESULT 3 1
tql eval (3, 4, '1s') round(cache_hit, 0.1);

-- SQLNESS SORT_RESULT 3 1
tql eval (3, 4, '1s') round(cache_hit, 1.0);

-- SQLNESS SORT_RESULT 3 1
tql eval (3, 4, '1s') round(cache_hit);

-- SQLNESS SORT_RESULT 3 1
tql eval (3, 4, '1s') round(cache_hit, 10.0);

drop table cache_hit;

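For reference: PromQL's round(v, to_nearest) snaps each sample to the nearest multiple of to_nearest, and round(v) defaults to to_nearest = 1, which is why the round(cache_hit) and round(cache_hit, 1.0) tables above are identical. A minimal Rust sketch of that arithmetic, assuming the straightforward round(v / to_nearest) * to_nearest formulation rather than the exact GreptimeDB implementation:

fn prom_round(v: f64, to_nearest: f64) -> f64 {
    // Snap to the nearest multiple of `to_nearest`.
    (v / to_nearest).round() * to_nearest
}

fn main() {
    // round(cache_hit, 0.1) on the sample 234.567: the quotient rounds to 2346,
    // and the product lands on an f64 that is only approximately 234.6, which is
    // why the result file above shows values like 234.60000000000002.
    println!("{}", prom_round(234.567, 0.1));
    // round(cache_hit, 10.0) on 456.789 snaps to the nearest multiple of 10.
    println!("{}", prom_round(456.789, 10.0)); // 460
}
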
@@ -638,3 +638,78 @@ drop table cache_miss;

Affected Rows: 0

create table cache_hit_with_null_label (
ts timestamp time index,
job string,
null_label string null,
greptime_value double,
primary key (job, null_label)
);

Affected Rows: 0

create table cache_miss_with_null_label (
ts timestamp time index,
job string,
null_label string null,
greptime_value double,
primary key (job, null_label)
);

Affected Rows: 0

insert into cache_hit_with_null_label values
(3000, "read", null, 1.0),
(3000, "write", null, 2.0),
(4000, "read", null, 3.0),
(4000, "write", null, 4.0);

Affected Rows: 4

insert into cache_miss_with_null_label values
(3000, "read", null, 1.0),
(3000, "write", null, 2.0),
(4000, "read", null, 1.0),
(4000, "write", null, 2.0);

Affected Rows: 4

-- SQLNESS SORT_RESULT 3 1
-- null != null, so it returns the empty set.
tql eval (3, 4, '1s') cache_hit_with_null_label / (cache_miss_with_null_label + cache_hit_with_null_label);

++
++

-- SQLNESS SORT_RESULT 3 1
tql eval (3, 4, '1s') cache_hit_with_null_label / ignoring(null_label) (cache_miss_with_null_label + ignoring(null_label) cache_hit_with_null_label);

+-------+------------+---------------------+---------------------------------------------------------------------------------------------------------------+
| job | null_label | ts | lhs.greptime_value / rhs.cache_miss_with_null_label.greptime_value + cache_hit_with_null_label.greptime_value |
+-------+------------+---------------------+---------------------------------------------------------------------------------------------------------------+
| read | | 1970-01-01T00:00:03 | 0.5 |
| read | | 1970-01-01T00:00:04 | 0.75 |
| write | | 1970-01-01T00:00:03 | 0.5 |
| write | | 1970-01-01T00:00:04 | 0.6666666666666666 |
+-------+------------+---------------------+---------------------------------------------------------------------------------------------------------------+

-- SQLNESS SORT_RESULT 3 1
tql eval (3, 4, '1s') cache_hit_with_null_label / on(job) (cache_miss_with_null_label + on(job) cache_hit_with_null_label);

+-------+------------+---------------------+---------------------------------------------------------------------------------------------------------------+
| job | null_label | ts | lhs.greptime_value / rhs.cache_miss_with_null_label.greptime_value + cache_hit_with_null_label.greptime_value |
+-------+------------+---------------------+---------------------------------------------------------------------------------------------------------------+
| read | | 1970-01-01T00:00:03 | 0.5 |
| read | | 1970-01-01T00:00:04 | 0.75 |
| write | | 1970-01-01T00:00:03 | 0.5 |
| write | | 1970-01-01T00:00:04 | 0.6666666666666666 |
+-------+------------+---------------------+---------------------------------------------------------------------------------------------------------------+

drop table cache_hit_with_null_label;

Affected Rows: 0

drop table cache_miss_with_null_label;

Affected Rows: 0

@@ -295,3 +295,45 @@ tql eval (3, 4, '1s') cache_hit / (cache_miss + cache_hit);
drop table cache_hit;

drop table cache_miss;

create table cache_hit_with_null_label (
ts timestamp time index,
job string,
null_label string null,
greptime_value double,
primary key (job, null_label)
);

create table cache_miss_with_null_label (
ts timestamp time index,
job string,
null_label string null,
greptime_value double,
primary key (job, null_label)
);

insert into cache_hit_with_null_label values
(3000, "read", null, 1.0),
(3000, "write", null, 2.0),
(4000, "read", null, 3.0),
(4000, "write", null, 4.0);

insert into cache_miss_with_null_label values
(3000, "read", null, 1.0),
(3000, "write", null, 2.0),
(4000, "read", null, 1.0),
(4000, "write", null, 2.0);

-- SQLNESS SORT_RESULT 3 1
-- null != null, so it returns the empty set.
tql eval (3, 4, '1s') cache_hit_with_null_label / (cache_miss_with_null_label + cache_hit_with_null_label);

-- SQLNESS SORT_RESULT 3 1
tql eval (3, 4, '1s') cache_hit_with_null_label / ignoring(null_label) (cache_miss_with_null_label + ignoring(null_label) cache_hit_with_null_label);

-- SQLNESS SORT_RESULT 3 1
tql eval (3, 4, '1s') cache_hit_with_null_label / on(job) (cache_miss_with_null_label + on(job) cache_hit_with_null_label);

drop table cache_hit_with_null_label;

drop table cache_miss_with_null_label;

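For reference: the first query above returns the empty set because, as in the logical plan near the top of this comparison, a PromQL binary operation is planned as an inner join over the matching label columns, and a NULL label value never equals another NULL, so the all-label join key never matches; ignoring(null_label) and on(job) restrict matching to job and the series pair up again. A small illustrative Rust sketch of that key comparison (not GreptimeDB internals):

// SQL-style equality over nullable label values: NULL = NULL is not true.
fn sql_eq(a: Option<&str>, b: Option<&str>) -> bool {
    matches!((a, b), (Some(x), Some(y)) if x == y)
}

fn main() {
    let (lhs_job, lhs_null_label): (&str, Option<&str>) = ("read", None);
    let (rhs_job, rhs_null_label): (&str, Option<&str>) = ("read", None);

    // Default matching uses every label; the NULL label breaks the join key.
    assert!(!(lhs_job == rhs_job && sql_eq(lhs_null_label, rhs_null_label))); // empty result

    // `ignoring(null_label)` / `on(job)`: only `job` participates, so the series match.
    assert!(lhs_job == rhs_job);
}
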
@@ -295,3 +295,40 @@ drop table histogram3_bucket;

Affected Rows: 0

-- test with invalid data (unaligned buckets)
create table histogram4_bucket (
ts timestamp time index,
le string,
s string,
val double,
primary key (s, le),
);

Affected Rows: 0

insert into histogram4_bucket values
(2900000, "0.1", "a", 0),
(2900000, "1", "a", 10),
(2900000, "5", "a", 20),
(2900000, "+Inf", "a", 150),
(3000000, "0.1", "a", 50),
(3000000, "1", "a", 70),
(3000000, "5", "a", 120),
-- INF here is missing
;

Affected Rows: 7

tql eval (2900, 3000, '100s') histogram_quantile(0.9, histogram4_bucket);

+---------------------+---+-----+
| ts | s | val |
+---------------------+---+-----+
| 1970-01-01T00:48:20 | a | 5.0 |
| 1970-01-01T00:50:00 | a | 5.0 |
+---------------------+---+-----+

drop table histogram4_bucket;

Affected Rows: 0

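For reference, the 5.0 in the first row follows from the standard histogram_quantile bucket walk: at 00:48:20 the cumulative counts are 0, 10, 20, 150 for le = 0.1, 1, 5, +Inf, the 0.9 quantile rank is 0.9 * 150 = 135, which falls into the +Inf bucket, and in that case the upper bound of the highest finite bucket (5.0) is reported. The second row pins down the current behaviour for the deliberately broken series whose +Inf bucket is missing. A hedged Rust sketch of the bucket walk (illustrative only, not the GreptimeDB code):

fn histogram_quantile(q: f64, buckets: &[(f64, f64)]) -> f64 {
    // `buckets` holds (upper_bound, cumulative_count) pairs sorted by bound,
    // ending with the +Inf bucket.
    let total = buckets.last().unwrap().1;
    let rank = q * total;
    let idx = buckets.iter().position(|&(_, count)| count >= rank).unwrap();
    let (upper, count) = buckets[idx];
    if upper.is_infinite() {
        // Quantile falls into the +Inf bucket: report the highest finite bound.
        return buckets[idx - 1].0;
    }
    let (lower, prev) = if idx == 0 { (0.0, 0.0) } else { buckets[idx - 1] };
    // Linear interpolation inside the bucket.
    lower + (upper - lower) * (rank - prev) / (count - prev)
}

fn main() {
    let buckets = [(0.1, 0.0), (1.0, 10.0), (5.0, 20.0), (f64::INFINITY, 150.0)];
    // rank = 0.9 * 150 = 135 lies in the +Inf bucket, hence 5.0 as in the table above.
    assert_eq!(histogram_quantile(0.9, &buckets), 5.0);
}
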
@@ -163,3 +163,27 @@ insert into histogram3_bucket values
tql eval (3000, 3005, '3s') histogram_quantile(0.5, sum by(le, s) (rate(histogram3_bucket[5m])));

drop table histogram3_bucket;

-- test with invalid data (unaligned buckets)
create table histogram4_bucket (
ts timestamp time index,
le string,
s string,
val double,
primary key (s, le),
);

insert into histogram4_bucket values
(2900000, "0.1", "a", 0),
(2900000, "1", "a", 10),
(2900000, "5", "a", 20),
(2900000, "+Inf", "a", 150),
(3000000, "0.1", "a", 50),
(3000000, "1", "a", 70),
(3000000, "5", "a", 120),
-- INF here is missing
;

tql eval (2900, 3000, '100s') histogram_quantile(0.9, histogram4_bucket);

drop table histogram4_bucket;

65
tests/cases/standalone/common/promql/subquery.result
Normal file
@@ -0,0 +1,65 @@
create table metric_total (
ts timestamp time index,
val double,
);

Affected Rows: 0

insert into metric_total values
(0, 1),
(10000, 2);

Affected Rows: 2

tql eval (10, 10, '1s') sum_over_time(metric_total[50s:10s]);

+---------------------+----------------------------------+
| ts | prom_sum_over_time(ts_range,val) |
+---------------------+----------------------------------+
| 1970-01-01T00:00:10 | 3.0 |
+---------------------+----------------------------------+

tql eval (10, 10, '1s') sum_over_time(metric_total[50s:5s]);

+---------------------+----------------------------------+
| ts | prom_sum_over_time(ts_range,val) |
+---------------------+----------------------------------+
| 1970-01-01T00:00:10 | 4.0 |
+---------------------+----------------------------------+

tql eval (300, 300, '1s') sum_over_time(metric_total[50s:10s]);

+---------------------+----------------------------------+
| ts | prom_sum_over_time(ts_range,val) |
+---------------------+----------------------------------+
| 1970-01-01T00:05:00 | 10.0 |
+---------------------+----------------------------------+

tql eval (359, 359, '1s') sum_over_time(metric_total[60s:10s]);

+---------------------+----------------------------------+
| ts | prom_sum_over_time(ts_range,val) |
+---------------------+----------------------------------+
| 1970-01-01T00:05:59 | 2.0 |
+---------------------+----------------------------------+

tql eval (10, 10, '1s') rate(metric_total[20s:10s]);

+---------------------+----------------------------+
| ts | prom_rate(ts_range,val,ts) |
+---------------------+----------------------------+
| 1970-01-01T00:00:10 | 0.1 |
+---------------------+----------------------------+

tql eval (20, 20, '1s') rate(metric_total[20s:5s]);

+---------------------+----------------------------+
| ts | prom_rate(ts_range,val,ts) |
+---------------------+----------------------------+
| 1970-01-01T00:00:20 | 0.06666666666666667 |
+---------------------+----------------------------+

drop table metric_total;

Affected Rows: 0

22
tests/cases/standalone/common/promql/subquery.sql
Normal file
@@ -0,0 +1,22 @@
create table metric_total (
ts timestamp time index,
val double,
);

insert into metric_total values
(0, 1),
(10000, 2);

tql eval (10, 10, '1s') sum_over_time(metric_total[50s:10s]);

tql eval (10, 10, '1s') sum_over_time(metric_total[50s:5s]);

tql eval (300, 300, '1s') sum_over_time(metric_total[50s:10s]);

tql eval (359, 359, '1s') sum_over_time(metric_total[60s:10s]);

tql eval (10, 10, '1s') rate(metric_total[20s:10s]);

tql eval (20, 20, '1s') rate(metric_total[20s:5s]);

drop table metric_total;

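For reference, a subquery such as metric_total[50s:10s] re-evaluates the inner expression at the given 10s resolution across the 50s window, and sum_over_time then adds up every sample the subquery yields. A minimal Rust sketch of that outer aggregation, with the sample list standing in (hypothetically) for what the subquery produces at t = 10s from the two inserted points:

// Sum of all sample values in an already-materialized window.
fn sum_over_time(samples: &[(i64, f64)]) -> f64 {
    samples.iter().map(|&(_, v)| v).sum()
}

fn main() {
    // Values 1.0 (t = 0s) and 2.0 (t = 10s) are the only points carrying data
    // here, so the window sums to 3.0, matching the first subquery result above.
    let window = [(0_i64, 1.0_f64), (10, 2.0)];
    assert_eq!(sum_over_time(&window), 3.0);
}
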
@@ -4,6 +4,10 @@ ue = "ue"
worl = "worl"
ot = "ot"
unqualifed = "unqualifed"
typ = "typ"
varidic = "varidic"
typs = "typs"
varadic = "varadic"

[files]
extend-exclude = [