mirror of
https://github.com/GreptimeTeam/greptimedb.git
synced 2025-12-22 22:20:02 +00:00
Compare commits
49 Commits
v0.12.0-ni ... v0.12.1
Commits (SHA1):
8da5949fc5 · db6a63ef6c · f166b93b02 · 904d560175 · 765d1277ee · ccf42a9d97 · 71e2fb895f
c9671fd669 · b5efc75aab · c1d18d9980 · 5d9faaaf39 · 538875abee · 5ed09c4584 · 3f6a41eac5
ff0dcf12c5 · 5b1fca825a · 7bd108e2be · 286f225e50 · 4f988b5ba9 · 500d0852eb · 8d05fb3503
d7b6718be0 · 6f0783e17e · d69e93b91a · 76083892cd · 7981c06989 · 97bb1519f8 · 1d8c9c1843
71007e200c · a0ff9e751e · f6f617d667 · e8788088a8 · 53b25c04a2 · 62a8b8b9dc · c8bdeaaa6a
81da18e5df · 7c65fddb30 · 421e38c481 · aada5c1706 · aa8f119bbb · 19a6d15849 · 073aaefe65
77223a0f3e · 4ef038d098 · deb9520970 · 6bba5e0afa · f359eeb667 · 009dbad581 · a2047b096c
.github/CODEOWNERS — 2 changes (vendored)

@@ -4,7 +4,7 @@
 * @GreptimeTeam/db-approver

-## [Module] Databse Engine
+## [Module] Database Engine
 /src/index @zhongzc
 /src/mito2 @evenyag @v0y4g3r @waynexia
 /src/query @evenyag
(unnamed action — file header lost in this mirror; the steps match the dev-builder image build action)

@@ -41,7 +41,14 @@ runs:
         username: ${{ inputs.dockerhub-image-registry-username }}
         password: ${{ inputs.dockerhub-image-registry-token }}

-    - name: Build and push dev-builder-ubuntu image
+    - name: Set up qemu for multi-platform builds
+      uses: docker/setup-qemu-action@v3
+      with:
+        platforms: linux/amd64,linux/arm64
+        # The latest version will lead to segmentation fault.
+        image: tonistiigi/binfmt:qemu-v7.0.0-28
+
+    - name: Build and push dev-builder-ubuntu image # Build image for amd64 and arm64 platform.
       shell: bash
       if: ${{ inputs.build-dev-builder-ubuntu == 'true' }}
       run: |
@@ -52,7 +59,7 @@ runs:
           IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
           DEV_BUILDER_IMAGE_TAG=${{ inputs.version }}

-    - name: Build and push dev-builder-centos image
+    - name: Build and push dev-builder-centos image # Only build image for amd64 platform.
       shell: bash
       if: ${{ inputs.build-dev-builder-centos == 'true' }}
       run: |
@@ -69,8 +76,7 @@ runs:
       run: |
         make dev-builder \
           BASE_IMAGE=android \
+          BUILDX_MULTI_PLATFORM_BUILD=amd64 \
           IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
           IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
-          DEV_BUILDER_IMAGE_TAG=${{ inputs.version }} && \
-
-        docker push ${{ inputs.dockerhub-image-registry }}/${{ inputs.dockerhub-image-namespace }}/dev-builder-android:${{ inputs.version }}
+          DEV_BUILDER_IMAGE_TAG=${{ inputs.version }}

(unnamed action — file header lost in this mirror)

@@ -34,8 +34,8 @@ inputs:
     required: true
   push-latest-tag:
     description: Whether to push the latest tag
-    required: false
-    default: 'true'
+    required: true
+    default: 'false'
 runs:
   using: composite
   steps:
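The pinned binfmt image can also be exercised outside CI; a minimal sketch of the equivalent manual setup, assuming a Docker host with privileged access (the platform list mirrors the hunk above):

  # Register qemu binfmt handlers for cross-platform buildx builds, using the
  # pinned image version that avoids the segfault noted in the diff comment:
  docker run --privileged --rm tonistiigi/binfmt:qemu-v7.0.0-28 --install amd64,arm64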
.github/actions/build-images/action.yml — 4 changes (vendored)
@@ -22,8 +22,8 @@ inputs:
     required: true
   push-latest-tag:
     description: Whether to push the latest tag
-    required: false
-    default: 'true'
+    required: true
+    default: 'false'
   dev-mode:
     description: Enable dev mode, only build standard greptime
     required: false
@@ -52,7 +52,7 @@ runs:
       uses: ./.github/actions/build-greptime-binary
       with:
         base-image: ubuntu
-        features: servers/dashboard,pg_kvbackend
+        features: servers/dashboard
         cargo-profile: ${{ inputs.cargo-profile }}
         artifacts-dir: greptime-linux-${{ inputs.arch }}-${{ inputs.version }}
         version: ${{ inputs.version }}
@@ -70,7 +70,7 @@ runs:
       if: ${{ inputs.arch == 'amd64' && inputs.dev-mode == 'false' }} # Builds greptime for centos if the host machine is amd64.
       with:
         base-image: centos
-        features: servers/dashboard,pg_kvbackend
+        features: servers/dashboard
         cargo-profile: ${{ inputs.cargo-profile }}
         artifacts-dir: greptime-linux-${{ inputs.arch }}-centos-${{ inputs.version }}
         version: ${{ inputs.version }}

(unnamed action — file header lost in this mirror)

@@ -47,7 +47,6 @@ runs:
     shell: pwsh
     run: make test sqlness-test
     env:
-      RUSTUP_WINDOWS_PATH_ADD_BIN: 1 # Workaround for https://github.com/nextest-rs/nextest/issues/1493
       RUST_BACKTRACE: 1
       SQLNESS_OPTS: "--preserve-state"
.github/actions/release-cn-artifacts/action.yaml — 18 changes (vendored)
@@ -51,8 +51,8 @@ inputs:
     required: true
   upload-to-s3:
     description: Upload to S3
-    required: false
-    default: 'true'
+    required: true
+    default: 'false'
   artifacts-dir:
     description: Directory to store artifacts
     required: false
@@ -64,11 +64,11 @@ inputs:
   upload-max-retry-times:
     description: Max retry times for uploading artifacts to S3
     required: false
-    default: "20"
+    default: "30"
   upload-retry-timeout:
     description: Timeout for uploading artifacts to S3
     required: false
-    default: "30" # minutes
+    default: "120" # minutes
 runs:
   using: composite
   steps:
@@ -77,13 +77,21 @@ runs:
       with:
         path: ${{ inputs.artifacts-dir }}

+    - name: Install s5cmd
+      shell: bash
+      run: |
+        wget https://github.com/peak/s5cmd/releases/download/v2.3.0/s5cmd_2.3.0_Linux-64bit.tar.gz
+        tar -xzf s5cmd_2.3.0_Linux-64bit.tar.gz
+        sudo mv s5cmd /usr/local/bin/
+        sudo chmod +x /usr/local/bin/s5cmd
+
     - name: Release artifacts to cn region
       uses: nick-invision/retry@v2
       if: ${{ inputs.upload-to-s3 == 'true' }}
       env:
         AWS_ACCESS_KEY_ID: ${{ inputs.aws-cn-access-key-id }}
         AWS_SECRET_ACCESS_KEY: ${{ inputs.aws-cn-secret-access-key }}
         AWS_DEFAULT_REGION: ${{ inputs.aws-cn-region }}
         AWS_REGION: ${{ inputs.aws-cn-region }}
         UPDATE_VERSION_INFO: ${{ inputs.update-version-info }}
       with:
         max_attempts: ${{ inputs.upload-max-retry-times }}
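The install step is easy to verify locally with the same commands; a minimal sketch, assuming a Linux amd64 host with sudo:

  wget https://github.com/peak/s5cmd/releases/download/v2.3.0/s5cmd_2.3.0_Linux-64bit.tar.gz
  tar -xzf s5cmd_2.3.0_Linux-64bit.tar.gz
  sudo mv s5cmd /usr/local/bin/ && sudo chmod +x /usr/local/bin/s5cmd
  s5cmd version   # should report v2.3.0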
||||
@@ -8,15 +8,15 @@ inputs:
|
||||
default: 2
|
||||
description: "Number of Datanode replicas"
|
||||
meta-replicas:
|
||||
default: 1
|
||||
default: 2
|
||||
description: "Number of Metasrv replicas"
|
||||
image-registry:
|
||||
image-registry:
|
||||
default: "docker.io"
|
||||
description: "Image registry"
|
||||
image-repository:
|
||||
image-repository:
|
||||
default: "greptime/greptimedb"
|
||||
description: "Image repository"
|
||||
image-tag:
|
||||
image-tag:
|
||||
default: "latest"
|
||||
description: 'Image tag'
|
||||
etcd-endpoints:
|
||||
@@ -32,12 +32,12 @@ runs:
|
||||
steps:
|
||||
- name: Install GreptimeDB operator
|
||||
uses: nick-fields/retry@v3
|
||||
with:
|
||||
with:
|
||||
timeout_minutes: 3
|
||||
max_attempts: 3
|
||||
shell: bash
|
||||
command: |
|
||||
helm repo add greptime https://greptimeteam.github.io/helm-charts/
|
||||
helm repo add greptime https://greptimeteam.github.io/helm-charts/
|
||||
helm repo update
|
||||
helm upgrade \
|
||||
--install \
|
||||
@@ -48,10 +48,10 @@ runs:
|
||||
--wait-for-jobs
|
||||
- name: Install GreptimeDB cluster
|
||||
shell: bash
|
||||
run: |
|
||||
run: |
|
||||
helm upgrade \
|
||||
--install my-greptimedb \
|
||||
--set meta.etcdEndpoints=${{ inputs.etcd-endpoints }} \
|
||||
--set meta.backendStorage.etcd.endpoints=${{ inputs.etcd-endpoints }} \
|
||||
--set meta.enableRegionFailover=${{ inputs.enable-region-failover }} \
|
||||
--set image.registry=${{ inputs.image-registry }} \
|
||||
--set image.repository=${{ inputs.image-repository }} \
|
||||
@@ -59,7 +59,7 @@ runs:
|
||||
--set base.podTemplate.main.resources.requests.cpu=50m \
|
||||
--set base.podTemplate.main.resources.requests.memory=256Mi \
|
||||
--set base.podTemplate.main.resources.limits.cpu=2000m \
|
||||
--set base.podTemplate.main.resources.limits.memory=2Gi \
|
||||
--set base.podTemplate.main.resources.limits.memory=3Gi \
|
||||
--set frontend.replicas=${{ inputs.frontend-replicas }} \
|
||||
--set datanode.replicas=${{ inputs.datanode-replicas }} \
|
||||
--set meta.replicas=${{ inputs.meta-replicas }} \
|
||||
@@ -72,7 +72,7 @@ runs:
|
||||
- name: Wait for GreptimeDB
|
||||
shell: bash
|
||||
run: |
|
||||
while true; do
|
||||
while true; do
|
||||
PHASE=$(kubectl -n my-greptimedb get gtc my-greptimedb -o jsonpath='{.status.clusterPhase}')
|
||||
if [ "$PHASE" == "Running" ]; then
|
||||
echo "Cluster is ready"
|
||||
@@ -86,10 +86,10 @@ runs:
|
||||
- name: Print GreptimeDB info
|
||||
if: always()
|
||||
shell: bash
|
||||
run: |
|
||||
run: |
|
||||
kubectl get all --show-labels -n my-greptimedb
|
||||
- name: Describe Nodes
|
||||
if: always()
|
||||
shell: bash
|
||||
run: |
|
||||
run: |
|
||||
kubectl describe nodes
|
||||
|
||||
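The readiness check polls the operator-reported cluster phase until it becomes Running; a sketch of the full loop the hunk truncates (the sleep interval is an assumption):

  while true; do
    PHASE=$(kubectl -n my-greptimedb get gtc my-greptimedb -o jsonpath='{.status.clusterPhase}')
    if [ "$PHASE" == "Running" ]; then
      echo "Cluster is ready"
      break
    fi
    echo "Cluster phase: $PHASE, waiting..."
    sleep 5   # assumed polling interval
  done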
(unnamed values file — file header lost in this mirror; the content is a remote-WAL test configuration)

@@ -2,13 +2,14 @@ meta:
   configData: |-
     [runtime]
     global_rt_size = 4

     [wal]
     provider = "kafka"
     broker_endpoints = ["kafka.kafka-cluster.svc.cluster.local:9092"]
     num_topics = 3
+    auto_prune_interval = "30s"
+    trigger_flush_threshold = 100

     [datanode]
     [datanode.client]
     timeout = "120s"
@@ -21,7 +22,7 @@ datanode:
     [wal]
     provider = "kafka"
     broker_endpoints = ["kafka.kafka-cluster.svc.cluster.local:9092"]
-    linger = "2ms"
+    overwrite_entry_start_id = true
 frontend:
   configData: |-
     [runtime]
.github/actions/start-runner/action.yml — 2 changes (vendored)

@@ -56,7 +56,7 @@ runs:

   - name: Start EC2 runner
     if: startsWith(inputs.runner, 'ec2')
-    uses: machulav/ec2-github-runner@v2
+    uses: machulav/ec2-github-runner@v2.3.8
     id: start-linux-arm64-ec2-runner
     with:
       mode: start
.github/actions/stop-runner/action.yml — 2 changes (vendored)

@@ -33,7 +33,7 @@ runs:

   - name: Stop EC2 runner
     if: ${{ inputs.label && inputs.ec2-instance-id }}
-    uses: machulav/ec2-github-runner@v2
+    uses: machulav/ec2-github-runner@v2.3.8
     with:
       mode: stop
       label: ${{ inputs.label }}
.github/labeler.yaml — 15 changes (vendored, new file)

@@ -0,0 +1,15 @@
+ci:
+  - changed-files:
+      - any-glob-to-any-file: .github/**
+
+docker:
+  - changed-files:
+      - any-glob-to-any-file: docker/**
+
+documentation:
+  - changed-files:
+      - any-glob-to-any-file: docs/**
+
+dashboard:
+  - changed-files:
+      - any-glob-to-any-file: grafana/**
.github/scripts/create-version.sh — 29 changes (vendored)

@@ -8,24 +8,25 @@ set -e
 # - If it's a nightly build, the version is 'nightly-YYYYMMDD-$(git rev-parse --short HEAD)', like 'nightly-20230712-e5b243c'.
 # create_version ${GIHUB_EVENT_NAME} ${NEXT_RELEASE_VERSION} ${NIGHTLY_RELEASE_PREFIX}
 function create_version() {
-  # Read from envrionment variables.
+  # Read from environment variables.
   if [ -z "$GITHUB_EVENT_NAME" ]; then
-    echo "GITHUB_EVENT_NAME is empty"
+    echo "GITHUB_EVENT_NAME is empty" >&2
     exit 1
   fi

   if [ -z "$NEXT_RELEASE_VERSION" ]; then
-    echo "NEXT_RELEASE_VERSION is empty"
-    exit 1
+    echo "NEXT_RELEASE_VERSION is empty, use version from Cargo.toml" >&2
+    # NOTE: Need a `v` prefix for the version string.
+    export NEXT_RELEASE_VERSION=v$(grep '^version = ' Cargo.toml | cut -d '"' -f 2 | head -n 1)
   fi

   if [ -z "$NIGHTLY_RELEASE_PREFIX" ]; then
-    echo "NIGHTLY_RELEASE_PREFIX is empty"
+    echo "NIGHTLY_RELEASE_PREFIX is empty" >&2
     exit 1
   fi

   # Reuse $NEXT_RELEASE_VERSION to identify whether it's a nightly build.
-  # It will be like 'nigtly-20230808-7d0d8dc6'.
+  # It will be like 'nightly-20230808-7d0d8dc6'.
   if [ "$NEXT_RELEASE_VERSION" = nightly ]; then
     echo "$NIGHTLY_RELEASE_PREFIX-$(date "+%Y%m%d")-$(git rev-parse --short HEAD)"
     exit 0
@@ -35,7 +36,7 @@ function create_version() {
   # It will be like 'dev-2023080819-f0e7216c'.
   if [ "$NEXT_RELEASE_VERSION" = dev ]; then
     if [ -z "$COMMIT_SHA" ]; then
-      echo "COMMIT_SHA is empty in dev build"
+      echo "COMMIT_SHA is empty in dev build" >&2
       exit 1
     fi
     echo "dev-$(date "+%Y%m%d-%s")-$(echo "$COMMIT_SHA" | cut -c1-8)"
@@ -45,7 +46,7 @@ function create_version() {
   # Note: Only output 'version=xxx' to stdout when everything is ok, so that it can be used in GitHub Actions Outputs.
   if [ "$GITHUB_EVENT_NAME" = push ]; then
     if [ -z "$GITHUB_REF_NAME" ]; then
-      echo "GITHUB_REF_NAME is empty in push event"
+      echo "GITHUB_REF_NAME is empty in push event" >&2
       exit 1
     fi
     echo "$GITHUB_REF_NAME"
@@ -54,15 +55,15 @@ function create_version() {
   elif [ "$GITHUB_EVENT_NAME" = schedule ]; then
     echo "$NEXT_RELEASE_VERSION-$NIGHTLY_RELEASE_PREFIX-$(date "+%Y%m%d")"
   else
-    echo "Unsupported GITHUB_EVENT_NAME: $GITHUB_EVENT_NAME"
+    echo "Unsupported GITHUB_EVENT_NAME: $GITHUB_EVENT_NAME" >&2
     exit 1
   fi
 }

 # You can run as following examples:
-# GITHUB_EVENT_NAME=push NEXT_RELEASE_VERSION=v0.4.0 NIGHTLY_RELEASE_PREFIX=nigtly GITHUB_REF_NAME=v0.3.0 ./create-version.sh
-# GITHUB_EVENT_NAME=workflow_dispatch NEXT_RELEASE_VERSION=v0.4.0 NIGHTLY_RELEASE_PREFIX=nigtly ./create-version.sh
-# GITHUB_EVENT_NAME=schedule NEXT_RELEASE_VERSION=v0.4.0 NIGHTLY_RELEASE_PREFIX=nigtly ./create-version.sh
-# GITHUB_EVENT_NAME=schedule NEXT_RELEASE_VERSION=nightly NIGHTLY_RELEASE_PREFIX=nigtly ./create-version.sh
-# GITHUB_EVENT_NAME=workflow_dispatch COMMIT_SHA=f0e7216c4bb6acce9b29a21ec2d683be2e3f984a NEXT_RELEASE_VERSION=dev NIGHTLY_RELEASE_PREFIX=nigtly ./create-version.sh
+# GITHUB_EVENT_NAME=push NEXT_RELEASE_VERSION=v0.4.0 NIGHTLY_RELEASE_PREFIX=nightly GITHUB_REF_NAME=v0.3.0 ./create-version.sh
+# GITHUB_EVENT_NAME=workflow_dispatch NEXT_RELEASE_VERSION=v0.4.0 NIGHTLY_RELEASE_PREFIX=nightly ./create-version.sh
+# GITHUB_EVENT_NAME=schedule NEXT_RELEASE_VERSION=v0.4.0 NIGHTLY_RELEASE_PREFIX=nightly ./create-version.sh
+# GITHUB_EVENT_NAME=schedule NEXT_RELEASE_VERSION=nightly NIGHTLY_RELEASE_PREFIX=nightly ./create-version.sh
+# GITHUB_EVENT_NAME=workflow_dispatch COMMIT_SHA=f0e7216c4bb6acce9b29a21ec2d683be2e3f984a NEXT_RELEASE_VERSION=dev NIGHTLY_RELEASE_PREFIX=nightly ./create-version.sh
 create_version
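The new Cargo.toml fallback can be checked locally; a minimal sketch, assuming it runs from the repo root where the workspace Cargo.toml lives:

  # Derive the fallback version exactly as the script now does when
  # NEXT_RELEASE_VERSION is unset (the `v` prefix is added manually):
  v="v$(grep '^version = ' Cargo.toml | cut -d '"' -f 2 | head -n 1)"
  echo "$v"   # e.g. v0.12.1 on this tag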
.github/scripts/deploy-greptimedb.sh — 6 changes (vendored)

@@ -10,7 +10,7 @@ GREPTIMEDB_IMAGE_TAG=${GREPTIMEDB_IMAGE_TAG:-latest}
 ETCD_CHART="oci://registry-1.docker.io/bitnamicharts/etcd"
 GREPTIME_CHART="https://greptimeteam.github.io/helm-charts/"

-# Ceate a cluster with 1 control-plane node and 5 workers.
+# Create a cluster with 1 control-plane node and 5 workers.
 function create_kind_cluster() {
   cat <<EOF | kind create cluster --name "${CLUSTER}" --image kindest/node:"$KUBERNETES_VERSION" --config=-
 kind: Cluster
@@ -68,7 +68,7 @@ function deploy_greptimedb_cluster() {

   helm install "$cluster_name" greptime/greptimedb-cluster \
     --set image.tag="$GREPTIMEDB_IMAGE_TAG" \
-    --set meta.etcdEndpoints="etcd.$install_namespace:2379" \
+    --set meta.backendStorage.etcd.endpoints="etcd.$install_namespace:2379" \
     -n "$install_namespace"

   # Wait for greptimedb cluster to be ready.
@@ -103,7 +103,7 @@ function deploy_greptimedb_cluster_with_s3_storage() {

   helm install "$cluster_name" greptime/greptimedb-cluster -n "$install_namespace" \
     --set image.tag="$GREPTIMEDB_IMAGE_TAG" \
-    --set meta.etcdEndpoints="etcd.$install_namespace:2379" \
+    --set meta.backendStorage.etcd.endpoints="etcd.$install_namespace:2379" \
     --set storage.s3.bucket="$AWS_CI_TEST_BUCKET" \
     --set storage.s3.region="$AWS_REGION" \
     --set storage.s3.root="$DATA_ROOT" \
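The same value-path rename applies to any manual chart deployment; a hedged before/after sketch (release name and namespace are illustrative):

  # Old chart value path:
  helm upgrade --install my-greptimedb greptime/greptimedb-cluster \
    --set meta.etcdEndpoints="etcd.default:2379"
  # New chart value path used throughout this release:
  helm upgrade --install my-greptimedb greptime/greptimedb-cluster \
    --set meta.backendStorage.etcd.endpoints="etcd.default:2379"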
.github/scripts/update-dev-builder-version.sh — 37 changes (vendored, new executable file)

@@ -0,0 +1,37 @@
+#!/bin/bash
+
+DEV_BUILDER_IMAGE_TAG=$1
+
+update_dev_builder_version() {
+  if [ -z "$DEV_BUILDER_IMAGE_TAG" ]; then
+    echo "Error: Should specify the dev-builder image tag"
+    exit 1
+  fi
+
+  # Configure Git configs.
+  git config --global user.email greptimedb-ci@greptime.com
+  git config --global user.name greptimedb-ci
+
+  # Checkout a new branch.
+  BRANCH_NAME="ci/update-dev-builder-$(date +%Y%m%d%H%M%S)"
+  git checkout -b $BRANCH_NAME
+
+  # Update the dev-builder image tag in the Makefile.
+  sed -i "s/DEV_BUILDER_IMAGE_TAG ?=.*/DEV_BUILDER_IMAGE_TAG ?= ${DEV_BUILDER_IMAGE_TAG}/g" Makefile
+
+  # Commit the changes.
+  git add Makefile
+  git commit -m "ci: update dev-builder image tag"
+  git push origin $BRANCH_NAME
+
+  # Create a Pull Request.
+  gh pr create \
+    --title "ci: update dev-builder image tag" \
+    --body "This PR updates the dev-builder image tag" \
+    --base main \
+    --head $BRANCH_NAME \
+    --reviewer zyy17 \
+    --reviewer daviderli614
+}
+
+update_dev_builder_version
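The sed rewrite can be previewed without touching the Makefile; a minimal sketch (the example tag is hypothetical):

  # Dry-run the substitution the script performs, printing only the changed line:
  DEV_BUILDER_IMAGE_TAG=2025-02-11-deadbeef
  sed -n "s/DEV_BUILDER_IMAGE_TAG ?=.*/DEV_BUILDER_IMAGE_TAG ?= ${DEV_BUILDER_IMAGE_TAG}/p" Makefile
  # -> DEV_BUILDER_IMAGE_TAG ?= 2025-02-11-deadbeef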
.github/scripts/update-helm-charts-version.sh — 46 changes (vendored, new executable file)

@@ -0,0 +1,46 @@
+#!/bin/bash
+
+set -e
+
+VERSION=${VERSION}
+GITHUB_TOKEN=${GITHUB_TOKEN}
+
+update_helm_charts_version() {
+  # Configure Git configs.
+  git config --global user.email update-helm-charts-version@greptime.com
+  git config --global user.name update-helm-charts-version
+
+  # Clone helm-charts repository.
+  git clone "https://x-access-token:${GITHUB_TOKEN}@github.com/GreptimeTeam/helm-charts.git"
+  cd helm-charts
+
+  # Set default remote for gh CLI
+  gh repo set-default GreptimeTeam/helm-charts
+
+  # Checkout a new branch.
+  BRANCH_NAME="chore/greptimedb-${VERSION}"
+  git checkout -b $BRANCH_NAME
+
+  # Update version.
+  make update-version CHART=greptimedb-cluster VERSION=${VERSION}
+  make update-version CHART=greptimedb-standalone VERSION=${VERSION}
+
+  # Update docs.
+  make docs
+
+  # Commit the changes.
+  git add .
+  git commit -s -m "chore: Update GreptimeDB version to ${VERSION}"
+  git push origin $BRANCH_NAME
+
+  # Create a Pull Request.
+  gh pr create \
+    --title "chore: Update GreptimeDB version to ${VERSION}" \
+    --body "This PR updates the GreptimeDB version." \
+    --base main \
+    --head $BRANCH_NAME \
+    --reviewer zyy17 \
+    --reviewer daviderli614
+}
+
+update_helm_charts_version
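A hedged example of how the release job invokes it (the token value is a placeholder; both variables are read by the script):

  VERSION=v0.12.1 GITHUB_TOKEN=<repo-access-token> \
    ./.github/scripts/update-helm-charts-version.sh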
.github/scripts/update-homebrew-greptme-version.sh — 42 changes (vendored, new executable file)

@@ -0,0 +1,42 @@
+#!/bin/bash
+
+set -e
+
+VERSION=${VERSION}
+GITHUB_TOKEN=${GITHUB_TOKEN}
+
+update_homebrew_greptime_version() {
+  # Configure Git configs.
+  git config --global user.email update-greptime-version@greptime.com
+  git config --global user.name update-greptime-version
+
+  # Clone helm-charts repository.
+  git clone "https://x-access-token:${GITHUB_TOKEN}@github.com/GreptimeTeam/homebrew-greptime.git"
+  cd homebrew-greptime
+
+  # Set default remote for gh CLI
+  gh repo set-default GreptimeTeam/homebrew-greptime
+
+  # Checkout a new branch.
+  BRANCH_NAME="chore/greptimedb-${VERSION}"
+  git checkout -b $BRANCH_NAME
+
+  # Update version.
+  make update-greptime-version VERSION=${VERSION}
+
+  # Commit the changes.
+  git add .
+  git commit -s -m "chore: Update GreptimeDB version to ${VERSION}"
+  git push origin $BRANCH_NAME
+
+  # Create a Pull Request.
+  gh pr create \
+    --title "chore: Update GreptimeDB version to ${VERSION}" \
+    --body "This PR updates the GreptimeDB version." \
+    --base main \
+    --head $BRANCH_NAME \
+    --reviewer zyy17 \
+    --reviewer daviderli614
+}
+
+update_homebrew_greptime_version
.github/scripts/upload-artifacts-to-s3.sh — 8 changes (vendored)

@@ -33,7 +33,7 @@ function upload_artifacts() {
   # ├── greptime-darwin-amd64-v0.2.0.sha256sum
   # └── greptime-darwin-amd64-v0.2.0.tar.gz
   find "$ARTIFACTS_DIR" -type f \( -name "*.tar.gz" -o -name "*.sha256sum" \) | while IFS= read -r file; do
-    aws s3 cp \
+    s5cmd cp \
       "$file" "s3://$AWS_S3_BUCKET/$RELEASE_DIRS/$VERSION/$(basename "$file")"
   done
 }
@@ -41,11 +41,11 @@ function upload_artifacts() {
 # Updates the latest version information in AWS S3 if UPDATE_VERSION_INFO is true.
 function update_version_info() {
   if [ "$UPDATE_VERSION_INFO" == "true" ]; then
-    # If it's the officail release(like v1.0.0, v1.0.1, v1.0.2, etc.), update latest-version.txt.
+    # If it's the official release(like v1.0.0, v1.0.1, v1.0.2, etc.), update latest-version.txt.
     if [[ "$VERSION" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
       echo "Updating latest-version.txt"
       echo "$VERSION" > latest-version.txt
-      aws s3 cp \
+      s5cmd cp \
         latest-version.txt "s3://$AWS_S3_BUCKET/$RELEASE_DIRS/latest-version.txt"
     fi
@@ -53,7 +53,7 @@ function update_version_info() {
     if [[ "$VERSION" == *"nightly"* ]]; then
       echo "Updating latest-nightly-version.txt"
      echo "$VERSION" > latest-nightly-version.txt
-      aws s3 cp \
+      s5cmd cp \
        latest-nightly-version.txt "s3://$AWS_S3_BUCKET/$RELEASE_DIRS/latest-nightly-version.txt"
     fi
   fi
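s5cmd keeps the aws-cli argument shape for simple copies, and the official-release regex is easy to sanity-check; a hedged sketch (bucket and key are placeholders):

  # Same source/destination form as `aws s3 cp`:
  s5cmd cp latest-version.txt "s3://my-bucket/releases/latest-version.txt"

  # Which tags count as an official release under ^v[0-9]+\.[0-9]+\.[0-9]+$ :
  for v in v0.12.1 v0.12.0-nightly-20250210 v1.0.0; do
    if [[ "$v" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
      echo "$v: official"
    else
      echo "$v: not official"   # nightlies and pre-releases fall through here
    fi
  done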
.github/workflows/apidoc.yml — 2 changes (vendored)

@@ -14,7 +14,7 @@ name: Build API docs

 jobs:
   apidoc:
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v4
         with:
.github/workflows/dev-build.yml — 35 changes (vendored)

@@ -16,11 +16,11 @@ on:
         description: The runner uses to build linux-amd64 artifacts
         default: ec2-c6i.4xlarge-amd64
         options:
-          - ubuntu-20.04
-          - ubuntu-20.04-8-cores
-          - ubuntu-20.04-16-cores
-          - ubuntu-20.04-32-cores
-          - ubuntu-20.04-64-cores
+          - ubuntu-22.04
+          - ubuntu-22.04-8-cores
+          - ubuntu-22.04-16-cores
+          - ubuntu-22.04-32-cores
+          - ubuntu-22.04-64-cores
           - ec2-c6i.xlarge-amd64 # 4C8G
           - ec2-c6i.2xlarge-amd64 # 8C16G
           - ec2-c6i.4xlarge-amd64 # 16C32G
@@ -55,6 +55,11 @@ on:
         description: Build and push images to DockerHub and ACR
         required: false
         default: true
+      upload_artifacts_to_s3:
+        type: boolean
+        description: Whether upload artifacts to s3
+        required: false
+        default: false
       cargo_profile:
         type: choice
         description: The cargo profile to use in building GreptimeDB.
@@ -83,7 +88,7 @@ jobs:
   allocate-runners:
     name: Allocate runners
     if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     outputs:
       linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }}
       linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }}
@@ -218,7 +223,7 @@ jobs:
       build-linux-amd64-artifacts,
       build-linux-arm64-artifacts,
     ]
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     outputs:
       build-result: ${{ steps.set-build-result.outputs.build-result }}
     steps:
@@ -239,6 +244,13 @@ jobs:
         push-latest-tag: false # Don't push the latest tag to registry.
         dev-mode: true # Only build the standard images.

+    - name: Echo Docker image tag to step summary
+      run: |
+        echo "## Docker Image Tag" >> $GITHUB_STEP_SUMMARY
+        echo "Image Tag: \`${{ needs.allocate-runners.outputs.version }}\`" >> $GITHUB_STEP_SUMMARY
+        echo "Full Image Name: \`docker.io/${{ vars.IMAGE_NAMESPACE }}/${{ vars.DEV_BUILD_IMAGE_NAME }}:${{ needs.allocate-runners.outputs.version }}\`" >> $GITHUB_STEP_SUMMARY
+        echo "Pull Command: \`docker pull docker.io/${{ vars.IMAGE_NAMESPACE }}/${{ vars.DEV_BUILD_IMAGE_NAME }}:${{ needs.allocate-runners.outputs.version }}\`" >> $GITHUB_STEP_SUMMARY
+
     - name: Set build result
       id: set-build-result
       run: |
@@ -251,7 +263,7 @@ jobs:
       allocate-runners,
       release-images-to-dockerhub,
     ]
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     continue-on-error: true
     steps:
     - uses: actions/checkout@v4
@@ -274,6 +286,7 @@ jobs:
         aws-cn-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
         aws-cn-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
         aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
+        upload-to-s3: ${{ inputs.upload_artifacts_to_s3 }}
         dev-mode: true # Only build the standard images(exclude centos images).
         push-latest-tag: false # Don't push the latest tag to registry.
         update-version-info: false # Don't update the version info in S3.
@@ -282,7 +295,7 @@ jobs:
     name: Stop linux-amd64 runner
     # Only run this job when the runner is allocated.
     if: ${{ always() }}
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     needs: [
       allocate-runners,
       build-linux-amd64-artifacts,
@@ -308,7 +321,7 @@ jobs:
     name: Stop linux-arm64 runner
     # Only run this job when the runner is allocated.
     if: ${{ always() }}
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     needs: [
       allocate-runners,
       build-linux-arm64-artifacts,
@@ -336,7 +349,7 @@ jobs:
     needs: [
       release-images-to-dockerhub
     ]
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     permissions:
       issues: write
.github/workflows/develop.yml — 78 changes (vendored)

@@ -22,8 +22,9 @@ concurrency:

 jobs:
   check-typos-and-docs:
+    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
     name: Check typos and docs
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     steps:
     - uses: actions/checkout@v4
       with:
@@ -36,7 +37,8 @@ jobs:
         || (echo "'config/config.md' is not up-to-date, please run 'make config-docs'." && exit 1)

   license-header-check:
-    runs-on: ubuntu-20.04
+    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
+    runs-on: ubuntu-latest
     name: Check License Header
     steps:
     - uses: actions/checkout@v4
@@ -45,11 +47,12 @@ jobs:
     - uses: korandoru/hawkeye@v5

   check:
+    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
     name: Check
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        os: [ ubuntu-20.04 ]
+        os: [ ubuntu-latest ]
     timeout-minutes: 60
     steps:
     - uses: actions/checkout@v4
@@ -71,8 +74,9 @@ jobs:
       run: cargo check --locked --workspace --all-targets

   toml:
+    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
     name: Toml Check
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     timeout-minutes: 60
     steps:
     - uses: actions/checkout@v4
@@ -85,11 +89,12 @@ jobs:
       run: taplo format --check

   build:
+    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
     name: Build GreptimeDB binaries
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        os: [ ubuntu-20.04 ]
+        os: [ ubuntu-latest ]
     timeout-minutes: 60
     steps:
     - uses: actions/checkout@v4
@@ -127,6 +132,7 @@ jobs:
       version: current

   fuzztest:
+    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
     name: Fuzz Test
     needs: build
     runs-on: ubuntu-latest
@@ -183,11 +189,13 @@ jobs:
       max-total-time: 120

   unstable-fuzztest:
+    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
     name: Unstable Fuzz Test
     needs: build-greptime-ci
     runs-on: ubuntu-latest
     timeout-minutes: 60
     strategy:
       fail-fast: false
       matrix:
         target: [ "unstable_fuzz_create_table_standalone" ]
     steps:
@@ -215,12 +223,12 @@ jobs:
       run: |
         sudo apt update && sudo apt install -y libfuzzer-14-dev
         cargo install cargo-fuzz cargo-gc-bin --force
-    - name: Download pre-built binariy
+    - name: Download pre-built binary
       uses: actions/download-artifact@v4
       with:
         name: bin
         path: .
-    - name: Unzip bianry
+    - name: Unzip binary
       run: |
         tar -xvf ./bin.tar.gz
         rm ./bin.tar.gz
@@ -242,13 +250,19 @@ jobs:
         name: unstable-fuzz-logs
         path: /tmp/unstable-greptime/
         retention-days: 3
+    - name: Describe pods
+      if: failure()
+      shell: bash
+      run: |
+        kubectl describe pod -n my-greptimedb

   build-greptime-ci:
+    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
     name: Build GreptimeDB binary (profile-CI)
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        os: [ ubuntu-20.04 ]
+        os: [ ubuntu-latest ]
     timeout-minutes: 60
     steps:
     - uses: actions/checkout@v4
@@ -267,7 +281,7 @@ jobs:
     - name: Install cargo-gc-bin
       shell: bash
       run: cargo install cargo-gc-bin --force
-    - name: Build greptime bianry
+    - name: Build greptime binary
       shell: bash
       # `cargo gc` will invoke `cargo build` with specified args
       run: cargo gc --profile ci -- --bin greptime --features pg_kvbackend
@@ -285,11 +299,13 @@ jobs:
       version: current

   distributed-fuzztest:
+    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
     name: Fuzz Test (Distributed, ${{ matrix.mode.name }}, ${{ matrix.target }})
     runs-on: ubuntu-latest
     needs: build-greptime-ci
     timeout-minutes: 60
     strategy:
       fail-fast: false
       matrix:
         target: [ "fuzz_create_table", "fuzz_alter_table", "fuzz_create_database", "fuzz_create_logical_table", "fuzz_alter_logical_table", "fuzz_insert", "fuzz_insert_logical_table" ]
         mode:
@@ -319,9 +335,9 @@ jobs:
       name: Setup Minio
       uses: ./.github/actions/setup-minio
     - if: matrix.mode.kafka
-      name: Setup Kafka cluser
+      name: Setup Kafka cluster
       uses: ./.github/actions/setup-kafka-cluster
-    - name: Setup Etcd cluser
+    - name: Setup Etcd cluster
       uses: ./.github/actions/setup-etcd-cluster
     # Prepares for fuzz tests
     - uses: arduino/setup-protoc@v3
@@ -394,6 +410,11 @@ jobs:
       shell: bash
       run: |
         kubectl describe nodes
+    - name: Describe pod
+      if: failure()
+      shell: bash
+      run: |
+        kubectl describe pod -n my-greptimedb
     - name: Export kind logs
       if: failure()
       shell: bash
@@ -416,11 +437,13 @@ jobs:
         docker system prune -f

   distributed-fuzztest-with-chaos:
+    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
     name: Fuzz Test with Chaos (Distributed, ${{ matrix.mode.name }}, ${{ matrix.target }})
     runs-on: ubuntu-latest
     needs: build-greptime-ci
     timeout-minutes: 60
     strategy:
       fail-fast: false
       matrix:
         target: ["fuzz_migrate_mito_regions", "fuzz_migrate_metric_regions", "fuzz_failover_mito_regions", "fuzz_failover_metric_regions"]
         mode:
@@ -465,9 +488,9 @@ jobs:
       name: Setup Minio
       uses: ./.github/actions/setup-minio
     - if: matrix.mode.kafka
-      name: Setup Kafka cluser
+      name: Setup Kafka cluster
       uses: ./.github/actions/setup-kafka-cluster
-    - name: Setup Etcd cluser
+    - name: Setup Etcd cluster
       uses: ./.github/actions/setup-etcd-cluster
     # Prepares for fuzz tests
     - uses: arduino/setup-protoc@v3
@@ -541,6 +564,11 @@ jobs:
       shell: bash
       run: |
         kubectl describe nodes
+    - name: Describe pods
+      if: failure()
+      shell: bash
+      run: |
+        kubectl describe pod -n my-greptimedb
     - name: Export kind logs
       if: failure()
       shell: bash
@@ -563,12 +591,14 @@ jobs:
         docker system prune -f

   sqlness:
+    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
     name: Sqlness Test (${{ matrix.mode.name }})
     needs: build
     runs-on: ${{ matrix.os }}
     strategy:
       fail-fast: false
       matrix:
-        os: [ ubuntu-20.04 ]
+        os: [ ubuntu-latest ]
         mode:
         - name: "Basic"
           opts: ""
@@ -576,7 +606,7 @@ jobs:
         - name: "Remote WAL"
           opts: "-w kafka -k 127.0.0.1:9092"
           kafka: true
-        - name: "Pg Kvbackend"
+        - name: "PostgreSQL KvBackend"
           opts: "--setup-pg"
           kafka: false
     timeout-minutes: 60
@@ -606,8 +636,9 @@ jobs:
         retention-days: 3

   fmt:
+    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
     name: Rustfmt
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     timeout-minutes: 60
     steps:
     - uses: actions/checkout@v4
@@ -623,8 +654,9 @@ jobs:
       run: make fmt-check

   clippy:
+    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
     name: Clippy
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     timeout-minutes: 60
     steps:
     - uses: actions/checkout@v4
@@ -648,6 +680,7 @@ jobs:
       run: make clippy

   conflict-check:
+    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
     name: Check for conflict
     runs-on: ubuntu-latest
     steps:
@@ -658,7 +691,7 @@ jobs:
       uses: olivernybroe/action-conflict-finder@v4.0

   test:
-    if: github.event_name != 'merge_group'
+    if: ${{ github.repository == 'GreptimeTeam/greptimedb' && github.event_name != 'merge_group' }}
     runs-on: ubuntu-22.04-arm
     timeout-minutes: 60
     needs: [conflict-check, clippy, fmt]
@@ -704,13 +737,14 @@ jobs:
       GT_MINIO_ENDPOINT_URL: http://127.0.0.1:9000
       GT_ETCD_ENDPOINTS: http://127.0.0.1:2379
       GT_POSTGRES_ENDPOINTS: postgres://greptimedb:admin@127.0.0.1:5432/postgres
+      GT_MYSQL_ENDPOINTS: mysql://greptimedb:admin@127.0.0.1:3306/mysql
       GT_KAFKA_ENDPOINTS: 127.0.0.1:9092
       GT_KAFKA_SASL_ENDPOINTS: 127.0.0.1:9093
       UNITTEST_LOG_DIR: "__unittest_logs"

   coverage:
-    if: github.event_name == 'merge_group'
-    runs-on: ubuntu-20.04-8-cores
+    if: ${{ github.repository == 'GreptimeTeam/greptimedb' && github.event_name == 'merge_group' }}
+    runs-on: ubuntu-22.04-8-cores
     timeout-minutes: 60
     steps:
     - uses: actions/checkout@v4
@@ -755,6 +789,7 @@ jobs:
       GT_MINIO_ENDPOINT_URL: http://127.0.0.1:9000
       GT_ETCD_ENDPOINTS: http://127.0.0.1:2379
       GT_POSTGRES_ENDPOINTS: postgres://greptimedb:admin@127.0.0.1:5432/postgres
+      GT_MYSQL_ENDPOINTS: mysql://greptimedb:admin@127.0.0.1:3306/mysql
       GT_KAFKA_ENDPOINTS: 127.0.0.1:9092
       GT_KAFKA_SASL_ENDPOINTS: 127.0.0.1:9093
       UNITTEST_LOG_DIR: "__unittest_logs"
@@ -768,9 +803,10 @@ jobs:
       verbose: true

 # compat:
+#   if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
 #   name: Compatibility Test
 #   needs: build
-#   runs-on: ubuntu-20.04
+#   runs-on: ubuntu-22.04
 #   timeout-minutes: 60
 #   steps:
 #   - uses: actions/checkout@v4
.github/workflows/docbot.yml — 6 changes (vendored)

@@ -3,9 +3,13 @@ on:
   pull_request_target:
     types: [opened, edited]

+concurrency:
+  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
+  cancel-in-progress: true
+
 jobs:
   docbot:
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     permissions:
       pull-requests: write
       contents: read
.github/workflows/docs.yml — 16 changes (vendored)

@@ -31,7 +31,7 @@ name: CI
 jobs:
   typos:
     name: Spell Check with Typos
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     steps:
     - uses: actions/checkout@v4
       with:
@@ -39,7 +39,7 @@ jobs:
     - uses: crate-ci/typos@master

   license-header-check:
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     name: Check License Header
     steps:
     - uses: actions/checkout@v4
@@ -49,29 +49,29 @@ jobs:

   check:
     name: Check
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     steps:
     - run: 'echo "No action required"'

   fmt:
     name: Rustfmt
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     steps:
     - run: 'echo "No action required"'

   clippy:
     name: Clippy
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     steps:
     - run: 'echo "No action required"'

   coverage:
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     steps:
     - run: 'echo "No action required"'

   test:
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     steps:
     - run: 'echo "No action required"'

@@ -80,7 +80,7 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        os: [ ubuntu-20.04 ]
+        os: [ ubuntu-latest ]
         mode:
         - name: "Basic"
         - name: "Remote WAL"
.github/workflows/nightly-build.yml — 27 changes (vendored)

@@ -14,11 +14,11 @@ on:
         description: The runner uses to build linux-amd64 artifacts
         default: ec2-c6i.4xlarge-amd64
         options:
-          - ubuntu-20.04
-          - ubuntu-20.04-8-cores
-          - ubuntu-20.04-16-cores
-          - ubuntu-20.04-32-cores
-          - ubuntu-20.04-64-cores
+          - ubuntu-22.04
+          - ubuntu-22.04-8-cores
+          - ubuntu-22.04-16-cores
+          - ubuntu-22.04-32-cores
+          - ubuntu-22.04-64-cores
           - ec2-c6i.xlarge-amd64 # 4C8G
           - ec2-c6i.2xlarge-amd64 # 8C16G
           - ec2-c6i.4xlarge-amd64 # 16C32G
@@ -70,7 +70,7 @@ jobs:
   allocate-runners:
     name: Allocate runners
     if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     outputs:
       linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }}
       linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }}
@@ -182,7 +182,7 @@ jobs:
       build-linux-amd64-artifacts,
       build-linux-arm64-artifacts,
     ]
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     outputs:
       nightly-build-result: ${{ steps.set-nightly-build-result.outputs.nightly-build-result }}
     steps:
@@ -200,7 +200,7 @@ jobs:
         image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
         image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }}
         version: ${{ needs.allocate-runners.outputs.version }}
-        push-latest-tag: true
+        push-latest-tag: false

     - name: Set nightly build result
       id: set-nightly-build-result
@@ -214,7 +214,7 @@ jobs:
       allocate-runners,
       release-images-to-dockerhub,
     ]
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     # When we push to ACR, it's easy to fail due to some unknown network issues.
     # However, we don't want to fail the whole workflow because of this.
     # The ACR have daily sync with DockerHub, so don't worry about the image not being updated.
@@ -240,15 +240,16 @@ jobs:
         aws-cn-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
         aws-cn-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
         aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
         upload-to-s3: false
         dev-mode: false
         update-version-info: false # Don't update version info in S3.
-        push-latest-tag: true
+        push-latest-tag: false

   stop-linux-amd64-runner: # It's always run as the last job in the workflow to make sure that the runner is released.
     name: Stop linux-amd64 runner
     # Only run this job when the runner is allocated.
     if: ${{ always() }}
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     needs: [
       allocate-runners,
       build-linux-amd64-artifacts,
@@ -274,7 +275,7 @@ jobs:
     name: Stop linux-arm64 runner
     # Only run this job when the runner is allocated.
     if: ${{ always() }}
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     needs: [
       allocate-runners,
       build-linux-arm64-artifacts,
@@ -302,7 +303,7 @@ jobs:
     needs: [
       release-images-to-dockerhub
     ]
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     permissions:
       issues: write
     env:
.github/workflows/nightly-ci.yml — 17 changes (vendored)

@@ -13,7 +13,7 @@ jobs:
   sqlness-test:
     name: Run sqlness test
     if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
-    runs-on: ubuntu-22.04
+    runs-on: ubuntu-latest
     steps:
       - name: Checkout
         uses: actions/checkout@v4
@@ -107,7 +107,6 @@ jobs:
       CARGO_BUILD_RUSTFLAGS: "-C linker=lld-link"
       RUST_BACKTRACE: 1
       CARGO_INCREMENTAL: 0
-      RUSTUP_WINDOWS_PATH_ADD_BIN: 1 # Workaround for https://github.com/nextest-rs/nextest/issues/1493
       GT_S3_BUCKET: ${{ vars.AWS_CI_TEST_BUCKET }}
       GT_S3_ACCESS_KEY_ID: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
       GT_S3_ACCESS_KEY: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
@@ -118,22 +117,22 @@ jobs:
     name: Run clean build on Linux
     runs-on: ubuntu-latest
     if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
-    timeout-minutes: 60
+    timeout-minutes: 45
     steps:
       - uses: actions/checkout@v4
         with:
           fetch-depth: 0
           persist-credentials: false
-      - uses: cachix/install-nix-action@v27
-        with:
-          nix_path: nixpkgs=channel:nixos-24.11
-      - run: nix develop --command cargo build
+      - uses: cachix/install-nix-action@v31
+      - run: nix develop --command cargo check --bin greptime
         env:
           CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=mold"

   check-status:
     name: Check status
     needs: [sqlness-test, sqlness-windows, test-on-windows]
     if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     outputs:
       check-result: ${{ steps.set-check-result.outputs.check-result }}
     steps:
@@ -146,7 +145,7 @@ jobs:
     if: ${{ github.repository == 'GreptimeTeam/greptimedb' && always() }} # Not requiring successful dependent jobs, always run.
     name: Send notification to Greptime team
     needs: [check-status]
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     env:
       SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
     steps:
.github/workflows/pr-labeling.yaml — 42 changes (vendored, new file)

@@ -0,0 +1,42 @@
+name: 'PR Labeling'
+
+on:
+  pull_request_target:
+    types:
+      - opened
+      - synchronize
+      - reopened
+
+permissions:
+  contents: read
+  pull-requests: write
+  issues: write
+
+jobs:
+  labeler:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout sources
+        uses: actions/checkout@v4
+
+      - uses: actions/labeler@v5
+        with:
+          configuration-path: ".github/labeler.yaml"
+          repo-token: "${{ secrets.GITHUB_TOKEN }}"
+
+  size-label:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: pascalgn/size-label-action@v0.5.5
+        env:
+          GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
+        with:
+          sizes: >
+            {
+              "0": "XS",
+              "100": "S",
+              "300": "M",
+              "1000": "L",
+              "1500": "XL",
+              "2000": "XXL"
+            }
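The sizes map is a set of lower bounds on the PR's changed-line count; a hedged bash sketch of the mapping the action applies internally (the counting details are the action's, not reproduced here):

  # Usage: size_label <changed-line-count>; mirrors the thresholds above.
  size_label() {
    local lines=$1
    if   [ "$lines" -ge 2000 ]; then echo XXL
    elif [ "$lines" -ge 1500 ]; then echo XL
    elif [ "$lines" -ge 1000 ]; then echo L
    elif [ "$lines" -ge 300 ];  then echo M
    elif [ "$lines" -ge 100 ];  then echo S
    else echo XS; fi
  }
  size_label 450   # -> M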
(unnamed workflow — file header lost in this mirror; the jobs match the dev-builder image release workflow)

@@ -24,12 +24,20 @@ on:
         description: Release dev-builder-android image
         required: false
         default: false
+      update_dev_builder_image_tag:
+        type: boolean
+        description: Update the DEV_BUILDER_IMAGE_TAG in Makefile and create a PR
+        required: false
+        default: false

 jobs:
   release-dev-builder-images:
     name: Release dev builder images
-    if: ${{ inputs.release_dev_builder_ubuntu_image || inputs.release_dev_builder_centos_image || inputs.release_dev_builder_android_image }} # Only manually trigger this job.
-    runs-on: ubuntu-20.04-16-cores
+    # The jobs are triggered by the following events:
+    # 1. Manually triggered workflow_dispatch event
+    # 2. Push event when the PR that modifies the `rust-toolchain.toml` or `docker/dev-builder/**` is merged to main
+    if: ${{ github.event_name == 'push' || inputs.release_dev_builder_ubuntu_image || inputs.release_dev_builder_centos_image || inputs.release_dev_builder_android_image }}
+    runs-on: ubuntu-latest
     outputs:
       version: ${{ steps.set-version.outputs.version }}
     steps:
@@ -57,13 +65,13 @@ jobs:
         version: ${{ env.VERSION }}
         dockerhub-image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
         dockerhub-image-registry-token: ${{ secrets.DOCKERHUB_TOKEN }}
-        build-dev-builder-ubuntu: ${{ inputs.release_dev_builder_ubuntu_image }}
-        build-dev-builder-centos: ${{ inputs.release_dev_builder_centos_image }}
-        build-dev-builder-android: ${{ inputs.release_dev_builder_android_image }}
+        build-dev-builder-ubuntu: ${{ inputs.release_dev_builder_ubuntu_image || github.event_name == 'push' }}
+        build-dev-builder-centos: ${{ inputs.release_dev_builder_centos_image || github.event_name == 'push' }}
+        build-dev-builder-android: ${{ inputs.release_dev_builder_android_image || github.event_name == 'push' }}

   release-dev-builder-images-ecr:
     name: Release dev builder images to AWS ECR
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     needs: [
       release-dev-builder-images
     ]
@@ -85,7 +93,7 @@ jobs:

     - name: Push dev-builder-ubuntu image
       shell: bash
-      if: ${{ inputs.release_dev_builder_ubuntu_image }}
+      if: ${{ inputs.release_dev_builder_ubuntu_image || github.event_name == 'push' }}
       env:
         IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
         IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
@@ -106,7 +114,7 @@ jobs:

     - name: Push dev-builder-centos image
       shell: bash
-      if: ${{ inputs.release_dev_builder_centos_image }}
+      if: ${{ inputs.release_dev_builder_centos_image || github.event_name == 'push' }}
       env:
         IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
         IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
@@ -127,7 +135,7 @@ jobs:

     - name: Push dev-builder-android image
       shell: bash
-      if: ${{ inputs.release_dev_builder_android_image }}
+      if: ${{ inputs.release_dev_builder_android_image || github.event_name == 'push' }}
       env:
         IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
         IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
@@ -148,7 +156,7 @@ jobs:

   release-dev-builder-images-cn: # Note: Be careful issue: https://github.com/containers/skopeo/issues/1874 and we decide to use the latest stable skopeo container.
     name: Release dev builder images to CN region
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     needs: [
       release-dev-builder-images
     ]
@@ -162,7 +170,7 @@ jobs:

     - name: Push dev-builder-ubuntu image
       shell: bash
-      if: ${{ inputs.release_dev_builder_ubuntu_image }}
+      if: ${{ inputs.release_dev_builder_ubuntu_image || github.event_name == 'push' }}
       env:
         IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
         IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
@@ -176,7 +184,7 @@ jobs:

     - name: Push dev-builder-centos image
       shell: bash
-      if: ${{ inputs.release_dev_builder_centos_image }}
+      if: ${{ inputs.release_dev_builder_centos_image || github.event_name == 'push' }}
       env:
         IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
         IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
@@ -190,7 +198,7 @@ jobs:

     - name: Push dev-builder-android image
       shell: bash
-      if: ${{ inputs.release_dev_builder_android_image }}
+      if: ${{ inputs.release_dev_builder_android_image || github.event_name == 'push' }}
       env:
         IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
         IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
@@ -201,3 +209,24 @@ jobs:
           quay.io/skopeo/stable:latest \
           copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-android:$IMAGE_VERSION \
           docker://$ACR_IMAGE_REGISTRY/$IMAGE_NAMESPACE/dev-builder-android:$IMAGE_VERSION
+
+  update-dev-builder-image-tag:
+    name: Update dev-builder image tag
+    runs-on: ubuntu-latest
+    permissions:
+      contents: write
+      pull-requests: write
+    if: ${{ github.event_name == 'push' || inputs.update_dev_builder_image_tag }}
+    needs: [
+      release-dev-builder-images
+    ]
+    steps:
+    - name: Checkout repository
+      uses: actions/checkout@v4
+
+    - name: Update dev-builder image tag
+      shell: bash
+      env:
+        GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+      run: |
+        ./.github/scripts/update-dev-builder-version.sh ${{ needs.release-dev-builder-images.outputs.version }}
94
.github/workflows/release.yml
vendored
94
.github/workflows/release.yml
vendored
@@ -18,11 +18,11 @@ on:
|
||||
description: The runner uses to build linux-amd64 artifacts
|
||||
default: ec2-c6i.4xlarge-amd64
|
||||
options:
|
||||
- ubuntu-20.04
|
||||
- ubuntu-20.04-8-cores
|
||||
- ubuntu-20.04-16-cores
|
||||
- ubuntu-20.04-32-cores
|
||||
- ubuntu-20.04-64-cores
|
||||
- ubuntu-22.04
|
||||
- ubuntu-22.04-8-cores
|
||||
- ubuntu-22.04-16-cores
|
||||
- ubuntu-22.04-32-cores
|
||||
- ubuntu-22.04-64-cores
|
||||
- ec2-c6i.xlarge-amd64 # 4C8G
|
||||
- ec2-c6i.2xlarge-amd64 # 8C16G
|
||||
- ec2-c6i.4xlarge-amd64 # 16C32G
|
||||
@@ -88,16 +88,14 @@ env:
|
||||
# Controls whether to run tests, include unit-test, integration-test and sqlness.
|
||||
DISABLE_RUN_TESTS: ${{ inputs.skip_test || vars.DEFAULT_SKIP_TEST }}
|
||||
|
||||
# The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nigthly-20230313;
|
||||
# The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nightly-20230313;
|
||||
NIGHTLY_RELEASE_PREFIX: nightly
|
||||
# Note: The NEXT_RELEASE_VERSION should be modified manually by every formal release.
|
||||
NEXT_RELEASE_VERSION: v0.12.0
|
||||
|
||||
jobs:
|
||||
allocate-runners:
|
||||
name: Allocate runners
|
||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
||||
runs-on: ubuntu-20.04
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }}
|
||||
linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }}
|
||||
@@ -126,7 +124,7 @@ jobs:
|
||||
|
||||
# The create-version will create a global variable named 'version' in the global workflows.
|
||||
# - If it's a tag push release, the version is the tag name(${{ github.ref_name }});
|
||||
# - If it's a scheduled release, the version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-$buildTime', like v0.2.0-nigthly-20230313;
|
||||
# - If it's a scheduled release, the version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-$buildTime', like v0.2.0-nightly-20230313;
|
||||
# - If it's a manual release, the version is '${{ env.NEXT_RELEASE_VERSION }}-<short-git-sha>-YYYYMMDDSS', like v0.2.0-e5b243c-2023071245;
|
||||
- name: Create version
|
||||
id: create-version
|
||||
@@ -135,7 +133,6 @@ jobs:
|
||||
env:
|
||||
GITHUB_EVENT_NAME: ${{ github.event_name }}
|
||||
GITHUB_REF_NAME: ${{ github.ref_name }}
|
||||
NEXT_RELEASE_VERSION: ${{ env.NEXT_RELEASE_VERSION }}
|
||||
NIGHTLY_RELEASE_PREFIX: ${{ env.NIGHTLY_RELEASE_PREFIX }}
|
||||
|
||||
- name: Allocate linux-amd64 runner
|
||||
@@ -299,7 +296,7 @@ jobs:
|
||||
build-linux-amd64-artifacts,
|
||||
build-linux-arm64-artifacts,
|
||||
]
|
||||
runs-on: ubuntu-2004-16-cores
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
build-image-result: ${{ steps.set-build-image-result.outputs.build-image-result }}
|
||||
steps:
|
||||
@@ -317,6 +314,7 @@ jobs:
|
||||
image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
version: ${{ needs.allocate-runners.outputs.version }}
|
||||
push-latest-tag: ${{ github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' }}
|
||||
|
||||
- name: Set build image result
|
||||
id: set-build-image-result
|
||||
@@ -334,7 +332,7 @@ jobs:
|
||||
build-windows-artifacts,
|
||||
release-images-to-dockerhub,
|
||||
]
|
||||
runs-on: ubuntu-20.04
|
||||
runs-on: ubuntu-latest
|
||||
# When we push to ACR, it's easy to fail due to some unknown network issues.
|
||||
# However, we don't want to fail the whole workflow because of this.
|
||||
# The ACR have daily sync with DockerHub, so don't worry about the image not being updated.
|
||||
@@ -361,8 +359,9 @@ jobs:
|
||||
aws-cn-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
|
||||
aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
|
||||
dev-mode: false
|
||||
upload-to-s3: true
|
||||
update-version-info: true
|
||||
push-latest-tag: true
|
||||
push-latest-tag: ${{ github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' }}
|
||||
|
||||
publish-github-release:
name: Create GitHub release and upload artifacts
@@ -375,7 +374,7 @@ jobs:
build-windows-artifacts,
release-images-to-dockerhub,
]
runs-on: ubuntu-20.04
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
@@ -389,12 +388,12 @@ jobs:

### Stop runners ###
# It's very necessary to split the job of releasing runners into 'stop-linux-amd64-runner' and 'stop-linux-arm64-runner'.
# Because we can terminate the specified EC2 instance immediately after the job is finished without uncessary waiting.
# Because we can terminate the specified EC2 instance immediately after the job is finished without unnecessary waiting.
stop-linux-amd64-runner: # It's always run as the last job in the workflow to make sure that the runner is released.
name: Stop linux-amd64 runner
# Only run this job when the runner is allocated.
if: ${{ always() }}
runs-on: ubuntu-20.04
runs-on: ubuntu-latest
needs: [
allocate-runners,
build-linux-amd64-artifacts,
@@ -420,7 +419,7 @@ jobs:
name: Stop linux-arm64 runner
# Only run this job when the runner is allocated.
if: ${{ always() }}
runs-on: ubuntu-20.04
runs-on: ubuntu-latest
needs: [
allocate-runners,
build-linux-arm64-artifacts,
@@ -442,11 +441,11 @@ jobs:
aws-region: ${{ vars.EC2_RUNNER_REGION }}
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
bump-doc-version:
name: Bump doc version
bump-downstream-repo-versions:
name: Bump downstream repo versions
if: ${{ github.event_name == 'push' || github.event_name == 'schedule' }}
needs: [allocate-runners]
runs-on: ubuntu-20.04
needs: [allocate-runners, publish-github-release]
runs-on: ubuntu-latest
# Permission reference: https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs
permissions:
issues: write # Allows the action to create issues for cyborg.
@@ -457,13 +456,58 @@ jobs:
fetch-depth: 0
persist-credentials: false
- uses: ./.github/actions/setup-cyborg
- name: Bump doc version
- name: Bump downstream repo versions
working-directory: cyborg
run: pnpm tsx bin/bump-doc-version.ts
run: pnpm tsx bin/bump-versions.ts
env:
TARGET_REPOS: website,docs,demo
VERSION: ${{ needs.allocate-runners.outputs.version }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
WEBSITE_REPO_TOKEN: ${{ secrets.WEBSITE_REPO_TOKEN }}
DOCS_REPO_TOKEN: ${{ secrets.DOCS_REPO_TOKEN }}
DEMO_REPO_TOKEN: ${{ secrets.DEMO_REPO_TOKEN }}

bump-helm-charts-version:
name: Bump helm charts version
if: ${{ github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' }}
needs: [allocate-runners, publish-github-release]
runs-on: ubuntu-latest
permissions:
contents: write
pull-requests: write
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0

- name: Bump helm charts version
env:
GITHUB_TOKEN: ${{ secrets.HELM_CHARTS_REPO_TOKEN }}
VERSION: ${{ needs.allocate-runners.outputs.version }}
run: |
./.github/scripts/update-helm-charts-version.sh

bump-homebrew-greptime-version:
name: Bump homebrew greptime version
if: ${{ github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' }}
needs: [allocate-runners, publish-github-release]
runs-on: ubuntu-latest
permissions:
contents: write
pull-requests: write
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0

- name: Bump homebrew greptime version
env:
GITHUB_TOKEN: ${{ secrets.HOMEBREW_GREPTIME_REPO_TOKEN }}
VERSION: ${{ needs.allocate-runners.outputs.version }}
run: |
./.github/scripts/update-homebrew-greptme-version.sh

notification:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' && (github.event_name == 'push' || github.event_name == 'schedule') && always() }}
@@ -473,7 +517,7 @@ jobs:
build-macos-artifacts,
build-windows-artifacts,
]
runs-on: ubuntu-20.04
runs-on: ubuntu-latest
# Permission reference: https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs
permissions:
issues: write # Allows the action to create issues for cyborg.
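The create-version comments in the hunk above define three naming paths: tag pushes reuse the tag name, scheduled builds append a nightly date, and manual runs append a short SHA plus a timestamp. A minimal Rust sketch of that scheme, for illustration only; the function and its inputs are hypothetical, not code from this repository:

```rust
// Illustrative sketch of the version-naming rules described in the workflow
// comments; all names and inputs here are hypothetical.
fn release_version(
    is_tag_push: bool,
    is_scheduled: bool,
    ref_name: &str,             // e.g. "v0.12.1"
    next_release_version: &str, // e.g. "v0.2.0"
    build_time: &str,           // e.g. "20230313"
    short_sha: &str,            // e.g. "e5b243c"
) -> String {
    if is_tag_push {
        // Tag push release: the version is the tag name itself.
        ref_name.to_string()
    } else if is_scheduled {
        // Scheduled release: e.g. v0.2.0-nightly-20230313.
        format!("{next_release_version}-nightly-{build_time}")
    } else {
        // Manual release: e.g. v0.2.0-e5b243c-2023071245.
        format!("{next_release_version}-{short_sha}-{build_time}")
    }
}

fn main() {
    assert_eq!(
        release_version(false, true, "", "v0.2.0", "20230313", ""),
        "v0.2.0-nightly-20230313"
    );
}
```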
9
.github/workflows/semantic-pull-request.yml
vendored
@@ -7,9 +7,16 @@ on:
- reopened
- edited

concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true

jobs:
check:
runs-on: ubuntu-20.04
runs-on: ubuntu-latest
permissions:
pull-requests: write # Add permissions to modify PRs
issues: write
timeout-minutes: 10
steps:
- uses: actions/checkout@v4
7
.gitignore
vendored
@@ -54,3 +54,10 @@ tests-fuzz/corpus/
# Nix
.direnv
.envrc

## default data home
greptimedb_data

# github
!/.github
21
AUTHOR.md
@@ -3,30 +3,28 @@
## Individual Committers (in alphabetical order)

* [CookiePieWw](https://github.com/CookiePieWw)
* [KKould](https://github.com/KKould)
* [NiwakaDev](https://github.com/NiwakaDev)
* [etolbakov](https://github.com/etolbakov)
* [irenjj](https://github.com/irenjj)
* [tisonkun](https://github.com/tisonkun)
* [KKould](https://github.com/KKould)
* [Lanqing Yang](https://github.com/lyang24)
* [NiwakaDev](https://github.com/NiwakaDev)
* [tisonkun](https://github.com/tisonkun)


## Team Members (in alphabetical order)

* [Breeze-P](https://github.com/Breeze-P)
* [GrepTime](https://github.com/GrepTime)
* [MichaelScofield](https://github.com/MichaelScofield)
* [Wenjie0329](https://github.com/Wenjie0329)
* [WenyXu](https://github.com/WenyXu)
* [ZonaHex](https://github.com/ZonaHex)
* [apdong2022](https://github.com/apdong2022)
* [beryl678](https://github.com/beryl678)
* [Breeze-P](https://github.com/Breeze-P)
* [daviderli614](https://github.com/daviderli614)
* [discord9](https://github.com/discord9)
* [evenyag](https://github.com/evenyag)
* [fengjiachun](https://github.com/fengjiachun)
* [fengys1996](https://github.com/fengys1996)
* [GrepTime](https://github.com/GrepTime)
* [holalengyu](https://github.com/holalengyu)
* [killme2008](https://github.com/killme2008)
* [MichaelScofield](https://github.com/MichaelScofield)
* [nicecui](https://github.com/nicecui)
* [paomian](https://github.com/paomian)
* [shuiyisong](https://github.com/shuiyisong)
@@ -34,11 +32,14 @@
* [sunng87](https://github.com/sunng87)
* [v0y4g3r](https://github.com/v0y4g3r)
* [waynexia](https://github.com/waynexia)
* [Wenjie0329](https://github.com/Wenjie0329)
* [WenyXu](https://github.com/WenyXu)
* [xtang](https://github.com/xtang)
* [zhaoyingnan01](https://github.com/zhaoyingnan01)
* [zhongzc](https://github.com/zhongzc)
* [ZonaHex](https://github.com/ZonaHex)
* [zyy17](https://github.com/zyy17)

## All Contributors

[](https://github.com/GreptimeTeam/greptimedb/graphs/contributors)
To see the full list of contributors, please visit our [Contributors page](https://github.com/GreptimeTeam/greptimedb/graphs/contributors)
227
Cargo.lock
generated
@@ -185,7 +185,7 @@ checksum = "d301b3b94cb4b2f23d7917810addbbaff90738e0ca2be692bd027e70d7e0330c"
|
||||
|
||||
[[package]]
|
||||
name = "api"
|
||||
version = "0.12.0"
|
||||
version = "0.12.1"
|
||||
dependencies = [
|
||||
"common-base",
|
||||
"common-decimal",
|
||||
@@ -432,7 +432,7 @@ dependencies = [
|
||||
"arrow-schema",
|
||||
"chrono",
|
||||
"half",
|
||||
"indexmap 2.6.0",
|
||||
"indexmap 2.7.1",
|
||||
"lexical-core",
|
||||
"num",
|
||||
"serde",
|
||||
@@ -710,7 +710,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "auth"
|
||||
version = "0.12.0"
|
||||
version = "0.12.1"
|
||||
dependencies = [
|
||||
"api",
|
||||
"async-trait",
|
||||
@@ -1324,7 +1324,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "cache"
|
||||
version = "0.12.0"
|
||||
version = "0.12.1"
|
||||
dependencies = [
|
||||
"catalog",
|
||||
"common-error",
|
||||
@@ -1348,7 +1348,7 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"
|
||||
|
||||
[[package]]
|
||||
name = "catalog"
|
||||
version = "0.12.0"
|
||||
version = "0.12.1"
|
||||
dependencies = [
|
||||
"api",
|
||||
"arrow",
|
||||
@@ -1475,7 +1475,7 @@ version = "0.13.7"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6026d8cd82ada8bbcfe337805dd1eb6afdc9e80fa4d57e977b3a36315e0c5525"
|
||||
dependencies = [
|
||||
"indexmap 2.6.0",
|
||||
"indexmap 2.7.1",
|
||||
"lazy_static",
|
||||
"num-traits",
|
||||
"regex",
|
||||
@@ -1661,7 +1661,7 @@ checksum = "1462739cb27611015575c0c11df5df7601141071f07518d56fcc1be504cbec97"
|
||||
|
||||
[[package]]
|
||||
name = "cli"
|
||||
version = "0.12.0"
|
||||
version = "0.12.1"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"auth",
|
||||
@@ -1703,7 +1703,7 @@ dependencies = [
|
||||
"session",
|
||||
"snafu 0.8.5",
|
||||
"store-api",
|
||||
"substrait 0.12.0",
|
||||
"substrait 0.12.1",
|
||||
"table",
|
||||
"tempfile",
|
||||
"tokio",
|
||||
@@ -1712,7 +1712,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "client"
|
||||
version = "0.12.0"
|
||||
version = "0.12.1"
|
||||
dependencies = [
|
||||
"api",
|
||||
"arc-swap",
|
||||
@@ -1739,7 +1739,7 @@ dependencies = [
|
||||
"rand",
|
||||
"serde_json",
|
||||
"snafu 0.8.5",
|
||||
"substrait 0.12.0",
|
||||
"substrait 0.12.1",
|
||||
"substrait 0.37.3",
|
||||
"tokio",
|
||||
"tokio-stream",
|
||||
@@ -1780,7 +1780,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "cmd"
|
||||
version = "0.12.0"
|
||||
version = "0.12.1"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"auth",
|
||||
@@ -1841,7 +1841,7 @@ dependencies = [
|
||||
"similar-asserts",
|
||||
"snafu 0.8.5",
|
||||
"store-api",
|
||||
"substrait 0.12.0",
|
||||
"substrait 0.12.1",
|
||||
"table",
|
||||
"temp-env",
|
||||
"tempfile",
|
||||
@@ -1887,7 +1887,7 @@ checksum = "55b672471b4e9f9e95499ea597ff64941a309b2cdbffcc46f2cc5e2d971fd335"
|
||||
|
||||
[[package]]
|
||||
name = "common-base"
|
||||
version = "0.12.0"
|
||||
version = "0.12.1"
|
||||
dependencies = [
|
||||
"anymap2",
|
||||
"async-trait",
|
||||
@@ -1909,11 +1909,11 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-catalog"
|
||||
version = "0.12.0"
|
||||
version = "0.12.1"
|
||||
|
||||
[[package]]
|
||||
name = "common-config"
|
||||
version = "0.12.0"
|
||||
version = "0.12.1"
|
||||
dependencies = [
|
||||
"common-base",
|
||||
"common-error",
|
||||
@@ -1938,7 +1938,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-datasource"
|
||||
version = "0.12.0"
|
||||
version = "0.12.1"
|
||||
dependencies = [
|
||||
"arrow",
|
||||
"arrow-schema",
|
||||
@@ -1974,7 +1974,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-decimal"
|
||||
version = "0.12.0"
|
||||
version = "0.12.1"
|
||||
dependencies = [
|
||||
"bigdecimal 0.4.5",
|
||||
"common-error",
|
||||
@@ -1987,7 +1987,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-error"
|
||||
version = "0.12.0"
|
||||
version = "0.12.1"
|
||||
dependencies = [
|
||||
"http 1.1.0",
|
||||
"snafu 0.8.5",
|
||||
@@ -1997,7 +1997,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-frontend"
|
||||
version = "0.12.0"
|
||||
version = "0.12.1"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"common-error",
|
||||
@@ -2007,12 +2007,14 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-function"
|
||||
version = "0.12.0"
|
||||
version = "0.12.1"
|
||||
dependencies = [
|
||||
"ahash 0.8.11",
|
||||
"api",
|
||||
"approx 0.5.1",
|
||||
"arc-swap",
|
||||
"async-trait",
|
||||
"bincode",
|
||||
"common-base",
|
||||
"common-catalog",
|
||||
"common-error",
|
||||
@@ -2030,6 +2032,7 @@ dependencies = [
|
||||
"geo-types",
|
||||
"geohash",
|
||||
"h3o",
|
||||
"hyperloglogplus",
|
||||
"jsonb",
|
||||
"nalgebra 0.33.2",
|
||||
"num",
|
||||
@@ -2046,12 +2049,13 @@ dependencies = [
|
||||
"store-api",
|
||||
"table",
|
||||
"tokio",
|
||||
"uddsketch",
|
||||
"wkt",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "common-greptimedb-telemetry"
|
||||
version = "0.12.0"
|
||||
version = "0.12.1"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"common-runtime",
|
||||
@@ -2068,7 +2072,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-grpc"
|
||||
version = "0.12.0"
|
||||
version = "0.12.1"
|
||||
dependencies = [
|
||||
"api",
|
||||
"arrow-flight",
|
||||
@@ -2096,7 +2100,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-grpc-expr"
|
||||
version = "0.12.0"
|
||||
version = "0.12.1"
|
||||
dependencies = [
|
||||
"api",
|
||||
"common-base",
|
||||
@@ -2115,7 +2119,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-macro"
|
||||
version = "0.12.0"
|
||||
version = "0.12.1"
|
||||
dependencies = [
|
||||
"arc-swap",
|
||||
"common-query",
|
||||
@@ -2129,7 +2133,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-mem-prof"
|
||||
version = "0.12.0"
|
||||
version = "0.12.1"
|
||||
dependencies = [
|
||||
"common-error",
|
||||
"common-macro",
|
||||
@@ -2142,7 +2146,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-meta"
|
||||
version = "0.12.0"
|
||||
version = "0.12.1"
|
||||
dependencies = [
|
||||
"anymap2",
|
||||
"api",
|
||||
@@ -2202,7 +2206,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-options"
|
||||
version = "0.12.0"
|
||||
version = "0.12.1"
|
||||
dependencies = [
|
||||
"common-grpc",
|
||||
"humantime-serde",
|
||||
@@ -2211,11 +2215,11 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-plugins"
|
||||
version = "0.12.0"
|
||||
version = "0.12.1"
|
||||
|
||||
[[package]]
|
||||
name = "common-pprof"
|
||||
version = "0.12.0"
|
||||
version = "0.12.1"
|
||||
dependencies = [
|
||||
"common-error",
|
||||
"common-macro",
|
||||
@@ -2227,7 +2231,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-procedure"
|
||||
version = "0.12.0"
|
||||
version = "0.12.1"
|
||||
dependencies = [
|
||||
"async-stream",
|
||||
"async-trait",
|
||||
@@ -2254,7 +2258,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-procedure-test"
|
||||
version = "0.12.0"
|
||||
version = "0.12.1"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"common-procedure",
|
||||
@@ -2262,7 +2266,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-query"
|
||||
version = "0.12.0"
|
||||
version = "0.12.1"
|
||||
dependencies = [
|
||||
"api",
|
||||
"async-trait",
|
||||
@@ -2288,7 +2292,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-recordbatch"
|
||||
version = "0.12.0"
|
||||
version = "0.12.1"
|
||||
dependencies = [
|
||||
"arc-swap",
|
||||
"common-error",
|
||||
@@ -2307,7 +2311,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-runtime"
|
||||
version = "0.12.0"
|
||||
version = "0.12.1"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"clap 4.5.19",
|
||||
@@ -2337,7 +2341,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-telemetry"
|
||||
version = "0.12.0"
|
||||
version = "0.12.1"
|
||||
dependencies = [
|
||||
"atty",
|
||||
"backtrace",
|
||||
@@ -2365,7 +2369,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-test-util"
|
||||
version = "0.12.0"
|
||||
version = "0.12.1"
|
||||
dependencies = [
|
||||
"client",
|
||||
"common-query",
|
||||
@@ -2377,7 +2381,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-time"
|
||||
version = "0.12.0"
|
||||
version = "0.12.1"
|
||||
dependencies = [
|
||||
"arrow",
|
||||
"chrono",
|
||||
@@ -2395,7 +2399,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-version"
|
||||
version = "0.12.0"
|
||||
version = "0.12.1"
|
||||
dependencies = [
|
||||
"build-data",
|
||||
"const_format",
|
||||
@@ -2405,7 +2409,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-wal"
|
||||
version = "0.12.0"
|
||||
version = "0.12.1"
|
||||
dependencies = [
|
||||
"common-base",
|
||||
"common-error",
|
||||
@@ -2972,7 +2976,7 @@ dependencies = [
|
||||
"chrono",
|
||||
"half",
|
||||
"hashbrown 0.14.5",
|
||||
"indexmap 2.6.0",
|
||||
"indexmap 2.7.1",
|
||||
"libc",
|
||||
"object_store",
|
||||
"parquet",
|
||||
@@ -3032,7 +3036,7 @@ dependencies = [
|
||||
"datafusion-functions-aggregate-common",
|
||||
"datafusion-functions-window-common",
|
||||
"datafusion-physical-expr-common",
|
||||
"indexmap 2.6.0",
|
||||
"indexmap 2.7.1",
|
||||
"paste",
|
||||
"recursive",
|
||||
"serde_json",
|
||||
@@ -3154,7 +3158,7 @@ dependencies = [
|
||||
"datafusion-physical-expr-common",
|
||||
"datafusion-physical-plan",
|
||||
"half",
|
||||
"indexmap 2.6.0",
|
||||
"indexmap 2.7.1",
|
||||
"log",
|
||||
"parking_lot 0.12.3",
|
||||
"paste",
|
||||
@@ -3205,7 +3209,7 @@ dependencies = [
|
||||
"datafusion-common",
|
||||
"datafusion-expr",
|
||||
"datafusion-physical-expr",
|
||||
"indexmap 2.6.0",
|
||||
"indexmap 2.7.1",
|
||||
"itertools 0.13.0",
|
||||
"log",
|
||||
"recursive",
|
||||
@@ -3230,7 +3234,7 @@ dependencies = [
|
||||
"datafusion-physical-expr-common",
|
||||
"half",
|
||||
"hashbrown 0.14.5",
|
||||
"indexmap 2.6.0",
|
||||
"indexmap 2.7.1",
|
||||
"itertools 0.13.0",
|
||||
"log",
|
||||
"paste",
|
||||
@@ -3289,7 +3293,7 @@ dependencies = [
|
||||
"futures",
|
||||
"half",
|
||||
"hashbrown 0.14.5",
|
||||
"indexmap 2.6.0",
|
||||
"indexmap 2.7.1",
|
||||
"itertools 0.13.0",
|
||||
"log",
|
||||
"once_cell",
|
||||
@@ -3309,7 +3313,7 @@ dependencies = [
|
||||
"arrow-schema",
|
||||
"datafusion-common",
|
||||
"datafusion-expr",
|
||||
"indexmap 2.6.0",
|
||||
"indexmap 2.7.1",
|
||||
"log",
|
||||
"recursive",
|
||||
"regex",
|
||||
@@ -3336,7 +3340,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "datanode"
|
||||
version = "0.12.0"
|
||||
version = "0.12.1"
|
||||
dependencies = [
|
||||
"api",
|
||||
"arrow-flight",
|
||||
@@ -3376,6 +3380,7 @@ dependencies = [
|
||||
"meta-client",
|
||||
"metric-engine",
|
||||
"mito2",
|
||||
"num_cpus",
|
||||
"object-store",
|
||||
"prometheus",
|
||||
"prost 0.13.3",
|
||||
@@ -3387,7 +3392,7 @@ dependencies = [
|
||||
"session",
|
||||
"snafu 0.8.5",
|
||||
"store-api",
|
||||
"substrait 0.12.0",
|
||||
"substrait 0.12.1",
|
||||
"table",
|
||||
"tokio",
|
||||
"toml 0.8.19",
|
||||
@@ -3396,7 +3401,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "datatypes"
|
||||
version = "0.12.0"
|
||||
version = "0.12.1"
|
||||
dependencies = [
|
||||
"arrow",
|
||||
"arrow-array",
|
||||
@@ -4040,7 +4045,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "file-engine"
|
||||
version = "0.12.0"
|
||||
version = "0.12.1"
|
||||
dependencies = [
|
||||
"api",
|
||||
"async-trait",
|
||||
@@ -4150,7 +4155,7 @@ checksum = "8bf7cc16383c4b8d58b9905a8509f02926ce3058053c056376248d958c9df1e8"
|
||||
|
||||
[[package]]
|
||||
name = "flow"
|
||||
version = "0.12.0"
|
||||
version = "0.12.1"
|
||||
dependencies = [
|
||||
"api",
|
||||
"arrow",
|
||||
@@ -4196,6 +4201,7 @@ dependencies = [
|
||||
"meta-client",
|
||||
"nom",
|
||||
"num-traits",
|
||||
"num_cpus",
|
||||
"operator",
|
||||
"partition",
|
||||
"pretty_assertions",
|
||||
@@ -4210,7 +4216,7 @@ dependencies = [
|
||||
"snafu 0.8.5",
|
||||
"store-api",
|
||||
"strum 0.25.0",
|
||||
"substrait 0.12.0",
|
||||
"substrait 0.12.1",
|
||||
"table",
|
||||
"tokio",
|
||||
"tonic 0.12.3",
|
||||
@@ -4265,7 +4271,7 @@ checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa"
|
||||
|
||||
[[package]]
|
||||
name = "frontend"
|
||||
version = "0.12.0"
|
||||
version = "0.12.1"
|
||||
dependencies = [
|
||||
"api",
|
||||
"arc-swap",
|
||||
@@ -4302,6 +4308,7 @@ dependencies = [
|
||||
"log-query",
|
||||
"log-store",
|
||||
"meta-client",
|
||||
"num_cpus",
|
||||
"opentelemetry-proto 0.27.0",
|
||||
"operator",
|
||||
"partition",
|
||||
@@ -4692,7 +4699,7 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "greptime-proto"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=fc09a5696608d2a0aa718cc835d5cb9c4e8e9387#fc09a5696608d2a0aa718cc835d5cb9c4e8e9387"
|
||||
source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=072ce580502e015df1a6b03a185b60309a7c2a7a#072ce580502e015df1a6b03a185b60309a7c2a7a"
|
||||
dependencies = [
|
||||
"prost 0.13.3",
|
||||
"serde",
|
||||
@@ -4715,7 +4722,7 @@ dependencies = [
|
||||
"futures-sink",
|
||||
"futures-util",
|
||||
"http 0.2.12",
|
||||
"indexmap 2.6.0",
|
||||
"indexmap 2.7.1",
|
||||
"slab",
|
||||
"tokio",
|
||||
"tokio-util",
|
||||
@@ -4734,7 +4741,7 @@ dependencies = [
|
||||
"futures-core",
|
||||
"futures-sink",
|
||||
"http 1.1.0",
|
||||
"indexmap 2.6.0",
|
||||
"indexmap 2.7.1",
|
||||
"slab",
|
||||
"tokio",
|
||||
"tokio-util",
|
||||
@@ -5284,6 +5291,15 @@ dependencies = [
|
||||
"tracing",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "hyperloglogplus"
|
||||
version = "0.4.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "621debdf94dcac33e50475fdd76d34d5ea9c0362a834b9db08c3024696c1fbe3"
|
||||
dependencies = [
|
||||
"serde",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "i_float"
|
||||
version = "1.3.1"
|
||||
@@ -5523,7 +5539,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "index"
|
||||
version = "0.12.0"
|
||||
version = "0.12.1"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"asynchronous-codec",
|
||||
@@ -5572,9 +5588,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "indexmap"
|
||||
version = "2.6.0"
|
||||
version = "2.7.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "707907fe3c25f5424cce2cb7e1cbcafee6bdbe735ca90ef77c29e84591e5b9da"
|
||||
checksum = "8c9c992b02b5b4c94ea26e32fe5bccb7aa7d9f390ab5c1221ff895bc7ea8b652"
|
||||
dependencies = [
|
||||
"equivalent",
|
||||
"hashbrown 0.15.2",
|
||||
@@ -5588,7 +5604,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "232929e1d75fe899576a3d5c7416ad0d88dbfbb3c3d6aa00873a7408a50ddb88"
|
||||
dependencies = [
|
||||
"ahash 0.8.11",
|
||||
"indexmap 2.6.0",
|
||||
"indexmap 2.7.1",
|
||||
"is-terminal",
|
||||
"itoa",
|
||||
"log",
|
||||
@@ -5935,7 +5951,7 @@ version = "0.4.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "4ee7893dab2e44ae5f9d0173f26ff4aa327c10b01b06a72b52dd9405b628640d"
|
||||
dependencies = [
|
||||
"indexmap 2.6.0",
|
||||
"indexmap 2.7.1",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -6315,7 +6331,7 @@ checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24"
|
||||
|
||||
[[package]]
|
||||
name = "log-query"
|
||||
version = "0.12.0"
|
||||
version = "0.12.1"
|
||||
dependencies = [
|
||||
"chrono",
|
||||
"common-error",
|
||||
@@ -6327,7 +6343,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "log-store"
|
||||
version = "0.12.0"
|
||||
version = "0.12.1"
|
||||
dependencies = [
|
||||
"async-stream",
|
||||
"async-trait",
|
||||
@@ -6418,7 +6434,7 @@ dependencies = [
|
||||
"cactus",
|
||||
"cfgrammar",
|
||||
"filetime",
|
||||
"indexmap 2.6.0",
|
||||
"indexmap 2.7.1",
|
||||
"lazy_static",
|
||||
"lrtable",
|
||||
"num-traits",
|
||||
@@ -6620,7 +6636,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "meta-client"
|
||||
version = "0.12.0"
|
||||
version = "0.12.1"
|
||||
dependencies = [
|
||||
"api",
|
||||
"async-trait",
|
||||
@@ -6647,7 +6663,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "meta-srv"
|
||||
version = "0.12.0"
|
||||
version = "0.12.1"
|
||||
dependencies = [
|
||||
"api",
|
||||
"async-trait",
|
||||
@@ -6733,7 +6749,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "metric-engine"
|
||||
version = "0.12.0"
|
||||
version = "0.12.1"
|
||||
dependencies = [
|
||||
"api",
|
||||
"aquamarine",
|
||||
@@ -6831,7 +6847,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "mito2"
|
||||
version = "0.12.0"
|
||||
version = "0.12.1"
|
||||
dependencies = [
|
||||
"api",
|
||||
"aquamarine",
|
||||
@@ -7528,7 +7544,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "object-store"
|
||||
version = "0.12.0"
|
||||
version = "0.12.1"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"bytes",
|
||||
@@ -7659,7 +7675,7 @@ checksum = "1e32339a5dc40459130b3bd269e9892439f55b33e772d2a9d402a789baaf4e8a"
|
||||
dependencies = [
|
||||
"futures-core",
|
||||
"futures-sink",
|
||||
"indexmap 2.6.0",
|
||||
"indexmap 2.7.1",
|
||||
"js-sys",
|
||||
"once_cell",
|
||||
"pin-project-lite",
|
||||
@@ -7777,7 +7793,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "operator"
|
||||
version = "0.12.0"
|
||||
version = "0.12.1"
|
||||
dependencies = [
|
||||
"ahash 0.8.11",
|
||||
"api",
|
||||
@@ -7825,7 +7841,7 @@ dependencies = [
|
||||
"sql",
|
||||
"sqlparser 0.52.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=71dd86058d2af97b9925093d40c4e03360403170)",
|
||||
"store-api",
|
||||
"substrait 0.12.0",
|
||||
"substrait 0.12.1",
|
||||
"table",
|
||||
"tokio",
|
||||
"tokio-util",
|
||||
@@ -8062,7 +8078,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "partition"
|
||||
version = "0.12.0"
|
||||
version = "0.12.1"
|
||||
dependencies = [
|
||||
"api",
|
||||
"async-trait",
|
||||
@@ -8231,7 +8247,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db"
|
||||
dependencies = [
|
||||
"fixedbitset",
|
||||
"indexmap 2.6.0",
|
||||
"indexmap 2.7.1",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -8330,7 +8346,7 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"
|
||||
|
||||
[[package]]
|
||||
name = "pipeline"
|
||||
version = "0.12.0"
|
||||
version = "0.12.1"
|
||||
dependencies = [
|
||||
"ahash 0.8.11",
|
||||
"api",
|
||||
@@ -8470,7 +8486,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "plugins"
|
||||
version = "0.12.0"
|
||||
version = "0.12.1"
|
||||
dependencies = [
|
||||
"auth",
|
||||
"clap 4.5.19",
|
||||
@@ -8732,7 +8748,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "promql"
|
||||
version = "0.12.0"
|
||||
version = "0.12.1"
|
||||
dependencies = [
|
||||
"ahash 0.8.11",
|
||||
"async-trait",
|
||||
@@ -8756,8 +8772,7 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "promql-parser"
|
||||
version = "0.4.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7fe99e6f80a79abccf1e8fb48dd63473a36057e600cc6ea36147c8318698ae6f"
|
||||
source = "git+https://github.com/GreptimeTeam/promql-parser.git?rev=27abb8e16003a50c720f00d6c85f41f5fa2a2a8e#27abb8e16003a50c720f00d6c85f41f5fa2a2a8e"
|
||||
dependencies = [
|
||||
"cfgrammar",
|
||||
"chrono",
|
||||
@@ -8978,7 +8993,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "puffin"
|
||||
version = "0.12.0"
|
||||
version = "0.12.1"
|
||||
dependencies = [
|
||||
"async-compression 0.4.13",
|
||||
"async-trait",
|
||||
@@ -9019,7 +9034,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "query"
|
||||
version = "0.12.0"
|
||||
version = "0.12.1"
|
||||
dependencies = [
|
||||
"ahash 0.8.11",
|
||||
"api",
|
||||
@@ -9084,7 +9099,7 @@ dependencies = [
|
||||
"sqlparser 0.52.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=71dd86058d2af97b9925093d40c4e03360403170)",
|
||||
"statrs",
|
||||
"store-api",
|
||||
"substrait 0.12.0",
|
||||
"substrait 0.12.1",
|
||||
"table",
|
||||
"tokio",
|
||||
"tokio-stream",
|
||||
@@ -10323,7 +10338,7 @@ version = "1.0.137"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "930cfb6e6abf99298aaad7d29abbef7a9999a9a8806a40088f55f0dcec03146b"
|
||||
dependencies = [
|
||||
"indexmap 2.6.0",
|
||||
"indexmap 2.7.1",
|
||||
"itoa",
|
||||
"memchr",
|
||||
"ryu",
|
||||
@@ -10394,7 +10409,7 @@ dependencies = [
|
||||
"chrono",
|
||||
"hex",
|
||||
"indexmap 1.9.3",
|
||||
"indexmap 2.6.0",
|
||||
"indexmap 2.7.1",
|
||||
"serde",
|
||||
"serde_derive",
|
||||
"serde_json",
|
||||
@@ -10420,7 +10435,7 @@ version = "0.9.34+deprecated"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47"
|
||||
dependencies = [
|
||||
"indexmap 2.6.0",
|
||||
"indexmap 2.7.1",
|
||||
"itoa",
|
||||
"ryu",
|
||||
"serde",
|
||||
@@ -10429,7 +10444,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "servers"
|
||||
version = "0.12.0"
|
||||
version = "0.12.1"
|
||||
dependencies = [
|
||||
"ahash 0.8.11",
|
||||
"api",
|
||||
@@ -10481,6 +10496,7 @@ dependencies = [
|
||||
"humantime",
|
||||
"humantime-serde",
|
||||
"hyper 1.4.1",
|
||||
"indexmap 2.7.1",
|
||||
"influxdb_line_protocol",
|
||||
"itertools 0.10.5",
|
||||
"json5",
|
||||
@@ -10545,7 +10561,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "session"
|
||||
version = "0.12.0"
|
||||
version = "0.12.1"
|
||||
dependencies = [
|
||||
"api",
|
||||
"arc-swap",
|
||||
@@ -10854,7 +10870,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "sql"
|
||||
version = "0.12.0"
|
||||
version = "0.12.1"
|
||||
dependencies = [
|
||||
"api",
|
||||
"chrono",
|
||||
@@ -10891,12 +10907,12 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "sqlness"
|
||||
version = "0.6.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "308a7338f2211813d6e9da117e9b9b7aee5d072872d11a934002fd2bd4ab5276"
|
||||
source = "git+https://github.com/CeresDB/sqlness.git?rev=bb91f31ff58993e07ea89845791235138283a24c#bb91f31ff58993e07ea89845791235138283a24c"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"derive_builder 0.11.2",
|
||||
"duration-str",
|
||||
"futures",
|
||||
"minijinja",
|
||||
"prettydiff",
|
||||
"regex",
|
||||
@@ -10908,7 +10924,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "sqlness-runner"
|
||||
version = "0.12.0"
|
||||
version = "0.12.1"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"clap 4.5.19",
|
||||
@@ -10922,6 +10938,7 @@ dependencies = [
|
||||
"hex",
|
||||
"local-ip-address",
|
||||
"mysql",
|
||||
"num_cpus",
|
||||
"reqwest",
|
||||
"serde",
|
||||
"serde_json",
|
||||
@@ -11021,7 +11038,7 @@ dependencies = [
|
||||
"futures-util",
|
||||
"hashbrown 0.15.2",
|
||||
"hashlink",
|
||||
"indexmap 2.6.0",
|
||||
"indexmap 2.7.1",
|
||||
"log",
|
||||
"memchr",
|
||||
"once_cell",
|
||||
@@ -11224,7 +11241,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "store-api"
|
||||
version = "0.12.0"
|
||||
version = "0.12.1"
|
||||
dependencies = [
|
||||
"api",
|
||||
"aquamarine",
|
||||
@@ -11354,7 +11371,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "substrait"
|
||||
version = "0.12.0"
|
||||
version = "0.12.1"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"bytes",
|
||||
@@ -11535,7 +11552,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "table"
|
||||
version = "0.12.0"
|
||||
version = "0.12.1"
|
||||
dependencies = [
|
||||
"api",
|
||||
"async-trait",
|
||||
@@ -11786,7 +11803,7 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76"
|
||||
|
||||
[[package]]
|
||||
name = "tests-fuzz"
|
||||
version = "0.12.0"
|
||||
version = "0.12.1"
|
||||
dependencies = [
|
||||
"arbitrary",
|
||||
"async-trait",
|
||||
@@ -11830,7 +11847,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "tests-integration"
|
||||
version = "0.12.0"
|
||||
version = "0.12.1"
|
||||
dependencies = [
|
||||
"api",
|
||||
"arrow-flight",
|
||||
@@ -11896,7 +11913,7 @@ dependencies = [
|
||||
"sql",
|
||||
"sqlx",
|
||||
"store-api",
|
||||
"substrait 0.12.0",
|
||||
"substrait 0.12.1",
|
||||
"table",
|
||||
"tempfile",
|
||||
"time",
|
||||
@@ -12317,7 +12334,7 @@ version = "0.19.15"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421"
|
||||
dependencies = [
|
||||
"indexmap 2.6.0",
|
||||
"indexmap 2.7.1",
|
||||
"toml_datetime",
|
||||
"winnow 0.5.40",
|
||||
]
|
||||
@@ -12328,7 +12345,7 @@ version = "0.22.22"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5"
|
||||
dependencies = [
|
||||
"indexmap 2.6.0",
|
||||
"indexmap 2.7.1",
|
||||
"serde",
|
||||
"serde_spanned",
|
||||
"toml_datetime",
|
||||
@@ -12466,7 +12483,7 @@ dependencies = [
|
||||
"futures-core",
|
||||
"futures-util",
|
||||
"hdrhistogram",
|
||||
"indexmap 2.6.0",
|
||||
"indexmap 2.7.1",
|
||||
"pin-project-lite",
|
||||
"slab",
|
||||
"sync_wrapper 1.0.1",
|
||||
@@ -12954,6 +12971,14 @@ version = "0.1.7"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2896d95c02a80c6d6a5d6e953d479f5ddf2dfdb6a244441010e373ac0fb88971"
|
||||
|
||||
[[package]]
|
||||
name = "uddsketch"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/GreptimeTeam/timescaledb-toolkit.git?rev=84828fe8fb494a6a61412a3da96517fc80f7bb20#84828fe8fb494a6a61412a3da96517fc80f7bb20"
|
||||
dependencies = [
|
||||
"serde",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "unescaper"
|
||||
version = "0.1.5"
|
||||
|
||||
@@ -67,7 +67,7 @@ members = [
resolver = "2"

[workspace.package]
version = "0.12.0"
version = "0.12.1"
edition = "2021"
license = "Apache-2.0"

@@ -129,7 +129,7 @@ etcd-client = "0.14"
fst = "0.4.7"
futures = "0.3"
futures-util = "0.3"
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "fc09a5696608d2a0aa718cc835d5cb9c4e8e9387" }
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "072ce580502e015df1a6b03a185b60309a7c2a7a" }
hex = "0.4"
http = "1"
humantime = "2.1"
@@ -160,7 +160,9 @@ parquet = { version = "53.0.0", default-features = false, features = ["arrow", "
paste = "1.0"
pin-project = "1.0"
prometheus = { version = "0.13.3", features = ["process"] }
promql-parser = { version = "0.4.3", features = ["ser"] }
promql-parser = { git = "https://github.com/GreptimeTeam/promql-parser.git", features = [
"ser",
], rev = "27abb8e16003a50c720f00d6c85f41f5fa2a2a8e" }
prost = "0.13"
raft-engine = { version = "0.4.1", default-features = false }
rand = "0.8"
@@ -152,6 +152,7 @@
| `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
| `region_engine.mito.index.aux_path` | String | `""` | Auxiliary directory path for the index in filesystem, used to store intermediate files for<br/>creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.<br/>The default name for this directory is `index_intermediate` for backward compatibility.<br/><br/>This path contains two subdirectories:<br/>- `__intm`: for storing intermediate files used during creating index.<br/>- `staging`: for storing staging files used during searching index. |
| `region_engine.mito.index.staging_size` | String | `2GB` | The max capacity of the staging directory. |
| `region_engine.mito.index.staging_ttl` | String | `7d` | The TTL of the staging directory.<br/>Defaults to 7 days.<br/>Set it to "0s" to disable TTL. |
| `region_engine.mito.index.metadata_cache_size` | String | `64MiB` | Cache size for inverted index metadata. |
| `region_engine.mito.index.content_cache_size` | String | `128MiB` | Cache size for inverted index content. |
| `region_engine.mito.index.content_cache_page_size` | String | `64KiB` | Page size for inverted index content cache. |
@@ -318,6 +319,7 @@
| `selector` | String | `round_robin` | Datanode selector type.<br/>- `round_robin` (default value)<br/>- `lease_based`<br/>- `load_based`<br/>For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector". |
| `use_memory_store` | Bool | `false` | Store data in memory. |
| `enable_region_failover` | Bool | `false` | Whether to enable region failover.<br/>This feature is only available on GreptimeDB running on cluster mode and<br/>- Using Remote WAL<br/>- Using shared storage (e.g., s3). |
| `node_max_idle_time` | String | `24hours` | Max allowed idle time before removing node info from metasrv memory. |
| `enable_telemetry` | Bool | `true` | Whether to enable greptimedb telemetry. Enabled by default. |
| `runtime` | -- | -- | The runtime options. |
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
@@ -491,6 +493,7 @@
| `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
| `region_engine.mito.index.aux_path` | String | `""` | Auxiliary directory path for the index in filesystem, used to store intermediate files for<br/>creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.<br/>The default name for this directory is `index_intermediate` for backward compatibility.<br/><br/>This path contains two subdirectories:<br/>- `__intm`: for storing intermediate files used during creating index.<br/>- `staging`: for storing staging files used during searching index. |
| `region_engine.mito.index.staging_size` | String | `2GB` | The max capacity of the staging directory. |
| `region_engine.mito.index.staging_ttl` | String | `7d` | The TTL of the staging directory.<br/>Defaults to 7 days.<br/>Set it to "0s" to disable TTL. |
| `region_engine.mito.index.metadata_cache_size` | String | `64MiB` | Cache size for inverted index metadata. |
| `region_engine.mito.index.content_cache_size` | String | `128MiB` | Cache size for inverted index content. |
| `region_engine.mito.index.content_cache_page_size` | String | `64KiB` | Page size for inverted index content cache. |
@@ -231,6 +231,7 @@ overwrite_entry_start_id = false
# secret_access_key = "123456"
# endpoint = "https://s3.amazonaws.com"
# region = "us-west-2"
# enable_virtual_host_style = false

# Example of using Oss as the storage.
# [storage]
@@ -497,6 +498,11 @@ aux_path = ""
## The max capacity of the staging directory.
staging_size = "2GB"

## The TTL of the staging directory.
## Defaults to 7 days.
## Set it to "0s" to disable TTL.
staging_ttl = "7d"

## Cache size for inverted index metadata.
metadata_cache_size = "64MiB"

@@ -50,6 +50,9 @@ use_memory_store = false
## - Using shared storage (e.g., s3).
enable_region_failover = false

## Max allowed idle time before removing node info from metasrv memory.
node_max_idle_time = "24hours"

## Whether to enable greptimedb telemetry. Enabled by default.
#+ enable_telemetry = true

@@ -318,6 +318,7 @@ retry_delay = "500ms"
# secret_access_key = "123456"
# endpoint = "https://s3.amazonaws.com"
# region = "us-west-2"
# enable_virtual_host_style = false

# Example of using Oss as the storage.
# [storage]
@@ -584,6 +585,11 @@ aux_path = ""
## The max capacity of the staging directory.
staging_size = "2GB"

## The TTL of the staging directory.
## Defaults to 7 days.
## Set it to "0s" to disable TTL.
staging_ttl = "7d"

## Cache size for inverted index metadata.
metadata_cache_size = "64MiB"
File diff suppressed because it is too large
@@ -384,8 +384,8 @@
|
||||
"rowHeight": 0.9,
|
||||
"showValue": "auto",
|
||||
"tooltip": {
|
||||
"mode": "none",
|
||||
"sort": "none"
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
@@ -483,8 +483,8 @@
|
||||
"showLegend": true
|
||||
},
|
||||
"tooltip": {
|
||||
"mode": "single",
|
||||
"sort": "none"
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
}
|
||||
},
|
||||
"pluginVersion": "10.2.3",
|
||||
@@ -578,8 +578,8 @@
|
||||
"showLegend": true
|
||||
},
|
||||
"tooltip": {
|
||||
"mode": "single",
|
||||
"sort": "none"
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
}
|
||||
},
|
||||
"pluginVersion": "10.2.3",
|
||||
@@ -601,7 +601,7 @@
|
||||
"type": "timeseries"
|
||||
},
|
||||
{
|
||||
"collapsed": true,
|
||||
"collapsed": false,
|
||||
"gridPos": {
|
||||
"h": 1,
|
||||
"w": 24,
|
||||
@@ -684,8 +684,8 @@
|
||||
"showLegend": true
|
||||
},
|
||||
"tooltip": {
|
||||
"mode": "single",
|
||||
"sort": "none"
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
@@ -878,8 +878,8 @@
|
||||
"showLegend": true
|
||||
},
|
||||
"tooltip": {
|
||||
"mode": "single",
|
||||
"sort": "none"
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
@@ -1124,8 +1124,8 @@
|
||||
"showLegend": true
|
||||
},
|
||||
"tooltip": {
|
||||
"mode": "single",
|
||||
"sort": "none"
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
@@ -1223,8 +1223,8 @@
|
||||
"showLegend": true
|
||||
},
|
||||
"tooltip": {
|
||||
"mode": "single",
|
||||
"sort": "none"
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
@@ -1322,8 +1322,8 @@
|
||||
"showLegend": true
|
||||
},
|
||||
"tooltip": {
|
||||
"mode": "single",
|
||||
"sort": "none"
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
@@ -1456,8 +1456,8 @@
|
||||
"showLegend": true
|
||||
},
|
||||
"tooltip": {
|
||||
"mode": "single",
|
||||
"sort": "none"
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
@@ -1573,8 +1573,8 @@
|
||||
"showLegend": true
|
||||
},
|
||||
"tooltip": {
|
||||
"mode": "single",
|
||||
"sort": "none"
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
@@ -1673,8 +1673,8 @@
|
||||
"showLegend": true
|
||||
},
|
||||
"tooltip": {
|
||||
"mode": "single",
|
||||
"sort": "none"
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
@@ -1773,8 +1773,8 @@
|
||||
"showLegend": true
|
||||
},
|
||||
"tooltip": {
|
||||
"mode": "single",
|
||||
"sort": "none"
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
@@ -1890,8 +1890,8 @@
|
||||
"showLegend": true
|
||||
},
|
||||
"tooltip": {
|
||||
"mode": "single",
|
||||
"sort": "none"
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
@@ -2002,8 +2002,8 @@
|
||||
"showLegend": true
|
||||
},
|
||||
"tooltip": {
|
||||
"mode": "single",
|
||||
"sort": "none"
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
@@ -2120,8 +2120,8 @@
|
||||
"showLegend": true
|
||||
},
|
||||
"tooltip": {
|
||||
"mode": "single",
|
||||
"sort": "none"
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
@@ -2233,8 +2233,8 @@
|
||||
"showLegend": true
|
||||
},
|
||||
"tooltip": {
|
||||
"mode": "single",
|
||||
"sort": "none"
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
@@ -2334,8 +2334,8 @@
|
||||
"showLegend": true
|
||||
},
|
||||
"tooltip": {
|
||||
"mode": "single",
|
||||
"sort": "none"
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
@@ -2435,8 +2435,8 @@
|
||||
"showLegend": true
|
||||
},
|
||||
"tooltip": {
|
||||
"mode": "single",
|
||||
"sort": "none"
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
@@ -2548,8 +2548,8 @@
|
||||
"showLegend": true
|
||||
},
|
||||
"tooltip": {
|
||||
"mode": "single",
|
||||
"sort": "none"
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
@@ -2661,8 +2661,8 @@
|
||||
"showLegend": true
|
||||
},
|
||||
"tooltip": {
|
||||
"mode": "single",
|
||||
"sort": "none"
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
@@ -2788,8 +2788,8 @@
|
||||
"showLegend": true
|
||||
},
|
||||
"tooltip": {
|
||||
"mode": "single",
|
||||
"sort": "none"
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
@@ -2889,8 +2889,8 @@
|
||||
"showLegend": true
|
||||
},
|
||||
"tooltip": {
|
||||
"mode": "single",
|
||||
"sort": "none"
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
@@ -2990,8 +2990,8 @@
|
||||
"showLegend": true
|
||||
},
|
||||
"tooltip": {
|
||||
"mode": "single",
|
||||
"sort": "none"
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
@@ -3091,8 +3091,8 @@
|
||||
"showLegend": true
|
||||
},
|
||||
"tooltip": {
|
||||
"mode": "single",
|
||||
"sort": "none"
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
@@ -3191,8 +3191,8 @@
|
||||
"showLegend": true
|
||||
},
|
||||
"tooltip": {
|
||||
"mode": "single",
|
||||
"sort": "none"
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
@@ -3302,8 +3302,8 @@
|
||||
"showLegend": true
|
||||
},
|
||||
"tooltip": {
|
||||
"mode": "single",
|
||||
"sort": "none"
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
@@ -3432,8 +3432,8 @@
|
||||
"showLegend": true
|
||||
},
|
||||
"tooltip": {
|
||||
"mode": "single",
|
||||
"sort": "none"
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
@@ -3543,8 +3543,8 @@
|
||||
"showLegend": true
|
||||
},
|
||||
"tooltip": {
|
||||
"mode": "single",
|
||||
"sort": "none"
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
@@ -3657,8 +3657,8 @@
|
||||
"showLegend": true
|
||||
},
|
||||
"tooltip": {
|
||||
"mode": "single",
|
||||
"sort": "none"
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
@@ -3808,8 +3808,8 @@
|
||||
"showLegend": true
|
||||
},
|
||||
"tooltip": {
|
||||
"mode": "single",
|
||||
"sort": "none"
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
@@ -3909,8 +3909,8 @@
|
||||
"showLegend": true
|
||||
},
|
||||
"tooltip": {
|
||||
"mode": "single",
|
||||
"sort": "none"
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
@@ -4011,8 +4011,8 @@
|
||||
"showLegend": true
|
||||
},
|
||||
"tooltip": {
|
||||
"mode": "single",
|
||||
"sort": "none"
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
@@ -4113,8 +4113,8 @@
|
||||
"showLegend": true
|
||||
},
|
||||
"tooltip": {
|
||||
"mode": "single",
|
||||
"sort": "none"
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
|
||||
@@ -15,13 +15,10 @@ common-macro.workspace = true
common-time.workspace = true
datatypes.workspace = true
greptime-proto.workspace = true
paste = "1.0"
paste.workspace = true
prost.workspace = true
serde_json.workspace = true
snafu.workspace = true

[build-dependencies]
tonic-build = "0.11"

[dev-dependencies]
paste = "1.0"
@@ -15,7 +15,7 @@ api.workspace = true
arrow.workspace = true
arrow-schema.workspace = true
async-stream.workspace = true
async-trait = "0.1"
async-trait.workspace = true
bytes.workspace = true
common-catalog.workspace = true
common-error.workspace = true
@@ -31,7 +31,7 @@ common-version.workspace = true
dashmap.workspace = true
datafusion.workspace = true
datatypes.workspace = true
futures = "0.3"
futures.workspace = true
futures-util.workspace = true
humantime.workspace = true
itertools.workspace = true
@@ -39,7 +39,7 @@ lazy_static.workspace = true
meta-client.workspace = true
moka = { workspace = true, features = ["future", "sync"] }
partition.workspace = true
paste = "1.0"
paste.workspace = true
prometheus.workspace = true
rustc-hash.workspace = true
serde_json.workspace = true
@@ -49,7 +49,7 @@ sql.workspace = true
store-api.workspace = true
table.workspace = true
tokio.workspace = true
tokio-stream = "0.1"
tokio-stream.workspace = true

[dev-dependencies]
cache.workspace = true
@@ -42,7 +42,7 @@ pub struct Instance {
}

impl Instance {
    fn new(instance: MetasrvInstance, guard: Vec<WorkerGuard>) -> Self {
    pub fn new(instance: MetasrvInstance, guard: Vec<WorkerGuard>) -> Self {
        Self {
            instance,
            _guard: guard,
@@ -18,7 +18,7 @@ bytes.workspace = true
common-error.workspace = true
common-macro.workspace = true
futures.workspace = true
paste = "1.0"
paste.workspace = true
pin-project.workspace = true
rand.workspace = true
serde = { version = "1.0", features = ["derive"] }
@@ -35,7 +35,7 @@ orc-rust = { version = "0.5", default-features = false, features = [
"async",
] }
parquet.workspace = true
paste = "1.0"
paste.workspace = true
rand.workspace = true
regex = "1.7"
serde.workspace = true
@@ -12,9 +12,11 @@ default = ["geo"]
geo = ["geohash", "h3o", "s2", "wkt", "geo-types", "dep:geo"]

[dependencies]
ahash = "0.8"
api.workspace = true
arc-swap = "1.0"
async-trait.workspace = true
bincode = "1.3"
common-base.workspace = true
common-catalog.workspace = true
common-error.workspace = true
@@ -32,12 +34,13 @@ geo = { version = "0.29", optional = true }
geo-types = { version = "0.7", optional = true }
geohash = { version = "0.13", optional = true }
h3o = { version = "0.6", optional = true }
hyperloglogplus = "0.4"
jsonb.workspace = true
nalgebra.workspace = true
num = "0.4"
num-traits = "0.2"
once_cell.workspace = true
paste = "1.0"
paste.workspace = true
s2 = { version = "0.0.12", optional = true }
serde.workspace = true
serde_json.workspace = true
@@ -47,6 +50,7 @@ sql.workspace = true
statrs = "0.16"
store-api.workspace = true
table.workspace = true
uddsketch = { git = "https://github.com/GreptimeTeam/timescaledb-toolkit.git", rev = "84828fe8fb494a6a61412a3da96517fc80f7bb20" }
wkt = { version = "0.11", optional = true }

[dev-dependencies]
20
src/common/function/src/aggr.rs
Normal file
@@ -0,0 +1,20 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

mod hll;
mod uddsketch_state;

pub(crate) use hll::HllStateType;
pub use hll::{HllState, HLL_MERGE_NAME, HLL_NAME};
pub use uddsketch_state::{UddSketchState, UDDSKETCH_STATE_NAME};
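The new `aggr` module re-exports the HyperLogLog aggregate builders (`hll`, `hll_merge`) introduced in this release. A minimal sketch of how they could be registered with a DataFusion `SessionContext`; `register_udaf` is standard DataFusion API, but the `common_function::aggr` import path is an assumption based on the re-exports above, not code from the repository:

```rust
// Sketch only: wiring the exported UDAFs into DataFusion. Assumes the crate
// is importable as `common_function` and exposes the `aggr` module publicly.
use common_function::aggr::HllState;
use datafusion::prelude::SessionContext;

fn register_hll_udafs(ctx: &SessionContext) {
    // `hll(text)` aggregates string values into a serialized HyperLogLog state.
    ctx.register_udaf(HllState::state_udf_impl());
    // `hll_merge(binary)` merges serialized states produced by `hll`.
    ctx.register_udaf(HllState::merge_udf_impl());
}
```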
319
src/common/function/src/aggr/hll.rs
Normal file
@@ -0,0 +1,319 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Arc;

use common_query::prelude::*;
use common_telemetry::trace;
use datafusion::arrow::array::ArrayRef;
use datafusion::common::cast::{as_binary_array, as_string_array};
use datafusion::common::not_impl_err;
use datafusion::error::{DataFusionError, Result as DfResult};
use datafusion::logical_expr::function::AccumulatorArgs;
use datafusion::logical_expr::{Accumulator as DfAccumulator, AggregateUDF};
use datafusion::prelude::create_udaf;
use datatypes::arrow::datatypes::DataType;
use hyperloglogplus::{HyperLogLog, HyperLogLogPlus};

use crate::utils::FixedRandomState;

pub const HLL_NAME: &str = "hll";
pub const HLL_MERGE_NAME: &str = "hll_merge";

const DEFAULT_PRECISION: u8 = 14;

pub(crate) type HllStateType = HyperLogLogPlus<String, FixedRandomState>;

pub struct HllState {
    hll: HllStateType,
}

impl std::fmt::Debug for HllState {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "HllState<Opaque>")
    }
}

impl Default for HllState {
    fn default() -> Self {
        Self::new()
    }
}

impl HllState {
    pub fn new() -> Self {
        Self {
            // Safety: the DEFAULT_PRECISION is fixed and valid
            hll: HllStateType::new(DEFAULT_PRECISION, FixedRandomState::new()).unwrap(),
        }
    }

    /// Create a UDF for the `hll` function.
    ///
    /// `hll` accepts a string column and aggregates the
    /// values into a HyperLogLog state.
    pub fn state_udf_impl() -> AggregateUDF {
        create_udaf(
            HLL_NAME,
            vec![DataType::Utf8],
            Arc::new(DataType::Binary),
            Volatility::Immutable,
            Arc::new(Self::create_accumulator),
            Arc::new(vec![DataType::Binary]),
        )
    }

    /// Create a UDF for the `hll_merge` function.
    ///
    /// `hll_merge` accepts a binary column of states generated by `hll`
    /// and merges them into a single state.
    pub fn merge_udf_impl() -> AggregateUDF {
        create_udaf(
            HLL_MERGE_NAME,
            vec![DataType::Binary],
            Arc::new(DataType::Binary),
            Volatility::Immutable,
            Arc::new(Self::create_merge_accumulator),
            Arc::new(vec![DataType::Binary]),
        )
    }

    fn update(&mut self, value: &str) {
        self.hll.insert(value);
    }

    fn merge(&mut self, raw: &[u8]) {
        if let Ok(serialized) = bincode::deserialize::<HllStateType>(raw) {
            if let Ok(()) = self.hll.merge(&serialized) {
                return;
            }
        }
        trace!("Warning: Failed to merge HyperLogLog from {:?}", raw);
    }

    fn create_accumulator(acc_args: AccumulatorArgs) -> DfResult<Box<dyn DfAccumulator>> {
        let data_type = acc_args.exprs[0].data_type(acc_args.schema)?;

        match data_type {
            DataType::Utf8 => Ok(Box::new(HllState::new())),
            other => not_impl_err!("{HLL_NAME} does not support data type: {other}"),
        }
    }

    fn create_merge_accumulator(acc_args: AccumulatorArgs) -> DfResult<Box<dyn DfAccumulator>> {
        let data_type = acc_args.exprs[0].data_type(acc_args.schema)?;

        match data_type {
            DataType::Binary => Ok(Box::new(HllState::new())),
            other => not_impl_err!("{HLL_MERGE_NAME} does not support data type: {other}"),
        }
    }
}
impl DfAccumulator for HllState {
|
||||
fn update_batch(&mut self, values: &[ArrayRef]) -> DfResult<()> {
|
||||
let array = &values[0];
|
||||
|
||||
match array.data_type() {
|
||||
DataType::Utf8 => {
|
||||
let string_array = as_string_array(array)?;
|
||||
for value in string_array.iter().flatten() {
|
||||
self.update(value);
|
||||
}
|
||||
}
|
||||
DataType::Binary => {
|
||||
let binary_array = as_binary_array(array)?;
|
||||
for v in binary_array.iter().flatten() {
|
||||
self.merge(v);
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
return not_impl_err!(
|
||||
"HLL functions do not support data type: {}",
|
||||
array.data_type()
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn evaluate(&mut self) -> DfResult<ScalarValue> {
|
||||
Ok(ScalarValue::Binary(Some(
|
||||
bincode::serialize(&self.hll).map_err(|e| {
|
||||
DataFusionError::Internal(format!("Failed to serialize HyperLogLog: {}", e))
|
||||
})?,
|
||||
)))
|
||||
}
|
||||
|
||||
fn size(&self) -> usize {
|
||||
std::mem::size_of_val(&self.hll)
|
||||
}
|
||||
|
||||
fn state(&mut self) -> DfResult<Vec<ScalarValue>> {
|
||||
Ok(vec![ScalarValue::Binary(Some(
|
||||
bincode::serialize(&self.hll).map_err(|e| {
|
||||
DataFusionError::Internal(format!("Failed to serialize HyperLogLog: {}", e))
|
||||
})?,
|
||||
))])
|
||||
}
|
||||
|
||||
fn merge_batch(&mut self, states: &[ArrayRef]) -> DfResult<()> {
|
||||
let array = &states[0];
|
||||
let binary_array = as_binary_array(array)?;
|
||||
for v in binary_array.iter().flatten() {
|
||||
self.merge(v);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use datafusion::arrow::array::{BinaryArray, StringArray};
|
||||
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_hll_basic() {
|
||||
let mut state = HllState::new();
|
||||
state.update("1");
|
||||
state.update("2");
|
||||
state.update("3");
|
||||
|
||||
let result = state.evaluate().unwrap();
|
||||
if let ScalarValue::Binary(Some(bytes)) = result {
|
||||
let mut hll: HllStateType = bincode::deserialize(&bytes).unwrap();
|
||||
assert_eq!(hll.count().trunc() as u32, 3);
|
||||
} else {
|
||||
panic!("Expected binary scalar value");
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_hll_roundtrip() {
|
||||
let mut state = HllState::new();
|
||||
state.update("1");
|
||||
state.update("2");
|
||||
|
||||
// Serialize
|
||||
let serialized = state.evaluate().unwrap();
|
||||
|
||||
// Create new state and merge the serialized data
|
||||
let mut new_state = HllState::new();
|
||||
if let ScalarValue::Binary(Some(bytes)) = &serialized {
|
||||
new_state.merge(bytes);
|
||||
|
||||
// Verify the merged state matches original
|
||||
let result = new_state.evaluate().unwrap();
|
||||
if let ScalarValue::Binary(Some(new_bytes)) = result {
|
||||
let mut original: HllStateType = bincode::deserialize(bytes).unwrap();
|
||||
let mut merged: HllStateType = bincode::deserialize(&new_bytes).unwrap();
|
||||
assert_eq!(original.count(), merged.count());
|
||||
} else {
|
||||
panic!("Expected binary scalar value");
|
||||
}
|
||||
} else {
|
||||
panic!("Expected binary scalar value");
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_hll_batch_update() {
|
||||
let mut state = HllState::new();
|
||||
|
||||
// Test string values
|
||||
let str_values = vec!["a", "b", "c", "d", "e", "f", "g", "h", "i"];
|
||||
let str_array = Arc::new(StringArray::from(str_values)) as ArrayRef;
|
||||
state.update_batch(&[str_array]).unwrap();
|
||||
|
||||
let result = state.evaluate().unwrap();
|
||||
if let ScalarValue::Binary(Some(bytes)) = result {
|
||||
let mut hll: HllStateType = bincode::deserialize(&bytes).unwrap();
|
||||
assert_eq!(hll.count().trunc() as u32, 9);
|
||||
} else {
|
||||
panic!("Expected binary scalar value");
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_hll_merge_batch() {
|
||||
let mut state1 = HllState::new();
|
||||
state1.update("1");
|
||||
let state1_binary = state1.evaluate().unwrap();
|
||||
|
||||
let mut state2 = HllState::new();
|
||||
state2.update("2");
|
||||
let state2_binary = state2.evaluate().unwrap();
|
||||
|
||||
let mut merged_state = HllState::new();
|
||||
if let (ScalarValue::Binary(Some(bytes1)), ScalarValue::Binary(Some(bytes2))) =
|
||||
(&state1_binary, &state2_binary)
|
||||
{
|
||||
let binary_array = Arc::new(BinaryArray::from(vec![
|
||||
bytes1.as_slice(),
|
||||
bytes2.as_slice(),
|
||||
])) as ArrayRef;
|
||||
merged_state.merge_batch(&[binary_array]).unwrap();
|
||||
|
||||
let result = merged_state.evaluate().unwrap();
|
||||
if let ScalarValue::Binary(Some(bytes)) = result {
|
||||
let mut hll: HllStateType = bincode::deserialize(&bytes).unwrap();
|
||||
assert_eq!(hll.count().trunc() as u32, 2);
|
||||
} else {
|
||||
panic!("Expected binary scalar value");
|
||||
}
|
||||
} else {
|
||||
panic!("Expected binary scalar values");
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_hll_merge_function() {
|
||||
// Create two HLL states with different values
|
||||
let mut state1 = HllState::new();
|
||||
state1.update("1");
|
||||
state1.update("2");
|
||||
let state1_binary = state1.evaluate().unwrap();
|
||||
|
||||
let mut state2 = HllState::new();
|
||||
state2.update("2");
|
||||
state2.update("3");
|
||||
let state2_binary = state2.evaluate().unwrap();
|
||||
|
||||
// Create a merge state and merge both states
|
||||
let mut merge_state = HllState::new();
|
||||
if let (ScalarValue::Binary(Some(bytes1)), ScalarValue::Binary(Some(bytes2))) =
|
||||
(&state1_binary, &state2_binary)
|
||||
{
|
||||
let binary_array = Arc::new(BinaryArray::from(vec![
|
||||
bytes1.as_slice(),
|
||||
bytes2.as_slice(),
|
||||
])) as ArrayRef;
|
||||
merge_state.update_batch(&[binary_array]).unwrap();
|
||||
|
||||
let result = merge_state.evaluate().unwrap();
|
||||
if let ScalarValue::Binary(Some(bytes)) = result {
|
||||
let mut hll: HllStateType = bincode::deserialize(&bytes).unwrap();
|
||||
// Should have 3 unique values: "1", "2", "3"
|
||||
assert_eq!(hll.count().trunc() as u32, 3);
|
||||
} else {
|
||||
panic!("Expected binary scalar value");
|
||||
}
|
||||
} else {
|
||||
panic!("Expected binary scalar values");
|
||||
}
|
||||
}
|
||||
}
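A minimal sketch of how the two UDAFs above could be registered with a plain DataFusion `SessionContext`; `register_udaf` is stock DataFusion API, and this standalone setup is illustrative rather than how this patch wires the functions into GreptimeDB's own registry (see the `FUNCTION_REGISTRY` hunk further down):

use datafusion::prelude::SessionContext;

// Register `hll` (strings -> serialized sketch) and `hll_merge`
// (serialized sketches -> one merged sketch).
fn register_hll_udafs(ctx: &SessionContext) {
    ctx.register_udaf(HllState::state_udf_impl());
    ctx.register_udaf(HllState::merge_udf_impl());
}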
307
src/common/function/src/aggr/uddsketch_state.rs
Normal file
@@ -0,0 +1,307 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Arc;

use common_query::prelude::*;
use common_telemetry::trace;
use datafusion::common::cast::{as_binary_array, as_primitive_array};
use datafusion::common::not_impl_err;
use datafusion::error::{DataFusionError, Result as DfResult};
use datafusion::logical_expr::function::AccumulatorArgs;
use datafusion::logical_expr::{Accumulator as DfAccumulator, AggregateUDF};
use datafusion::physical_plan::expressions::Literal;
use datafusion::prelude::create_udaf;
use datatypes::arrow::array::ArrayRef;
use datatypes::arrow::datatypes::{DataType, Float64Type};
use uddsketch::{SketchHashKey, UDDSketch};

pub const UDDSKETCH_STATE_NAME: &str = "uddsketch_state";

#[derive(Debug)]
pub struct UddSketchState {
    uddsketch: UDDSketch,
}

impl UddSketchState {
    pub fn new(bucket_size: u64, error_rate: f64) -> Self {
        Self {
            uddsketch: UDDSketch::new(bucket_size, error_rate),
        }
    }

    pub fn udf_impl() -> AggregateUDF {
        create_udaf(
            UDDSKETCH_STATE_NAME,
            vec![DataType::Int64, DataType::Float64, DataType::Float64],
            Arc::new(DataType::Binary),
            Volatility::Immutable,
            Arc::new(|args| {
                let (bucket_size, error_rate) = downcast_accumulator_args(args)?;
                Ok(Box::new(UddSketchState::new(bucket_size, error_rate)))
            }),
            Arc::new(vec![DataType::Binary]),
        )
    }

    fn update(&mut self, value: f64) {
        self.uddsketch.add_value(value);
    }

    fn merge(&mut self, raw: &[u8]) {
        if let Ok(uddsketch) = bincode::deserialize::<UDDSketch>(raw) {
            if uddsketch.count() != 0 {
                self.uddsketch.merge_sketch(&uddsketch);
            }
        } else {
            trace!("Warning: Failed to deserialize UDDSketch from {:?}", raw);
        }
    }
}

fn downcast_accumulator_args(args: AccumulatorArgs) -> DfResult<(u64, f64)> {
    let bucket_size = match args.exprs[0]
        .as_any()
        .downcast_ref::<Literal>()
        .map(|lit| lit.value())
    {
        Some(ScalarValue::Int64(Some(value))) => *value as u64,
        _ => {
            return not_impl_err!(
                "{} not supported for bucket size: {}",
                UDDSKETCH_STATE_NAME,
                &args.exprs[0]
            )
        }
    };

    let error_rate = match args.exprs[1]
        .as_any()
        .downcast_ref::<Literal>()
        .map(|lit| lit.value())
    {
        Some(ScalarValue::Float64(Some(value))) => *value,
        _ => {
            return not_impl_err!(
                "{} not supported for error rate: {}",
                UDDSKETCH_STATE_NAME,
                &args.exprs[1]
            )
        }
    };

    Ok((bucket_size, error_rate))
}

impl DfAccumulator for UddSketchState {
    fn update_batch(&mut self, values: &[ArrayRef]) -> DfResult<()> {
        let array = &values[2]; // the third column is the data value
        let f64_array = as_primitive_array::<Float64Type>(array)?;
        for v in f64_array.iter().flatten() {
            self.update(v);
        }

        Ok(())
    }

    fn evaluate(&mut self) -> DfResult<ScalarValue> {
        Ok(ScalarValue::Binary(Some(
            bincode::serialize(&self.uddsketch).map_err(|e| {
                DataFusionError::Internal(format!("Failed to serialize UDDSketch: {}", e))
            })?,
        )))
    }

    fn size(&self) -> usize {
        // Base size of the UDDSketch struct fields
        let mut total_size = std::mem::size_of::<f64>() * 3 + // alpha, gamma, values_sum
            std::mem::size_of::<u32>() + // compactions
            std::mem::size_of::<u64>() * 2; // max_buckets, num_values

        // Size of the buckets (SketchHashMap).
        // Each bucket entry contains:
        // - SketchHashKey (enum with i64/Zero/Invalid variants)
        // - SketchHashEntry (count: u64, next: SketchHashKey)
        let bucket_entry_size = std::mem::size_of::<SketchHashKey>() + // key
            std::mem::size_of::<u64>() + // count
            std::mem::size_of::<SketchHashKey>(); // next

        total_size += self.uddsketch.current_buckets_count() * bucket_entry_size;

        total_size
    }

    fn state(&mut self) -> DfResult<Vec<ScalarValue>> {
        Ok(vec![ScalarValue::Binary(Some(
            bincode::serialize(&self.uddsketch).map_err(|e| {
                DataFusionError::Internal(format!("Failed to serialize UDDSketch: {}", e))
            })?,
        ))])
    }

    fn merge_batch(&mut self, states: &[ArrayRef]) -> DfResult<()> {
        let array = &states[0];
        let binary_array = as_binary_array(array)?;
        for v in binary_array.iter().flatten() {
            self.merge(v);
        }

        Ok(())
    }
}

#[cfg(test)]
mod tests {
    use datafusion::arrow::array::{BinaryArray, Float64Array};

    use super::*;

    #[test]
    fn test_uddsketch_state_basic() {
        let mut state = UddSketchState::new(10, 0.01);
        state.update(1.0);
        state.update(2.0);
        state.update(3.0);

        let result = state.evaluate().unwrap();
        if let ScalarValue::Binary(Some(bytes)) = result {
            let deserialized: UDDSketch = bincode::deserialize(&bytes).unwrap();
            assert_eq!(deserialized.count(), 3);
        } else {
            panic!("Expected binary scalar value");
        }
    }

    #[test]
    fn test_uddsketch_state_roundtrip() {
        let mut state = UddSketchState::new(10, 0.01);
        state.update(1.0);
        state.update(2.0);

        // Serialize
        let serialized = state.evaluate().unwrap();

        // Create a new state and merge the serialized data
        let mut new_state = UddSketchState::new(10, 0.01);
        if let ScalarValue::Binary(Some(bytes)) = &serialized {
            new_state.merge(bytes);

            // Verify the merged state matches the original by comparing deserialized values
            let original_sketch: UDDSketch = bincode::deserialize(bytes).unwrap();
            let new_result = new_state.evaluate().unwrap();
            if let ScalarValue::Binary(Some(new_bytes)) = new_result {
                let new_sketch: UDDSketch = bincode::deserialize(&new_bytes).unwrap();
                assert_eq!(original_sketch.count(), new_sketch.count());
                assert_eq!(original_sketch.sum(), new_sketch.sum());
                assert_eq!(original_sketch.mean(), new_sketch.mean());
                assert_eq!(original_sketch.max_error(), new_sketch.max_error());
                // Compare a few quantiles to ensure statistical equivalence
                for q in [0.1, 0.5, 0.9].iter() {
                    assert!(
                        (original_sketch.estimate_quantile(*q) - new_sketch.estimate_quantile(*q))
                            .abs()
                            < 1e-10,
                        "Quantile {} mismatch: original={}, new={}",
                        q,
                        original_sketch.estimate_quantile(*q),
                        new_sketch.estimate_quantile(*q)
                    );
                }
            } else {
                panic!("Expected binary scalar value");
            }
        } else {
            panic!("Expected binary scalar value");
        }
    }

    #[test]
    fn test_uddsketch_state_batch_update() {
        let mut state = UddSketchState::new(10, 0.01);
        let values = vec![1.0f64, 2.0, 3.0];
        let array = Arc::new(Float64Array::from(values)) as ArrayRef;

        state
            .update_batch(&[array.clone(), array.clone(), array])
            .unwrap();

        let result = state.evaluate().unwrap();
        if let ScalarValue::Binary(Some(bytes)) = result {
            let deserialized: UDDSketch = bincode::deserialize(&bytes).unwrap();
            assert_eq!(deserialized.count(), 3);
        } else {
            panic!("Expected binary scalar value");
        }
    }

    #[test]
    fn test_uddsketch_state_merge_batch() {
        let mut state1 = UddSketchState::new(10, 0.01);
        state1.update(1.0);
        let state1_binary = state1.evaluate().unwrap();

        let mut state2 = UddSketchState::new(10, 0.01);
        state2.update(2.0);
        let state2_binary = state2.evaluate().unwrap();

        let mut merged_state = UddSketchState::new(10, 0.01);
        if let (ScalarValue::Binary(Some(bytes1)), ScalarValue::Binary(Some(bytes2))) =
            (&state1_binary, &state2_binary)
        {
            let binary_array = Arc::new(BinaryArray::from(vec![
                bytes1.as_slice(),
                bytes2.as_slice(),
            ])) as ArrayRef;
            merged_state.merge_batch(&[binary_array]).unwrap();

            let result = merged_state.evaluate().unwrap();
            if let ScalarValue::Binary(Some(bytes)) = result {
                let deserialized: UDDSketch = bincode::deserialize(&bytes).unwrap();
                assert_eq!(deserialized.count(), 2);
            } else {
                panic!("Expected binary scalar value");
            }
        } else {
            panic!("Expected binary scalar values");
        }
    }

    #[test]
    fn test_uddsketch_state_size() {
        let mut state = UddSketchState::new(10, 0.01);
        let initial_size = state.size();

        // Add some values to create buckets
        state.update(1.0);
        state.update(2.0);
        state.update(3.0);

        let size_with_values = state.size();
        assert!(
            size_with_values > initial_size,
            "Size should increase after adding values: initial={}, with_values={}",
            initial_size,
            size_with_values
        );

        // Verify the size increases with more buckets
        state.update(10.0); // This should create a new bucket
        assert!(
            state.size() > size_with_values,
            "Size should increase after adding a new bucket: prev={}, new={}",
            size_with_values,
            state.size()
        );
    }
}
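For orientation, a minimal sketch of the underlying `uddsketch` crate API that the accumulator above wraps, using only calls visible in this file; the constants are illustrative, not prescribed defaults:

use uddsketch::UDDSketch;

// Feed raw values into a sketch and read off a p95 estimate.
fn p95_of(values: &[f64]) -> f64 {
    let mut sketch = UDDSketch::new(128, 0.01); // max buckets, relative error rate
    for v in values {
        sketch.add_value(*v);
    }
    sketch.estimate_quantile(0.95)
}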
@@ -22,10 +22,12 @@ use crate::function::{AsyncFunctionRef, FunctionRef};
use crate::scalars::aggregate::{AggregateFunctionMetaRef, AggregateFunctions};
use crate::scalars::date::DateFunction;
use crate::scalars::expression::ExpressionFunction;
use crate::scalars::hll_count::HllCalcFunction;
use crate::scalars::json::JsonFunction;
use crate::scalars::matches::MatchesFunction;
use crate::scalars::math::MathFunction;
use crate::scalars::timestamp::TimestampFunction;
use crate::scalars::uddsketch_calc::UddSketchCalcFunction;
use crate::scalars::vector::VectorFunction;
use crate::system::SystemFunction;
use crate::table::TableFunction;
@@ -105,6 +107,8 @@ pub static FUNCTION_REGISTRY: Lazy<Arc<FunctionRegistry>> = Lazy::new(|| {
    TimestampFunction::register(&function_registry);
    DateFunction::register(&function_registry);
    ExpressionFunction::register(&function_registry);
    UddSketchCalcFunction::register(&function_registry);
    HllCalcFunction::register(&function_registry);

    // Aggregate functions
    AggregateFunctions::register(&function_registry);

@@ -21,6 +21,7 @@ pub mod scalars;
mod system;
mod table;

pub mod aggr;
pub mod function;
pub mod function_registry;
pub mod handlers;

@@ -22,7 +22,9 @@ pub mod matches;
pub mod math;
pub mod vector;

pub(crate) mod hll_count;
#[cfg(test)]
pub(crate) mod test;
pub(crate) mod timestamp;
pub(crate) mod uddsketch_calc;
pub mod udf;

175
src/common/function/src/scalars/hll_count.rs
Normal file
@@ -0,0 +1,175 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Implementation of the scalar function `hll_count`.

use std::fmt;
use std::fmt::Display;
use std::sync::Arc;

use common_query::error::{DowncastVectorSnafu, InvalidFuncArgsSnafu, Result};
use common_query::prelude::{Signature, Volatility};
use datatypes::data_type::ConcreteDataType;
use datatypes::prelude::Vector;
use datatypes::scalars::{ScalarVector, ScalarVectorBuilder};
use datatypes::vectors::{BinaryVector, MutableVector, UInt64VectorBuilder, VectorRef};
use hyperloglogplus::HyperLogLog;
use snafu::OptionExt;

use crate::aggr::HllStateType;
use crate::function::{Function, FunctionContext};
use crate::function_registry::FunctionRegistry;

const NAME: &str = "hll_count";

/// HllCalcFunction implements the scalar function `hll_count`.
///
/// It accepts one argument:
/// 1. The serialized HyperLogLogPlus state, as produced by the aggregator (binary).
///
/// For each row, it deserializes the sketch and returns the estimated cardinality.
#[derive(Debug, Default)]
pub struct HllCalcFunction;

impl HllCalcFunction {
    pub fn register(registry: &FunctionRegistry) {
        registry.register(Arc::new(HllCalcFunction));
    }
}

impl Display for HllCalcFunction {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", NAME.to_ascii_uppercase())
    }
}

impl Function for HllCalcFunction {
    fn name(&self) -> &str {
        NAME
    }

    fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
        Ok(ConcreteDataType::uint64_datatype())
    }

    fn signature(&self) -> Signature {
        // Only argument: HyperLogLogPlus state (binary)
        Signature::exact(
            vec![ConcreteDataType::binary_datatype()],
            Volatility::Immutable,
        )
    }

    fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
        if columns.len() != 1 {
            return InvalidFuncArgsSnafu {
                err_msg: format!("hll_count expects 1 argument, got {}", columns.len()),
            }
            .fail();
        }

        let hll_vec = columns[0]
            .as_any()
            .downcast_ref::<BinaryVector>()
            .with_context(|| DowncastVectorSnafu {
                err_msg: format!("expect BinaryVector, got {}", columns[0].vector_type_name()),
            })?;
        let len = hll_vec.len();
        let mut builder = UInt64VectorBuilder::with_capacity(len);

        for i in 0..len {
            let hll_opt = hll_vec.get_data(i);

            if hll_opt.is_none() {
                builder.push_null();
                continue;
            }

            let hll_bytes = hll_opt.unwrap();

            // Deserialize the HyperLogLogPlus from its bincode representation
            let mut hll: HllStateType = match bincode::deserialize(hll_bytes) {
                Ok(h) => h,
                Err(e) => {
                    common_telemetry::trace!("Failed to deserialize HyperLogLogPlus: {}", e);
                    builder.push_null();
                    continue;
                }
            };

            builder.push(Some(hll.count().round() as u64));
        }

        Ok(builder.to_vector())
    }
}

#[cfg(test)]
mod tests {
    use datatypes::vectors::BinaryVector;

    use super::*;
    use crate::utils::FixedRandomState;

    #[test]
    fn test_hll_count_function() {
        let function = HllCalcFunction;
        assert_eq!("hll_count", function.name());
        assert_eq!(
            ConcreteDataType::uint64_datatype(),
            function
                .return_type(&[ConcreteDataType::uint64_datatype()])
                .unwrap()
        );

        // Create a test HLL
        let mut hll = HllStateType::new(14, FixedRandomState::new()).unwrap();
        for i in 1..=10 {
            hll.insert(&i.to_string());
        }

        let serialized_bytes = bincode::serialize(&hll).unwrap();
        let args: Vec<VectorRef> = vec![Arc::new(BinaryVector::from(vec![Some(serialized_bytes)]))];

        let result = function.eval(FunctionContext::default(), &args).unwrap();
        assert_eq!(result.len(), 1);

        // Test cardinality estimate
        if let datatypes::value::Value::UInt64(v) = result.get(0) {
            assert_eq!(v, 10);
        } else {
            panic!("Expected uint64 value");
        }
    }

    #[test]
    fn test_hll_count_function_errors() {
        let function = HllCalcFunction;

        // Test with invalid number of arguments
        let args: Vec<VectorRef> = vec![];
        let result = function.eval(FunctionContext::default(), &args);
        assert!(result.is_err());
        assert!(result
            .unwrap_err()
            .to_string()
            .contains("hll_count expects 1 argument"));

        // Test with invalid binary data
        let args: Vec<VectorRef> = vec![Arc::new(BinaryVector::from(vec![Some(vec![1, 2, 3])]))]; // Invalid binary data
        let result = function.eval(FunctionContext::default(), &args).unwrap();
        assert_eq!(result.len(), 1);
        assert!(matches!(result.get(0), datatypes::value::Value::Null));
    }
}
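// Editor's note: `count()` on a `hyperloglogplus` sketch takes `&mut self`,
// which is why the deserialized sketches in this file are declared `mut`;
// the crate may adjust its internal representation while producing the estimate.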
211
src/common/function/src/scalars/uddsketch_calc.rs
Normal file
@@ -0,0 +1,211 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Implementation of the scalar function `uddsketch_calc`.

use std::fmt;
use std::fmt::Display;
use std::sync::Arc;

use common_query::error::{DowncastVectorSnafu, InvalidFuncArgsSnafu, Result};
use common_query::prelude::{Signature, Volatility};
use datatypes::data_type::ConcreteDataType;
use datatypes::prelude::Vector;
use datatypes::scalars::{ScalarVector, ScalarVectorBuilder};
use datatypes::vectors::{BinaryVector, Float64VectorBuilder, MutableVector, VectorRef};
use snafu::OptionExt;
use uddsketch::UDDSketch;

use crate::function::{Function, FunctionContext};
use crate::function_registry::FunctionRegistry;

const NAME: &str = "uddsketch_calc";

/// UddSketchCalcFunction implements the scalar function `uddsketch_calc`.
///
/// It accepts two arguments:
/// 1. A percentile (as f64) for which to compute the estimated quantile (e.g. 0.95 for p95).
/// 2. The serialized UDDSketch state, as produced by the aggregator (binary).
///
/// For each row, it deserializes the sketch and returns the computed quantile value.
#[derive(Debug, Default)]
pub struct UddSketchCalcFunction;

impl UddSketchCalcFunction {
    pub fn register(registry: &FunctionRegistry) {
        registry.register(Arc::new(UddSketchCalcFunction));
    }
}

impl Display for UddSketchCalcFunction {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", NAME.to_ascii_uppercase())
    }
}

impl Function for UddSketchCalcFunction {
    fn name(&self) -> &str {
        NAME
    }

    fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
        Ok(ConcreteDataType::float64_datatype())
    }

    fn signature(&self) -> Signature {
        // First argument: percentile (float64)
        // Second argument: UDDSketch state (binary)
        Signature::exact(
            vec![
                ConcreteDataType::float64_datatype(),
                ConcreteDataType::binary_datatype(),
            ],
            Volatility::Immutable,
        )
    }

    fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
        if columns.len() != 2 {
            return InvalidFuncArgsSnafu {
                err_msg: format!("uddsketch_calc expects 2 arguments, got {}", columns.len()),
            }
            .fail();
        }

        let perc_vec = &columns[0];
        let sketch_vec = columns[1]
            .as_any()
            .downcast_ref::<BinaryVector>()
            .with_context(|| DowncastVectorSnafu {
                err_msg: format!("expect BinaryVector, got {}", columns[1].vector_type_name()),
            })?;
        let len = sketch_vec.len();
        let mut builder = Float64VectorBuilder::with_capacity(len);

        for i in 0..len {
            let perc_opt = perc_vec.get(i).as_f64_lossy();
            let sketch_opt = sketch_vec.get_data(i);

            if sketch_opt.is_none() || perc_opt.is_none() {
                builder.push_null();
                continue;
            }

            let sketch_bytes = sketch_opt.unwrap();
            let perc = perc_opt.unwrap();

            // Deserialize the UDDSketch from its bincode representation
            let sketch: UDDSketch = match bincode::deserialize(sketch_bytes) {
                Ok(s) => s,
                Err(e) => {
                    common_telemetry::trace!("Failed to deserialize UDDSketch: {}", e);
                    builder.push_null();
                    continue;
                }
            };

            // Compute the estimated quantile from the sketch
            let result = sketch.estimate_quantile(perc);
            builder.push(Some(result));
        }

        Ok(builder.to_vector())
    }
}

#[cfg(test)]
mod tests {
    use std::sync::Arc;

    use datatypes::vectors::{BinaryVector, Float64Vector};

    use super::*;

    #[test]
    fn test_uddsketch_calc_function() {
        let function = UddSketchCalcFunction;
        assert_eq!("uddsketch_calc", function.name());
        assert_eq!(
            ConcreteDataType::float64_datatype(),
            function
                .return_type(&[ConcreteDataType::float64_datatype()])
                .unwrap()
        );

        // Create a test sketch
        let mut sketch = UDDSketch::new(128, 0.01);
        sketch.add_value(10.0);
        sketch.add_value(20.0);
        sketch.add_value(30.0);
        sketch.add_value(40.0);
        sketch.add_value(50.0);
        sketch.add_value(60.0);
        sketch.add_value(70.0);
        sketch.add_value(80.0);
        sketch.add_value(90.0);
        sketch.add_value(100.0);

        // Get the expected values directly from the sketch
        let expected_p50 = sketch.estimate_quantile(0.5);
        let expected_p90 = sketch.estimate_quantile(0.9);
        let expected_p95 = sketch.estimate_quantile(0.95);

        let serialized = bincode::serialize(&sketch).unwrap();
        let percentiles = vec![0.5, 0.9, 0.95];

        let args: Vec<VectorRef> = vec![
            Arc::new(Float64Vector::from_vec(percentiles.clone())),
            Arc::new(BinaryVector::from(vec![Some(serialized.clone()); 3])),
        ];

        let result = function.eval(FunctionContext::default(), &args).unwrap();
        assert_eq!(result.len(), 3);

        // Test median (p50)
        assert!(
            matches!(result.get(0), datatypes::value::Value::Float64(v) if (v - expected_p50).abs() < 1e-10)
        );
        // Test p90
        assert!(
            matches!(result.get(1), datatypes::value::Value::Float64(v) if (v - expected_p90).abs() < 1e-10)
        );
        // Test p95
        assert!(
            matches!(result.get(2), datatypes::value::Value::Float64(v) if (v - expected_p95).abs() < 1e-10)
        );
    }

    #[test]
    fn test_uddsketch_calc_function_errors() {
        let function = UddSketchCalcFunction;

        // Test with invalid number of arguments
        let args: Vec<VectorRef> = vec![Arc::new(Float64Vector::from_vec(vec![0.95]))];
        let result = function.eval(FunctionContext::default(), &args);
        assert!(result.is_err());
        assert!(result
            .unwrap_err()
            .to_string()
            .contains("uddsketch_calc expects 2 arguments"));

        // Test with invalid binary data
        let args: Vec<VectorRef> = vec![
            Arc::new(Float64Vector::from_vec(vec![0.95])),
            Arc::new(BinaryVector::from(vec![Some(vec![1, 2, 3])])), // Invalid binary data
        ];
        let result = function.eval(FunctionContext::default(), &args).unwrap();
        assert_eq!(result.len(), 1);
        assert!(matches!(result.get(0), datatypes::value::Value::Null));
    }
}
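// Editor's note: the 0.01 passed to `UDDSketch::new` in the test above is the
// sketch's relative-error budget, so a value such as `estimate_quantile(0.95)`
// should land within roughly 1% of the true quantile (compare `max_error()` in
// the state aggregator's roundtrip test earlier in this patch).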
@@ -12,6 +12,11 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::hash::BuildHasher;

use ahash::RandomState;
use serde::{Deserialize, Serialize};

/// Escapes special characters in the provided pattern string for `LIKE`.
///
/// Specifically, it prefixes the backslash (`\`), percent (`%`), and underscore (`_`)
@@ -32,6 +37,71 @@ pub fn escape_like_pattern(pattern: &str) -> String {
        })
        .collect::<String>()
}
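// Illustrative expectation, assuming the escape character is the backslash
// that the doc comment above describes (hypothetical input, not from the patch):
//   assert_eq!(escape_like_pattern(r"50%_off\"), r"50\%\_off\\");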

/// A random state with fixed seeds.
///
/// This is used to ensure that the hash values are consistent across
/// different processes, and easy to serialize and deserialize.
#[derive(Debug)]
pub struct FixedRandomState {
    state: RandomState,
}

impl FixedRandomState {
    // some random seeds
    const RANDOM_SEED_0: u64 = 0x517cc1b727220a95;
    const RANDOM_SEED_1: u64 = 0x428a2f98d728ae22;
    const RANDOM_SEED_2: u64 = 0x7137449123ef65cd;
    const RANDOM_SEED_3: u64 = 0xb5c0fbcfec4d3b2f;

    pub fn new() -> Self {
        Self {
            state: ahash::RandomState::with_seeds(
                Self::RANDOM_SEED_0,
                Self::RANDOM_SEED_1,
                Self::RANDOM_SEED_2,
                Self::RANDOM_SEED_3,
            ),
        }
    }
}

impl Default for FixedRandomState {
    fn default() -> Self {
        Self::new()
    }
}

impl BuildHasher for FixedRandomState {
    type Hasher = ahash::AHasher;

    fn build_hasher(&self) -> Self::Hasher {
        self.state.build_hasher()
    }

    fn hash_one<T: std::hash::Hash>(&self, x: T) -> u64 {
        self.state.hash_one(x)
    }
}

impl Serialize for FixedRandomState {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        serializer.serialize_unit()
    }
}

impl<'de> Deserialize<'de> for FixedRandomState {
    fn deserialize<D>(_deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        Ok(Self::new())
    }
}
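// A small determinism sketch: two independently built `FixedRandomState`s hash
// identically, which is what lets serialized HLL states from different
// processes be merged. (Illustrative; not part of the original test module.)
fn _fixed_state_is_deterministic() {
    use std::hash::BuildHasher;
    let (a, b) = (FixedRandomState::new(), FixedRandomState::new());
    assert_eq!(a.hash_one("greptime"), b.hash_one("greptime"));
}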

#[cfg(test)]
mod tests {
    use super::*;

@@ -22,4 +22,4 @@ store-api.workspace = true
table.workspace = true

[dev-dependencies]
paste = "1.0"
paste.workspace = true

@@ -16,7 +16,6 @@ use std::collections::HashMap;
use std::sync::Arc;

use futures::future::BoxFuture;
use futures::TryStreamExt;
use moka::future::Cache;
use moka::ops::compute::Op;
use table::metadata::TableId;
@@ -54,9 +53,13 @@ fn init_factory(table_flow_manager: TableFlowManagerRef) -> Initializer<TableId,
    Box::pin(async move {
        table_flow_manager
            .flows(table_id)
            .map_ok(|(key, value)| (key.flownode_id(), value.peer))
            .try_collect::<HashMap<_, _>>()
            .await
            .map(|flows| {
                flows
                    .into_iter()
                    .map(|(key, value)| (key.flownode_id(), value.peer))
                    .collect::<HashMap<_, _>>()
            })
            // We must cache the `HashSet` even if it's empty,
            // to avoid hitting the remote storage again on future requests;
            // if the value is added to the remote storage,

@@ -12,8 +12,10 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::hash::{DefaultHasher, Hash, Hasher};
use std::str::FromStr;

use api::v1::meta::HeartbeatRequest;
use common_error::ext::ErrorExt;
use lazy_static::lazy_static;
use regex::Regex;
@@ -55,12 +57,10 @@ pub trait ClusterInfo {
}

/// The key of [NodeInfo] in the storage. The format is `__meta_cluster_node_info-{cluster_id}-{role}-{node_id}`.
///
/// This key cannot be used to describe the `Metasrv` because the `Metasrv` does not have
/// a `cluster_id`; it serves multiple clusters.
#[derive(Debug, Clone, Eq, Hash, PartialEq, Serialize, Deserialize)]
#[derive(Debug, Clone, Copy, Eq, Hash, PartialEq, Serialize, Deserialize)]
pub struct NodeInfoKey {
    /// The cluster id.
    // TODO(hl): remove cluster_id as it is not assigned anywhere.
    pub cluster_id: ClusterId,
    /// The role of the node. It can be `[Role::Datanode]` or `[Role::Frontend]`.
    pub role: Role,
@@ -69,6 +69,28 @@ pub struct NodeInfoKey {
}

impl NodeInfoKey {
    /// Try to create a `NodeInfoKey` from a "good" heartbeat request, "good" meaning that every
    /// needed piece of information is provided and valid.
    pub fn new(request: &HeartbeatRequest) -> Option<Self> {
        let HeartbeatRequest { header, peer, .. } = request;
        let header = header.as_ref()?;
        let peer = peer.as_ref()?;

        let role = header.role.try_into().ok()?;
        let node_id = match role {
            // Because the Frontend is stateless, it's too easy to neglect choosing a unique id
            // for it when setting up a cluster. So we calculate its id from its address.
            Role::Frontend => calculate_node_id(&peer.addr),
            _ => peer.id,
        };

        Some(NodeInfoKey {
            cluster_id: header.cluster_id,
            role,
            node_id,
        })
    }

    pub fn key_prefix_with_cluster_id(cluster_id: u64) -> String {
        format!("{}-{}-", CLUSTER_NODE_INFO_PREFIX, cluster_id)
    }
@@ -83,6 +105,13 @@ impl NodeInfoKey {
    }
}

/// Calculate (by using the DefaultHasher) the node's id from its address.
fn calculate_node_id(addr: &str) -> u64 {
    let mut hasher = DefaultHasher::new();
    addr.hash(&mut hasher);
    hasher.finish()
}

/// The information of a node in the cluster.
#[derive(Debug, Serialize, Deserialize)]
pub struct NodeInfo {
@@ -100,7 +129,7 @@ pub struct NodeInfo {
    pub start_time_ms: u64,
}

#[derive(Debug, Clone, Eq, Hash, PartialEq, Serialize, Deserialize)]
#[derive(Debug, Clone, Copy, Eq, Hash, PartialEq, Serialize, Deserialize)]
pub enum Role {
    Datanode,
    Frontend,
@@ -201,8 +230,8 @@ impl TryFrom<Vec<u8>> for NodeInfoKey {
    }
}

impl From<NodeInfoKey> for Vec<u8> {
    fn from(key: NodeInfoKey) -> Self {
impl From<&NodeInfoKey> for Vec<u8> {
    fn from(key: &NodeInfoKey) -> Self {
        format!(
            "{}-{}-{}-{}",
            CLUSTER_NODE_INFO_PREFIX,
@@ -271,6 +300,7 @@ impl TryFrom<i32> for Role {
mod tests {
    use std::assert_matches::assert_matches;

    use super::*;
    use crate::cluster::Role::{Datanode, Frontend};
    use crate::cluster::{DatanodeStatus, NodeInfo, NodeInfoKey, NodeStatus};
    use crate::peer::Peer;
@@ -283,7 +313,7 @@ mod tests {
            node_id: 2,
        };

        let key_bytes: Vec<u8> = key.into();
        let key_bytes: Vec<u8> = (&key).into();
        let new_key: NodeInfoKey = key_bytes.try_into().unwrap();

        assert_eq!(1, new_key.cluster_id);
@@ -338,4 +368,26 @@ mod tests {
        let prefix = NodeInfoKey::key_prefix_with_role(2, Frontend);
        assert_eq!(prefix, "__meta_cluster_node_info-2-1-");
    }

    #[test]
    fn test_calculate_node_id_from_addr() {
        // Test the empty string
        assert_eq!(calculate_node_id(""), calculate_node_id(""));

        // Test that the same address returns the same id
        let addr1 = "127.0.0.1:8080";
        let id1 = calculate_node_id(addr1);
        let id2 = calculate_node_id(addr1);
        assert_eq!(id1, id2);

        // Test that different addresses return different ids
        let addr2 = "127.0.0.1:8081";
        let id3 = calculate_node_id(addr2);
        assert_ne!(id1, id3);

        // Test a long address
        let long_addr = "very.long.domain.name.example.com:9999";
        let id4 = calculate_node_id(long_addr);
        assert!(id4 > 0);
    }
}

@@ -15,6 +15,7 @@
mod metadata;

use std::collections::BTreeMap;
use std::fmt;

use api::v1::flow::flow_request::Body as PbFlowRequest;
use api::v1::flow::{CreateRequest, FlowRequest, FlowRequestHeader};
@@ -28,7 +29,6 @@ use common_procedure::{
use common_telemetry::info;
use common_telemetry::tracing_context::TracingContext;
use futures::future::join_all;
use futures::TryStreamExt;
use itertools::Itertools;
use serde::{Deserialize, Serialize};
use snafu::{ensure, ResultExt};
@@ -77,6 +77,7 @@ impl CreateFlowProcedure {
                query_context,
                state: CreateFlowState::Prepare,
                prev_flow_info_value: None,
                flow_type: None,
            },
        }
    }
@@ -104,7 +105,7 @@ impl CreateFlowProcedure {
        if create_if_not_exists && or_replace {
            // this is forbidden because it is not clear what that combination would mean exactly
            return error::UnsupportedSnafu {
                operation: "Create flow with both `IF NOT EXISTS` and `OR REPLACE`".to_string(),
                operation: "Create flow with both `IF NOT EXISTS` and `OR REPLACE`",
            }
            .fail();
        }
@@ -129,9 +130,10 @@ impl CreateFlowProcedure {
                .flow_metadata_manager
                .flow_route_manager()
                .routes(flow_id)
                .map_ok(|(_, value)| value.peer)
                .try_collect::<Vec<_>>()
                .await?;
                .await?
                .into_iter()
                .map(|(_, value)| value.peer)
                .collect::<Vec<_>>();
            self.data.flow_id = Some(flow_id);
            self.data.peers = peers;
            info!("Replacing flow, flow_id: {}", flow_id);
@@ -175,6 +177,8 @@ impl CreateFlowProcedure {
            self.allocate_flow_id().await?;
        }
        self.data.state = CreateFlowState::CreateFlows;
        // determine flow type
        self.data.flow_type = Some(determine_flow_type(&self.data.task));

        Ok(Status::executing(true))
    }
@@ -309,6 +313,11 @@ impl Procedure for CreateFlowProcedure {
    }
}

pub fn determine_flow_type(_flow_task: &CreateFlowTask) -> FlowType {
    // TODO(discord9): determine flow type
    FlowType::RecordingRule
}

/// The state of [CreateFlowProcedure].
#[derive(Debug, Clone, Serialize, Deserialize, AsRefStr, PartialEq)]
pub enum CreateFlowState {
@@ -322,6 +331,35 @@ pub enum CreateFlowState {
    CreateMetadata,
}

/// The type of flow.
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub enum FlowType {
    /// The flow is a recording rule task.
    RecordingRule,
    /// The flow is a streaming task.
    Streaming,
}

impl FlowType {
    pub const RECORDING_RULE: &str = "recording_rule";
    pub const STREAMING: &str = "streaming";
}

impl Default for FlowType {
    fn default() -> Self {
        Self::RecordingRule
    }
}

impl fmt::Display for FlowType {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            FlowType::RecordingRule => write!(f, "{}", FlowType::RECORDING_RULE),
            FlowType::Streaming => write!(f, "{}", FlowType::STREAMING),
        }
    }
}

/// The serializable data.
#[derive(Debug, Serialize, Deserialize)]
pub struct CreateFlowData {
@@ -335,6 +373,7 @@ pub struct CreateFlowData {
    /// For verifying that the previous value is consistent when the flow metadata needs updating.
    /// Only set when `or_replace` is true.
    pub(crate) prev_flow_info_value: Option<DeserializedValueWithBytes<FlowInfoValue>>,
    pub(crate) flow_type: Option<FlowType>,
}

impl From<&CreateFlowData> for CreateRequest {
@@ -342,7 +381,7 @@ impl From<&CreateFlowData> for CreateRequest {
        let flow_id = value.flow_id.unwrap();
        let source_table_ids = &value.source_table_ids;

        CreateRequest {
        let mut req = CreateRequest {
            flow_id: Some(api::v1::FlowId { id: flow_id }),
            source_table_ids: source_table_ids
                .iter()
@@ -356,7 +395,11 @@ impl From<&CreateFlowData> for CreateRequest {
            comment: value.task.comment.clone(),
            sql: value.task.sql.clone(),
            flow_options: value.task.flow_options.clone(),
        }
        };

        let flow_type = value.flow_type.unwrap_or_default().to_string();
        req.flow_options.insert("flow_type".to_string(), flow_type);
        req
    }
}

@@ -369,7 +412,7 @@ impl From<&CreateFlowData> for (FlowInfoValue, Vec<(FlowPartitionId, FlowRouteVa
            expire_after,
            comment,
            sql,
            flow_options: options,
            flow_options: mut options,
            ..
        } = value.task.clone();

@@ -386,19 +429,21 @@ impl From<&CreateFlowData> for (FlowInfoValue, Vec<(FlowPartitionId, FlowRouteVa
            .map(|(idx, peer)| (idx as u32, FlowRouteValue { peer: peer.clone() }))
            .collect::<Vec<_>>();

        (
            FlowInfoValue {
                source_table_ids: value.source_table_ids.clone(),
                sink_table_name,
                flownode_ids,
                catalog_name,
                flow_name,
                raw_sql: sql,
                expire_after,
                comment,
                options,
            },
            flow_routes,
        )
        let flow_type = value.flow_type.unwrap_or_default().to_string();
        options.insert("flow_type".to_string(), flow_type);

        let flow_info = FlowInfoValue {
            source_table_ids: value.source_table_ids.clone(),
            sink_table_name,
            flownode_ids,
            catalog_name,
            flow_name,
            raw_sql: sql,
            expire_after,
            comment,
            options,
        };

        (flow_info, flow_routes)
    }
}

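// Editor's note on the net effect of the two conversions above: both the
// `CreateRequest` sent to flownodes and the persisted `FlowInfoValue` now carry
// the flow type in their options, e.g. {"flow_type": "recording_rule"} for the
// current default.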
@@ -128,7 +128,7 @@ impl State for DropDatabaseExecutor {
            .await?;
        executor.invalidate_table_cache(ddl_ctx).await?;
        executor
            .on_drop_regions(ddl_ctx, &self.physical_region_routes)
            .on_drop_regions(ddl_ctx, &self.physical_region_routes, true)
            .await?;
        info!("Table: {}({}) is dropped", self.table_name, self.table_id);

@@ -13,7 +13,6 @@
// limitations under the License.

use common_catalog::format_full_flow_name;
use futures::TryStreamExt;
use snafu::{ensure, OptionExt};

use crate::ddl::drop_flow::DropFlowProcedure;
@@ -39,9 +38,10 @@ impl DropFlowProcedure {
            .flow_metadata_manager
            .flow_route_manager()
            .routes(self.data.task.flow_id)
            .map_ok(|(_, value)| value)
            .try_collect::<Vec<_>>()
            .await?;
            .await?
            .into_iter()
            .map(|(_, value)| value)
            .collect::<Vec<_>>();
        ensure!(
            !flow_route_values.is_empty(),
            error::FlowRouteNotFoundSnafu {

@@ -156,7 +156,7 @@ impl DropTableProcedure {

    pub async fn on_datanode_drop_regions(&mut self) -> Result<Status> {
        self.executor
            .on_drop_regions(&self.context, &self.data.physical_region_routes)
            .on_drop_regions(&self.context, &self.data.physical_region_routes, false)
            .await?;
        self.data.state = DropTableState::DeleteTombstone;
        Ok(Status::executing(true))

@@ -214,6 +214,7 @@ impl DropTableExecutor {
        &self,
        ctx: &DdlContext,
        region_routes: &[RegionRoute],
        fast_path: bool,
    ) -> Result<()> {
        let leaders = find_leaders(region_routes);
        let mut drop_region_tasks = Vec::with_capacity(leaders.len());
@@ -236,6 +237,7 @@ impl DropTableExecutor {
            }),
            body: Some(region_request::Body::Drop(PbDropRegionRequest {
                region_id: region_id.as_u64(),
                fast_path,
            })),
        };
        let datanode = datanode.clone();

@@ -16,9 +16,9 @@ pub mod flow_info;
pub(crate) mod flow_name;
pub(crate) mod flow_route;
pub mod flow_state;
mod flownode_addr_helper;
pub(crate) mod flownode_flow;
pub(crate) mod table_flow;

use std::ops::Deref;
use std::sync::Arc;

@@ -506,7 +506,6 @@ mod tests {
        let routes = flow_metadata_manager
            .flow_route_manager()
            .routes(flow_id)
            .try_collect::<Vec<_>>()
            .await
            .unwrap();
        assert_eq!(
@@ -538,7 +537,6 @@ mod tests {
        let nodes = flow_metadata_manager
            .table_flow_manager()
            .flows(table_id)
            .try_collect::<Vec<_>>()
            .await
            .unwrap();
        assert_eq!(
@@ -727,7 +725,6 @@ mod tests {
        let routes = flow_metadata_manager
            .flow_route_manager()
            .routes(flow_id)
            .try_collect::<Vec<_>>()
            .await
            .unwrap();
        assert_eq!(
@@ -759,7 +756,6 @@ mod tests {
        let nodes = flow_metadata_manager
            .table_flow_manager()
            .flows(table_id)
            .try_collect::<Vec<_>>()
            .await
            .unwrap();
        assert_eq!(

@@ -12,14 +12,15 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use futures::stream::BoxStream;
use futures::TryStreamExt;
use lazy_static::lazy_static;
use regex::Regex;
use serde::{Deserialize, Serialize};
use snafu::OptionExt;

use crate::error::{self, Result};
use crate::key::flow::FlowScoped;
use crate::key::flow::{flownode_addr_helper, FlowScoped};
use crate::key::node_address::NodeAddressKey;
use crate::key::{BytesAdapter, FlowId, FlowPartitionId, MetadataKey, MetadataValue};
use crate::kv_backend::txn::{Txn, TxnOp};
use crate::kv_backend::KvBackendRef;
@@ -167,10 +168,7 @@ impl FlowRouteManager {
    }

    /// Retrieves all [FlowRouteValue]s of the specified `flow_id`.
    pub fn routes(
        &self,
        flow_id: FlowId,
    ) -> BoxStream<'static, Result<(FlowRouteKey, FlowRouteValue)>> {
    pub async fn routes(&self, flow_id: FlowId) -> Result<Vec<(FlowRouteKey, FlowRouteValue)>> {
        let start_key = FlowRouteKey::range_start_key(flow_id);
        let req = RangeRequest::new().with_prefix(start_key);
        let stream = PaginationStream::new(
@@ -181,7 +179,9 @@ impl FlowRouteManager {
        )
        .into_stream();

        Box::pin(stream)
        let mut res = stream.try_collect::<Vec<_>>().await?;
        self.remap_flow_route_addresses(&mut res).await?;
        Ok(res)
    }

    /// Builds a create flow routes transaction.
@@ -203,6 +203,28 @@ impl FlowRouteManager {

        Ok(Txn::new().and_then(txns))
    }

    async fn remap_flow_route_addresses(
        &self,
        flow_routes: &mut [(FlowRouteKey, FlowRouteValue)],
    ) -> Result<()> {
        let keys = flow_routes
            .iter()
            .map(|(_, value)| NodeAddressKey::with_flownode(value.peer.id))
            .collect();
        let flow_node_addrs =
            flownode_addr_helper::get_flownode_addresses(&self.kv_backend, keys).await?;
        for (_, flow_route_value) in flow_routes.iter_mut() {
            let flownode_id = flow_route_value.peer.id;
            // If an id lacks a corresponding address in `flow_node_addrs`,
            // it means the old address in `flow_route_value` is still valid,
            // which is expected.
            if let Some(node_addr) = flow_node_addrs.get(&flownode_id) {
                flow_route_value.peer.addr = node_addr.peer.addr.clone();
            }
        }
        Ok(())
    }
}

#[cfg(test)]

47
src/common/meta/src/key/flow/flownode_addr_helper.rs
Normal file
@@ -0,0 +1,47 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::collections::HashMap;

use crate::error::Result;
use crate::key::node_address::{NodeAddressKey, NodeAddressValue};
use crate::key::{MetadataKey, MetadataValue};
use crate::kv_backend::KvBackendRef;
use crate::rpc::store::BatchGetRequest;

/// Get the addresses of the flownodes.
/// The result is a map: node_id -> NodeAddressValue.
pub(crate) async fn get_flownode_addresses(
    kv_backend: &KvBackendRef,
    keys: Vec<NodeAddressKey>,
) -> Result<HashMap<u64, NodeAddressValue>> {
    if keys.is_empty() {
        return Ok(HashMap::default());
    }

    let req = BatchGetRequest {
        keys: keys.into_iter().map(|k| k.to_bytes()).collect(),
    };
    kv_backend
        .batch_get(req)
        .await?
        .kvs
        .into_iter()
        .map(|kv| {
            let key = NodeAddressKey::from_bytes(&kv.key)?;
            let value = NodeAddressValue::try_from_raw_value(&kv.value)?;
            Ok((key.node_id, value))
        })
        .collect()
}

@@ -14,7 +14,7 @@
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use futures::stream::BoxStream;
|
||||
use futures::TryStreamExt;
|
||||
use lazy_static::lazy_static;
|
||||
use regex::Regex;
|
||||
use serde::{Deserialize, Serialize};
|
||||
@@ -22,7 +22,8 @@ use snafu::OptionExt;
|
||||
use table::metadata::TableId;
|
||||
|
||||
use crate::error::{self, Result};
|
||||
use crate::key::flow::FlowScoped;
|
||||
use crate::key::flow::{flownode_addr_helper, FlowScoped};
|
||||
use crate::key::node_address::NodeAddressKey;
|
||||
use crate::key::{BytesAdapter, FlowId, FlowPartitionId, MetadataKey, MetadataValue};
|
||||
use crate::kv_backend::txn::{Txn, TxnOp};
|
||||
use crate::kv_backend::KvBackendRef;
|
||||
@@ -196,10 +197,7 @@ impl TableFlowManager {
|
||||
/// Retrieves all [TableFlowKey]s of the specified `table_id`.
|
||||
///
|
||||
/// TODO(discord9): add cache for it since range request does not support cache.
|
||||
pub fn flows(
|
||||
&self,
|
||||
table_id: TableId,
|
||||
) -> BoxStream<'static, Result<(TableFlowKey, TableFlowValue)>> {
|
||||
pub async fn flows(&self, table_id: TableId) -> Result<Vec<(TableFlowKey, TableFlowValue)>> {
|
||||
let start_key = TableFlowKey::range_start_key(table_id);
|
||||
let req = RangeRequest::new().with_prefix(start_key);
|
||||
let stream = PaginationStream::new(
|
||||
@@ -210,7 +208,9 @@ impl TableFlowManager {
|
||||
)
|
||||
.into_stream();
|
||||
|
||||
Box::pin(stream)
|
||||
let mut res = stream.try_collect::<Vec<_>>().await?;
|
||||
self.remap_table_flow_addresses(&mut res).await?;
|
||||
Ok(res)
|
||||
}
|
||||
|
||||
/// Builds a create table flow transaction.
|
||||
@@ -238,6 +238,28 @@ impl TableFlowManager {
|
||||
|
||||
Ok(Txn::new().and_then(txns))
|
||||
}
|
||||
|
||||
async fn remap_table_flow_addresses(
|
||||
&self,
|
||||
table_flows: &mut [(TableFlowKey, TableFlowValue)],
|
||||
) -> Result<()> {
|
||||
let keys = table_flows
|
||||
.iter()
|
||||
.map(|(_, value)| NodeAddressKey::with_flownode(value.peer.id))
|
||||
.collect::<Vec<_>>();
|
||||
let flownode_addrs =
|
||||
flownode_addr_helper::get_flownode_addresses(&self.kv_backend, keys).await?;
|
||||
for (_, table_flow_value) in table_flows.iter_mut() {
|
||||
let flownode_id = table_flow_value.peer.id;
|
||||
// If an id lacks a corresponding address in the `flow_node_addrs`,
|
||||
// it means the old address in `table_flow_value` is still valid,
|
||||
// which is expected.
|
||||
if let Some(flownode_addr) = flownode_addrs.get(&flownode_id) {
|
||||
table_flow_value.peer.addr = flownode_addr.peer.addr.clone();
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
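Since `flows` now returns a collected `Vec` instead of a stream, callers await a single, already-remapped result; a minimal caller sketch under that assumption (`manager` and `table_id` are placeholders):

    // Addresses in the returned values have already been remapped to the
    // latest known flownode addresses.
    let table_flows = manager.flows(table_id).await?;
    for (key, value) in &table_flows {
        println!("{:?} -> {}", key, value.peer.addr);
    }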

#[cfg(test)]

@@ -39,6 +39,10 @@ impl NodeAddressKey {
    pub fn with_datanode(node_id: u64) -> Self {
        Self::new(Role::Datanode, node_id)
    }

    pub fn with_flownode(node_id: u64) -> Self {
        Self::new(Role::Flownode, node_id)
    }
}

#[derive(Debug, PartialEq, Serialize, Deserialize, Clone)]

@@ -34,6 +34,7 @@ pub mod kv_backend;
pub mod leadership_notifier;
pub mod lock_key;
pub mod metrics;
pub mod node_expiry_listener;
pub mod node_manager;
pub mod peer;
pub mod range_stream;
src/common/meta/src/node_expiry_listener.rs (new file, 152 lines)
@@ -0,0 +1,152 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Mutex;
use std::time::Duration;

use common_telemetry::{debug, error, info, warn};
use tokio::task::JoinHandle;
use tokio::time::{interval, MissedTickBehavior};

use crate::cluster::{NodeInfo, NodeInfoKey};
use crate::error;
use crate::kv_backend::ResettableKvBackendRef;
use crate::leadership_notifier::LeadershipChangeListener;
use crate::rpc::store::RangeRequest;
use crate::rpc::KeyValue;

/// [NodeExpiryListener] periodically checks all node info in memory and removes
/// expired node info to prevent memory leaks.
pub struct NodeExpiryListener {
    handle: Mutex<Option<JoinHandle<()>>>,
    max_idle_time: Duration,
    in_memory: ResettableKvBackendRef,
}

impl Drop for NodeExpiryListener {
    fn drop(&mut self) {
        self.stop();
    }
}

impl NodeExpiryListener {
    pub fn new(max_idle_time: Duration, in_memory: ResettableKvBackendRef) -> Self {
        Self {
            handle: Mutex::new(None),
            max_idle_time,
            in_memory,
        }
    }

    async fn start(&self) {
        let mut handle = self.handle.lock().unwrap();
        if handle.is_none() {
            let in_memory = self.in_memory.clone();

            let max_idle_time = self.max_idle_time;
            let ticker_loop = tokio::spawn(async move {
                // Run the cleanup task every minute.
                let mut interval = interval(Duration::from_secs(60));
                interval.set_missed_tick_behavior(MissedTickBehavior::Skip);
                loop {
                    interval.tick().await;
                    if let Err(e) = Self::clean_expired_nodes(&in_memory, max_idle_time).await {
                        error!(e; "Failed to clean expired node");
                    }
                }
            });
            *handle = Some(ticker_loop);
        }
    }

    fn stop(&self) {
        if let Some(handle) = self.handle.lock().unwrap().take() {
            handle.abort();
            info!("Node expiry listener stopped")
        }
    }

    /// Cleans expired nodes from memory.
    async fn clean_expired_nodes(
        in_memory: &ResettableKvBackendRef,
        max_idle_time: Duration,
    ) -> error::Result<()> {
        let node_keys = Self::list_expired_nodes(in_memory, max_idle_time).await?;
        for key in node_keys {
            let key_bytes: Vec<u8> = (&key).into();
            if let Err(e) = in_memory.delete(&key_bytes, false).await {
                warn!(e; "Failed to delete expired node: {:?}", key_bytes);
            } else {
                debug!("Deleted expired node key: {:?}", key);
            }
        }
        Ok(())
    }

    /// Lists expired nodes that have been inactive for more than `max_idle_time`.
    async fn list_expired_nodes(
        in_memory: &ResettableKvBackendRef,
        max_idle_time: Duration,
    ) -> error::Result<impl Iterator<Item = NodeInfoKey>> {
        let prefix = NodeInfoKey::key_prefix_with_cluster_id(0);
        let req = RangeRequest::new().with_prefix(prefix);
        let current_time_millis = common_time::util::current_time_millis();
        let resp = in_memory.range(req).await?;
        Ok(resp
            .kvs
            .into_iter()
            .filter_map(move |KeyValue { key, value }| {
                let Ok(info) = NodeInfo::try_from(value).inspect_err(|e| {
                    warn!(e; "Unrecognized node info value");
                }) else {
                    return None;
                };
                if (current_time_millis - info.last_activity_ts) > max_idle_time.as_millis() as i64
                {
                    NodeInfoKey::try_from(key)
                        .inspect_err(|e| {
                            warn!(e; "Unrecognized node info key: {:?}", info.peer);
                        })
                        .ok()
                        .inspect(|node_key| {
                            debug!("Found expired node: {:?}", node_key);
                        })
                } else {
                    None
                }
            }))
    }
}

#[async_trait::async_trait]
impl LeadershipChangeListener for NodeExpiryListener {
    fn name(&self) -> &str {
        "NodeExpiryListener"
    }

    async fn on_leader_start(&self) -> error::Result<()> {
        self.start().await;
        info!(
            "On leader start, node expiry listener started with max idle time: {:?}",
            self.max_idle_time
        );
        Ok(())
    }

    async fn on_leader_stop(&self) -> error::Result<()> {
        self.stop();
        info!("On leader stop, node expiry listener stopped");
        Ok(())
    }
}
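The expiry predicate above is plain millisecond arithmetic; a small self-contained check mirroring it (the timestamps are invented):

    use std::time::Duration;

    // Same comparison as in list_expired_nodes: idle longer than the budget.
    fn is_expired(now_ms: i64, last_activity_ts: i64, max_idle_time: Duration) -> bool {
        (now_ms - last_activity_ts) > max_idle_time.as_millis() as i64
    }

    fn main() {
        let max_idle = Duration::from_secs(24 * 60 * 60); // the Metasrv default below
        assert!(is_expired(90_000_000, 0, max_idle)); // idle for 25h: expired
        assert!(!is_expired(3_600_000, 0, max_idle)); // idle for 1h: kept
    }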
@@ -39,7 +39,7 @@ datafusion-common.workspace = true
datafusion-expr.workspace = true
datatypes.workspace = true
file-engine.workspace = true
futures = "0.3"
futures.workspace = true
futures-util.workspace = true
humantime-serde.workspace = true
lazy_static.workspace = true
@@ -47,6 +47,7 @@ log-store.workspace = true
meta-client.workspace = true
metric-engine.workspace = true
mito2.workspace = true
num_cpus.workspace = true
object-store.workspace = true
prometheus.workspace = true
prost.workspace = true
@@ -171,6 +171,10 @@ pub struct S3Config {
    pub secret_access_key: SecretString,
    pub endpoint: Option<String>,
    pub region: Option<String>,
    /// Enable virtual host style so that opendal sends API requests in virtual host style instead of path style.
    /// By default, opendal sends requests to https://s3.us-east-1.amazonaws.com/bucket_name (path style).
    /// When enabled, opendal sends requests to https://bucket_name.s3.us-east-1.amazonaws.com (virtual host style).
    pub enable_virtual_host_style: bool,
    #[serde(flatten)]
    pub cache: ObjectStorageCacheConfig,
    pub http_client: HttpClientConfig,
@@ -185,6 +189,7 @@ impl PartialEq for S3Config {
            && self.secret_access_key.expose_secret() == other.secret_access_key.expose_secret()
            && self.endpoint == other.endpoint
            && self.region == other.region
            && self.enable_virtual_host_style == other.enable_virtual_host_style
            && self.cache == other.cache
            && self.http_client == other.http_client
    }
@@ -289,6 +294,7 @@ impl Default for S3Config {
            root: String::default(),
            access_key_id: SecretString::from(String::default()),
            secret_access_key: SecretString::from(String::default()),
            enable_virtual_host_style: false,
            endpoint: Option::default(),
            region: Option::default(),
            cache: ObjectStorageCacheConfig::default(),
@@ -224,6 +224,20 @@ impl HeartbeatTask {
        common_runtime::spawn_hb(async move {
            let sleep = tokio::time::sleep(Duration::from_millis(0));
            tokio::pin!(sleep);

            let build_info = common_version::build_info();
            let heartbeat_request = HeartbeatRequest {
                peer: self_peer,
                node_epoch,
                info: Some(NodeInfo {
                    version: build_info.version.to_string(),
                    git_commit: build_info.commit_short.to_string(),
                    start_time_ms: node_epoch,
                    cpus: num_cpus::get() as u32,
                }),
                ..Default::default()
            };

            loop {
                if !running.load(Ordering::Relaxed) {
                    info!("shutdown heartbeat task");
@@ -235,9 +249,8 @@ impl HeartbeatTask {
                        match outgoing_message_to_mailbox_message(message) {
                            Ok(message) => {
                                let req = HeartbeatRequest {
                                    peer: self_peer.clone(),
                                    mailbox_message: Some(message),
                                    ..Default::default()
                                    ..heartbeat_request.clone()
                                };
                                HEARTBEAT_RECV_COUNT.with_label_values(&["success"]).inc();
                                Some(req)
@@ -253,22 +266,13 @@ impl HeartbeatTask {
                        }
                    }
                    _ = &mut sleep => {
                        let build_info = common_version::build_info();
                        let region_stats = Self::load_region_stats(&region_server_clone);
                        let now = Instant::now();
                        let duration_since_epoch = (now - epoch).as_millis() as u64;
                        let req = HeartbeatRequest {
                            peer: self_peer.clone(),
                            region_stats,
                            duration_since_epoch,
                            node_epoch,
                            info: Some(NodeInfo {
                                version: build_info.version.to_string(),
                                git_commit: build_info.commit_short.to_string(),
                                // The start timestamp is the same as node_epoch currently.
                                start_time_ms: node_epoch,
                            }),
                            ..Default::default()
                            ..heartbeat_request.clone()
                        };
                        sleep.as_mut().reset(now + Duration::from_millis(interval));
                        Some(req)
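The hunk above hoists the invariant heartbeat fields out of the loop and fills per-tick fields with Rust's struct update syntax (`..heartbeat_request.clone()`): explicitly named fields win, everything else is copied from the base value. A tiny self-contained illustration with made-up types:

    #[derive(Clone, Default, Debug)]
    struct Req {
        node_epoch: u64,
        region_stats: Vec<u32>,
    }

    fn main() {
        // Built once, like `heartbeat_request` above.
        let base = Req { node_epoch: 42, ..Default::default() };

        // Per tick: only the changed field is spelled out.
        let req = Req { region_stats: vec![7], ..base.clone() };
        assert_eq!(req.node_epoch, 42); // carried over from the base
    }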
@@ -1218,7 +1218,10 @@ mod tests {
        );

        let response = mock_region_server
            .handle_request(region_id, RegionRequest::Drop(RegionDropRequest {}))
            .handle_request(
                region_id,
                RegionRequest::Drop(RegionDropRequest { fast_path: false }),
            )
            .await
            .unwrap();
        assert_eq!(response.affected_rows, 0);
@@ -1310,7 +1313,10 @@ mod tests {
            .insert(region_id, RegionEngineWithStatus::Ready(engine.clone()));

        mock_region_server
            .handle_request(region_id, RegionRequest::Drop(RegionDropRequest {}))
            .handle_request(
                region_id,
                RegionRequest::Drop(RegionDropRequest { fast_path: false }),
            )
            .await
            .unwrap_err();

@@ -41,10 +41,13 @@ pub(crate) async fn new_s3_object_store(s3_config: &S3Config) -> Result<ObjectSt

    if s3_config.endpoint.is_some() {
        builder = builder.endpoint(s3_config.endpoint.as_ref().unwrap());
    };
    }
    if s3_config.region.is_some() {
        builder = builder.region(s3_config.region.as_ref().unwrap());
    };
    }
    if s3_config.enable_virtual_host_style {
        builder = builder.enable_virtual_host_style();
    }

    Ok(ObjectStore::new(builder)
        .context(error::InitBackendSnafu)?
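A hedged sketch of what the new branch changes, assuming opendal's S3 builder as used above (bucket and region values are placeholders): with path style, requests target https://s3.us-east-1.amazonaws.com/bucket_name; with the flag set, they target https://bucket_name.s3.us-east-1.amazonaws.com.

    // Hypothetical setup, mirroring new_s3_object_store above.
    let mut builder = opendal::services::S3::default()
        .bucket("bucket_name")
        .region("us-east-1");
    if enable_virtual_host_style {
        // Switches the request URL from path style to virtual host style.
        builder = builder.enable_virtual_host_style();
    }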
@@ -29,7 +29,7 @@ jsonb.workspace = true
num = "0.4"
num-traits = "0.2"
ordered-float = { version = "3.0", features = ["serde"] }
paste = "1.0"
paste.workspace = true
serde.workspace = true
serde_json.workspace = true
snafu.workspace = true
@@ -32,5 +32,5 @@ pub mod types;
pub mod value;
pub mod vectors;

pub use arrow;
pub use arrow::{self, compute};
pub use error::{Error, Result};
@@ -13,7 +13,7 @@ workspace = true

[dependencies]
api.workspace = true
async-trait = "0.1"
async-trait.workspace = true
common-catalog.workspace = true
common-datasource.workspace = true
common-error.workspace = true

@@ -41,7 +41,7 @@ datafusion-substrait.workspace = true
datatypes.workspace = true
enum-as-inner = "0.6.0"
enum_dispatch = "0.3"
futures = "0.3"
futures.workspace = true
get-size2 = "0.1.2"
greptime-proto.workspace = true
# This fork of hydroflow is simply for keeping our dependency in our org and pinning the version.
@@ -53,6 +53,7 @@ lazy_static.workspace = true
meta-client.workspace = true
nom = "7.1.3"
num-traits = "0.2"
num_cpus.workspace = true
operator.workspace = true
partition.workspace = true
prometheus.workspace = true
@@ -60,12 +60,12 @@ async fn query_flow_state(
#[derive(Clone)]
pub struct HeartbeatTask {
    node_id: u64,
    node_epoch: u64,
    peer_addr: String,
    meta_client: Arc<MetaClient>,
    report_interval: Duration,
    retry_interval: Duration,
    resp_handler_executor: HeartbeatResponseHandlerExecutorRef,
    start_time_ms: u64,
    running: Arc<AtomicBool>,
    query_stat_size: Option<SizeReportSender>,
}
@@ -83,12 +83,12 @@ impl HeartbeatTask {
    ) -> Self {
        Self {
            node_id: opts.node_id.unwrap_or(0),
            node_epoch: common_time::util::current_time_millis() as u64,
            peer_addr: addrs::resolve_addr(&opts.grpc.bind_addr, Some(&opts.grpc.server_addr)),
            meta_client,
            report_interval: heartbeat_opts.interval,
            retry_interval: heartbeat_opts.retry_interval,
            resp_handler_executor,
            start_time_ms: common_time::util::current_time_millis() as u64,
            running: Arc::new(AtomicBool::new(false)),
            query_stat_size: None,
        }
@@ -103,6 +103,11 @@ impl HeartbeatTask {
            warn!("Heartbeat task started multiple times");
            return Ok(());
        }

        self.create_streams().await
    }

    async fn create_streams(&self) -> Result<(), Error> {
        info!("Start to establish the heartbeat connection to metasrv.");
        let (req_sender, resp_stream) = self
            .meta_client
@@ -134,10 +139,9 @@ impl HeartbeatTask {
        }
    }

    fn create_heartbeat_request(
    fn new_heartbeat_request(
        heartbeat_request: &HeartbeatRequest,
        message: Option<OutgoingMessage>,
        peer: Option<Peer>,
        start_time_ms: u64,
        latest_report: &Option<FlowStat>,
    ) -> Option<HeartbeatRequest> {
        let mailbox_message = match message.map(outgoing_message_to_mailbox_message) {
@@ -161,10 +165,8 @@ impl HeartbeatTask {

        Some(HeartbeatRequest {
            mailbox_message,
            peer,
            info: Self::build_node_info(start_time_ms),
            flow_stat,
            ..Default::default()
            ..heartbeat_request.clone()
        })
    }

@@ -174,6 +176,7 @@ impl HeartbeatTask {
            version: build_info.version.to_string(),
            git_commit: build_info.commit_short.to_string(),
            start_time_ms,
            cpus: num_cpus::get() as u32,
        })
    }

@@ -183,7 +186,7 @@ impl HeartbeatTask {
        mut outgoing_rx: mpsc::Receiver<OutgoingMessage>,
    ) {
        let report_interval = self.report_interval;
        let start_time_ms = self.start_time_ms;
        let node_epoch = self.node_epoch;
        let self_peer = Some(Peer {
            id: self.node_id,
            addr: self.peer_addr.clone(),
@@ -198,18 +201,25 @@ impl HeartbeatTask {
            interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay);
            let mut latest_report = None;

            let heartbeat_request = HeartbeatRequest {
                peer: self_peer,
                node_epoch,
                info: Self::build_node_info(node_epoch),
                ..Default::default()
            };

            loop {
                let req = tokio::select! {
                    message = outgoing_rx.recv() => {
                        if let Some(message) = message {
                            Self::create_heartbeat_request(Some(message), self_peer.clone(), start_time_ms, &latest_report)
                            Self::new_heartbeat_request(&heartbeat_request, Some(message), &latest_report)
                        } else {
                            // Receiving None means the sender was dropped; break the current loop.
                            break
                        }
                    }
                    _ = interval.tick() => {
                        Self::create_heartbeat_request(None, self_peer.clone(), start_time_ms, &latest_report)
                        Self::new_heartbeat_request(&heartbeat_request, None, &latest_report)
                    }
                };

@@ -226,6 +236,8 @@ impl HeartbeatTask {
                // set the timeout to half of the report interval so that it wouldn't delay the heartbeat if something went horribly wrong
                latest_report = query_flow_state(&query_stat_size, report_interval / 2).await;
            }

            info!("flownode heartbeat task stopped.");
        });
    }

@@ -269,7 +281,7 @@ impl HeartbeatTask {

        info!("Try to re-establish the heartbeat connection to metasrv.");

        if self.start().await.is_ok() {
        if self.create_streams().await.is_ok() {
            break;
        }
    }
@@ -13,7 +13,7 @@ workspace = true
[dependencies]
api.workspace = true
arc-swap = "1.0"
async-trait = "0.1"
async-trait.workspace = true
auth.workspace = true
cache.workspace = true
catalog.workspace = true
@@ -44,6 +44,7 @@ lazy_static.workspace = true
log-query.workspace = true
log-store.workspace = true
meta-client.workspace = true
num_cpus.workspace = true
opentelemetry-proto.workspace = true
operator.workspace = true
partition.workspace = true
@@ -70,7 +71,7 @@ catalog = { workspace = true, features = ["testing"] }
common-test-util.workspace = true
datanode.workspace = true
datatypes.workspace = true
futures = "0.3"
futures.workspace = true
serde_json.workspace = true
strfmt = "0.2"
tower.workspace = true
@@ -118,10 +118,9 @@ impl HeartbeatTask {
        });
    }

    fn create_heartbeat_request(
    fn new_heartbeat_request(
        heartbeat_request: &HeartbeatRequest,
        message: Option<OutgoingMessage>,
        peer: Option<Peer>,
        start_time_ms: u64,
    ) -> Option<HeartbeatRequest> {
        let mailbox_message = match message.map(outgoing_message_to_mailbox_message) {
            Some(Ok(message)) => Some(message),
@@ -134,9 +133,7 @@ impl HeartbeatTask {

        Some(HeartbeatRequest {
            mailbox_message,
            peer,
            info: Self::build_node_info(start_time_ms),
            ..Default::default()
            ..heartbeat_request.clone()
        })
    }

@@ -147,6 +144,7 @@ impl HeartbeatTask {
            version: build_info.version.to_string(),
            git_commit: build_info.commit_short.to_string(),
            start_time_ms,
            cpus: num_cpus::get() as u32,
        })
    }

@@ -167,11 +165,17 @@ impl HeartbeatTask {
        let sleep = tokio::time::sleep(Duration::from_millis(0));
        tokio::pin!(sleep);

        let heartbeat_request = HeartbeatRequest {
            peer: self_peer,
            info: Self::build_node_info(start_time_ms),
            ..Default::default()
        };

        loop {
            let req = tokio::select! {
                message = outgoing_rx.recv() => {
                    if let Some(message) = message {
                        Self::create_heartbeat_request(Some(message), self_peer.clone(), start_time_ms)
                        Self::new_heartbeat_request(&heartbeat_request, Some(message))
                    } else {
                        // Receiving None means the sender was dropped; break the current loop.
                        break
@@ -179,7 +183,7 @@ impl HeartbeatTask {
                }
                _ = &mut sleep => {
                    sleep.as_mut().reset(Instant::now() + Duration::from_millis(report_interval));
                    Self::create_heartbeat_request(None, self_peer.clone(), start_time_ms)
                    Self::new_heartbeat_request(&heartbeat_request, None)
                }
            };

@@ -237,6 +237,13 @@ impl Instance {

        let output = match stmt {
            Statement::Query(_) | Statement::Explain(_) | Statement::Delete(_) => {
                // TODO: remove this when format is supported in datafusion
                if let Statement::Explain(explain) = &stmt {
                    if let Some(format) = explain.format() {
                        query_ctx.set_explain_format(format.to_string());
                    }
                }

                let stmt = QueryStatement::Sql(stmt);
                let plan = self
                    .statement_executor
@@ -25,12 +25,12 @@ use crate::fulltext_index::create::{FulltextIndexCreator, TantivyFulltextIndexCr
use crate::fulltext_index::search::{FulltextIndexSearcher, RowId, TantivyFulltextIndexSearcher};
use crate::fulltext_index::{Analyzer, Config};

async fn new_bounded_stager(prefix: &str) -> (TempDir, Arc<BoundedStager>) {
async fn new_bounded_stager(prefix: &str) -> (TempDir, Arc<BoundedStager<String>>) {
    let staging_dir = create_temp_dir(prefix);
    let path = staging_dir.path().to_path_buf();
    (
        staging_dir,
        Arc::new(BoundedStager::new(path, 102400, None).await.unwrap()),
        Arc::new(BoundedStager::new(path, 102400, None, None).await.unwrap()),
    )
}

@@ -68,13 +68,13 @@ async fn test_search(
    let file_accessor = Arc::new(MockFileAccessor::new(prefix));
    let puffin_manager = FsPuffinManager::new(stager, file_accessor);

    let file_name = "fulltext_index";
    let blob_key = "fulltext_index";
    let mut writer = puffin_manager.writer(file_name).await.unwrap();
    create_index(prefix, &mut writer, blob_key, texts, config).await;
    let file_name = "fulltext_index".to_string();
    let blob_key = "fulltext_index".to_string();
    let mut writer = puffin_manager.writer(&file_name).await.unwrap();
    create_index(prefix, &mut writer, &blob_key, texts, config).await;

    let reader = puffin_manager.reader(file_name).await.unwrap();
    let index_dir = reader.dir(blob_key).await.unwrap();
    let reader = puffin_manager.reader(&file_name).await.unwrap();
    let index_dir = reader.dir(&blob_key).await.unwrap();
    let searcher = TantivyFulltextIndexSearcher::new(index_dir.path()).unwrap();
    let results = searcher.search(query).await.unwrap();

@@ -55,7 +55,7 @@ pub struct LogQuery {
}

/// Expression to calculate on log after filtering.
#[derive(Debug, Serialize, Deserialize)]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum LogExpr {
    NamedIdent(String),
    PositionalIdent(usize),
@@ -289,7 +289,7 @@ pub struct ColumnFilters {
    pub filters: Vec<ContentFilter>,
}

#[derive(Debug, Serialize, Deserialize)]
#[derive(Clone, Debug, Serialize, Deserialize)]
pub enum ContentFilter {
    // Search-based filters
    /// Only match the exact content.
@@ -310,14 +310,19 @@ pub enum ContentFilter {
    // Value-based filters
    /// Content exists, a.k.a. not null.
    Exist,
    Between(String, String),
    Between {
        start: String,
        end: String,
        start_inclusive: bool,
        end_inclusive: bool,
    },
    // TODO(ruihang): arithmetic operations

    // Compound filters
    Compound(Vec<ContentFilter>, BinaryOperator),
}

#[derive(Debug, Serialize, Deserialize)]
#[derive(Clone, Debug, Serialize, Deserialize)]
pub enum BinaryOperator {
    And,
    Or,
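The new struct variant makes bound inclusivity explicit where the old tuple variant could not; a sketch of building a half-open range filter with it, given the enum shown above:

    // Matches content c with "10" <= c < "20".
    let filter = ContentFilter::Between {
        start: "10".to_string(),
        end: "20".to_string(),
        start_inclusive: true,
        end_inclusive: false,
    };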
@@ -9,7 +9,7 @@ workspace = true

[dependencies]
api.workspace = true
async-trait = "0.1"
async-trait.workspace = true
common-error.workspace = true
common-grpc.workspace = true
common-macro.workspace = true
@@ -27,7 +27,7 @@ tonic.workspace = true

[dev-dependencies]
datatypes.workspace = true
futures = "0.3"
futures.workspace = true
meta-srv = { workspace = true, features = ["mock"] }
tower.workspace = true
tracing = "0.1"
@@ -198,13 +198,13 @@ impl Inner {
            }
        );

        let leader = self
        let leader_addr = self
            .ask_leader
            .as_ref()
            .unwrap()
            .get_leader()
            .context(error::NoLeaderSnafu)?;
        let mut leader = self.make_client(leader)?;
        let mut leader = self.make_client(&leader_addr)?;

        let (sender, receiver) = mpsc::channel::<HeartbeatRequest>(128);

@@ -236,7 +236,11 @@ impl Inner {
            .await
            .map_err(error::Error::from)?
            .context(error::CreateHeartbeatStreamSnafu)?;
        info!("Success to create heartbeat stream to server: {:#?}", res);

        info!(
            "Success to create heartbeat stream to server: {}, response: {:#?}",
            leader_addr, res
        );

        Ok((
            HeartbeatSender::new(self.id, self.role, sender),
@@ -16,7 +16,7 @@ local-ip-address.workspace = true

[dependencies]
api.workspace = true
async-trait = "0.1"
async-trait.workspace = true
bytes.workspace = true
chrono.workspace = true
clap.workspace = true
@@ -44,6 +44,7 @@ use mailbox_handler::MailboxHandler;
use on_leader_start_handler::OnLeaderStartHandler;
use publish_heartbeat_handler::PublishHeartbeatHandler;
use region_lease_handler::RegionLeaseHandler;
use remap_flow_peer_handler::RemapFlowPeerHandler;
use response_header_handler::ResponseHeaderHandler;
use snafu::{OptionExt, ResultExt};
use store_api::storage::RegionId;
@@ -71,6 +72,7 @@ pub mod mailbox_handler;
pub mod on_leader_start_handler;
pub mod publish_heartbeat_handler;
pub mod region_lease_handler;
pub mod remap_flow_peer_handler;
pub mod response_header_handler;

#[async_trait::async_trait]
@@ -573,6 +575,7 @@ impl HeartbeatHandlerGroupBuilder {
            self.add_handler_last(publish_heartbeat_handler);
        }
        self.add_handler_last(CollectStatsHandler::new(self.flush_stats_factor));
        self.add_handler_last(RemapFlowPeerHandler::default());

        if let Some(flow_state_handler) = self.flow_state_handler.take() {
            self.add_handler_last(flow_state_handler);
@@ -853,7 +856,7 @@ mod tests {
            .unwrap();

        let handlers = group.handlers;
        assert_eq!(12, handlers.len());
        assert_eq!(13, handlers.len());

        let names = [
            "ResponseHeaderHandler",
@@ -868,6 +871,7 @@ mod tests {
            "MailboxHandler",
            "FilterInactiveRegionStatsHandler",
            "CollectStatsHandler",
            "RemapFlowPeerHandler",
        ];

        for (handler, name) in handlers.iter().zip(names.into_iter()) {
@@ -888,7 +892,7 @@ mod tests {

        let group = builder.build().unwrap();
        let handlers = group.handlers;
        assert_eq!(13, handlers.len());
        assert_eq!(14, handlers.len());

        let names = [
            "ResponseHeaderHandler",
@@ -904,6 +908,7 @@ mod tests {
            "CollectStatsHandler",
            "FilterInactiveRegionStatsHandler",
            "CollectStatsHandler",
            "RemapFlowPeerHandler",
        ];

        for (handler, name) in handlers.iter().zip(names.into_iter()) {
@@ -921,7 +926,7 @@ mod tests {

        let group = builder.build().unwrap();
        let handlers = group.handlers;
        assert_eq!(13, handlers.len());
        assert_eq!(14, handlers.len());

        let names = [
            "CollectStatsHandler",
@@ -937,6 +942,7 @@ mod tests {
            "MailboxHandler",
            "FilterInactiveRegionStatsHandler",
            "CollectStatsHandler",
            "RemapFlowPeerHandler",
        ];

        for (handler, name) in handlers.iter().zip(names.into_iter()) {
@@ -954,7 +960,7 @@ mod tests {

        let group = builder.build().unwrap();
        let handlers = group.handlers;
        assert_eq!(13, handlers.len());
        assert_eq!(14, handlers.len());

        let names = [
            "ResponseHeaderHandler",
@@ -970,6 +976,7 @@ mod tests {
            "CollectStatsHandler",
            "FilterInactiveRegionStatsHandler",
            "CollectStatsHandler",
            "RemapFlowPeerHandler",
        ];

        for (handler, name) in handlers.iter().zip(names.into_iter()) {
@@ -987,7 +994,7 @@ mod tests {

        let group = builder.build().unwrap();
        let handlers = group.handlers;
        assert_eq!(13, handlers.len());
        assert_eq!(14, handlers.len());

        let names = [
            "ResponseHeaderHandler",
@@ -1003,6 +1010,7 @@ mod tests {
            "FilterInactiveRegionStatsHandler",
            "CollectStatsHandler",
            "ResponseHeaderHandler",
            "RemapFlowPeerHandler",
        ];

        for (handler, name) in handlers.iter().zip(names.into_iter()) {
@@ -1020,7 +1028,7 @@ mod tests {

        let group = builder.build().unwrap();
        let handlers = group.handlers;
        assert_eq!(12, handlers.len());
        assert_eq!(13, handlers.len());

        let names = [
            "ResponseHeaderHandler",
@@ -1035,6 +1043,7 @@ mod tests {
            "CollectStatsHandler",
            "FilterInactiveRegionStatsHandler",
            "CollectStatsHandler",
            "RemapFlowPeerHandler",
        ];

        for (handler, name) in handlers.iter().zip(names.into_iter()) {
@@ -1052,7 +1061,7 @@ mod tests {

        let group = builder.build().unwrap();
        let handlers = group.handlers;
        assert_eq!(12, handlers.len());
        assert_eq!(13, handlers.len());

        let names = [
            "ResponseHeaderHandler",
@@ -1067,6 +1076,7 @@ mod tests {
            "MailboxHandler",
            "FilterInactiveRegionStatsHandler",
            "ResponseHeaderHandler",
            "RemapFlowPeerHandler",
        ];

        for (handler, name) in handlers.iter().zip(names.into_iter()) {
@@ -1084,7 +1094,7 @@ mod tests {

        let group = builder.build().unwrap();
        let handlers = group.handlers;
        assert_eq!(12, handlers.len());
        assert_eq!(13, handlers.len());

        let names = [
            "CollectStatsHandler",
@@ -1099,6 +1109,7 @@ mod tests {
            "MailboxHandler",
            "FilterInactiveRegionStatsHandler",
            "CollectStatsHandler",
            "RemapFlowPeerHandler",
        ];

        for (handler, name) in handlers.iter().zip(names.into_iter()) {
@@ -23,8 +23,8 @@ pub struct CheckLeaderHandler;

#[async_trait::async_trait]
impl HeartbeatHandler for CheckLeaderHandler {
    fn is_acceptable(&self, role: Role) -> bool {
        role == Role::Datanode
    fn is_acceptable(&self, _role: Role) -> bool {
        true
    }

    async fn handle(
@@ -13,7 +13,6 @@
// limitations under the License.

use api::v1::meta::{HeartbeatRequest, NodeInfo as PbNodeInfo, Role};
use common_meta::cluster;
use common_meta::cluster::{
    DatanodeStatus, FlownodeStatus, FrontendStatus, NodeInfo, NodeInfoKey, NodeStatus,
};
@@ -42,7 +41,7 @@ impl HeartbeatHandler for CollectFrontendClusterInfoHandler {
        ctx: &mut Context,
        _acc: &mut HeartbeatAccumulator,
    ) -> Result<HandleControl> {
        let Some((key, peer, info)) = extract_base_info(req, Role::Frontend) else {
        let Some((key, peer, info)) = extract_base_info(req) else {
            return Ok(HandleControl::Continue);
        };

@@ -75,7 +74,7 @@ impl HeartbeatHandler for CollectFlownodeClusterInfoHandler {
        ctx: &mut Context,
        _acc: &mut HeartbeatAccumulator,
    ) -> Result<HandleControl> {
        let Some((key, peer, info)) = extract_base_info(req, Role::Flownode) else {
        let Some((key, peer, info)) = extract_base_info(req) else {
            return Ok(HandleControl::Continue);
        };

@@ -109,7 +108,7 @@ impl HeartbeatHandler for CollectDatanodeClusterInfoHandler {
        ctx: &mut Context,
        acc: &mut HeartbeatAccumulator,
    ) -> Result<HandleControl> {
        let Some((key, peer, info)) = extract_base_info(req, Role::Datanode) else {
        let Some((key, peer, info)) = extract_base_info(req) else {
            return Ok(HandleControl::Continue);
        };

@@ -144,16 +143,9 @@ impl HeartbeatHandler for CollectDatanodeClusterInfoHandler {
    }
}

fn extract_base_info(
    req: &HeartbeatRequest,
    role: Role,
) -> Option<(NodeInfoKey, Peer, PbNodeInfo)> {
    let HeartbeatRequest {
        header, peer, info, ..
    } = req;
    let Some(header) = &header else {
        return None;
    };
fn extract_base_info(request: &HeartbeatRequest) -> Option<(NodeInfoKey, Peer, PbNodeInfo)> {
    let HeartbeatRequest { peer, info, .. } = request;
    let key = NodeInfoKey::new(request)?;
    let Some(peer) = &peer else {
        return None;
    };
@@ -161,23 +153,11 @@ fn extract_base_info(
        return None;
    };

    Some((
        NodeInfoKey {
            cluster_id: header.cluster_id,
            role: match role {
                Role::Datanode => cluster::Role::Datanode,
                Role::Frontend => cluster::Role::Frontend,
                Role::Flownode => cluster::Role::Flownode,
            },
            node_id: peer.id,
        },
        Peer::from(peer.clone()),
        info.clone(),
    ))
    Some((key, Peer::from(peer.clone()), info.clone()))
}

async fn put_into_memory_store(ctx: &mut Context, key: NodeInfoKey, value: NodeInfo) -> Result<()> {
    let key = key.into();
    let key = (&key).into();
    let value = value.try_into().context(InvalidClusterInfoFormatSnafu)?;
    let put_req = PutRequest {
        key,
@@ -21,7 +21,7 @@ use common_meta::key::node_address::{NodeAddressKey, NodeAddressValue};
use common_meta::key::{MetadataKey, MetadataValue};
use common_meta::peer::Peer;
use common_meta::rpc::store::PutRequest;
use common_telemetry::{error, warn};
use common_telemetry::{error, info, warn};
use dashmap::DashMap;
use snafu::ResultExt;

@@ -185,6 +185,10 @@ async fn rewrite_node_address(ctx: &mut Context, stat: &Stat) {

    match ctx.leader_cached_kv_backend.put(put).await {
        Ok(_) => {
            info!(
                "Successfully updated datanode `NodeAddressValue`: {:?}",
                peer
            );
            // broadcast invalidating cache
            let cache_idents = stat
                .table_ids()
@@ -200,11 +204,14 @@ async fn rewrite_node_address(ctx: &mut Context, stat: &Stat) {
            }
        }
        Err(e) => {
            error!(e; "Failed to update NodeAddressValue: {:?}", peer);
            error!(e; "Failed to update datanode `NodeAddressValue`: {:?}", peer);
        }
    }
} else {
    warn!("Failed to serialize NodeAddressValue: {:?}", peer);
    warn!(
        "Failed to serialize datanode `NodeAddressValue`: {:?}",
        peer
    );
}

src/meta-srv/src/handler/remap_flow_peer_handler.rs (new file, 92 lines)
@@ -0,0 +1,92 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use api::v1::meta::{HeartbeatRequest, Peer, Role};
use common_meta::key::node_address::{NodeAddressKey, NodeAddressValue};
use common_meta::key::{MetadataKey, MetadataValue};
use common_meta::rpc::store::PutRequest;
use common_telemetry::{error, info, warn};
use dashmap::DashMap;

use crate::handler::{HandleControl, HeartbeatAccumulator, HeartbeatHandler};
use crate::metasrv::Context;
use crate::Result;

#[derive(Debug, Default)]
pub struct RemapFlowPeerHandler {
    /// flow_node_id -> epoch
    epoch_cache: DashMap<u64, u64>,
}

#[async_trait::async_trait]
impl HeartbeatHandler for RemapFlowPeerHandler {
    fn is_acceptable(&self, role: Role) -> bool {
        role == Role::Flownode
    }

    async fn handle(
        &self,
        req: &HeartbeatRequest,
        ctx: &mut Context,
        _acc: &mut HeartbeatAccumulator,
    ) -> Result<HandleControl> {
        let Some(peer) = req.peer.as_ref() else {
            return Ok(HandleControl::Continue);
        };

        let current_epoch = req.node_epoch;
        let flow_node_id = peer.id;

        let refresh = if let Some(mut epoch) = self.epoch_cache.get_mut(&flow_node_id) {
            if current_epoch > *epoch.value() {
                *epoch.value_mut() = current_epoch;
                true
            } else {
                false
            }
        } else {
            self.epoch_cache.insert(flow_node_id, current_epoch);
            true
        };

        if refresh {
            rewrite_node_address(ctx, peer).await;
        }

        Ok(HandleControl::Continue)
    }
}

async fn rewrite_node_address(ctx: &mut Context, peer: &Peer) {
    let key = NodeAddressKey::with_flownode(peer.id).to_bytes();
    if let Ok(value) = NodeAddressValue::new(peer.clone().into()).try_as_raw_value() {
        let put = PutRequest {
            key,
            value,
            prev_kv: false,
        };

        match ctx.leader_cached_kv_backend.put(put).await {
            Ok(_) => {
                info!("Successfully updated flow `NodeAddressValue`: {:?}", peer);
                // TODO(discord): broadcast invalidating cache to all frontends
            }
            Err(e) => {
                error!(e; "Failed to update flow `NodeAddressValue`: {:?}", peer);
            }
        }
    } else {
        warn!("Failed to serialize flow `NodeAddressValue`: {:?}", peer);
    }
}
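The epoch cache above makes the address rewrite run once per flownode restart instead of on every heartbeat; the same decision rule in isolation, using `dashmap` as the handler does:

    use dashmap::DashMap;

    // Returns true when `current_epoch` is newer than the cached one
    // (or the node is seen for the first time), mirroring the handler.
    fn should_refresh(cache: &DashMap<u64, u64>, node_id: u64, current_epoch: u64) -> bool {
        if let Some(mut epoch) = cache.get_mut(&node_id) {
            if current_epoch > *epoch.value() {
                *epoch.value_mut() = current_epoch;
                true
            } else {
                false
            }
        } else {
            cache.insert(node_id, current_epoch);
            true
        }
    }

    fn main() {
        let cache = DashMap::new();
        assert!(should_refresh(&cache, 1, 100)); // first heartbeat: refresh
        assert!(!should_refresh(&cache, 1, 100)); // same epoch: skip
        assert!(should_refresh(&cache, 1, 200)); // node restarted: refresh
    }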
@@ -32,6 +32,7 @@ use common_meta::kv_backend::{KvBackendRef, ResettableKvBackend, ResettableKvBac
use common_meta::leadership_notifier::{
    LeadershipChangeNotifier, LeadershipChangeNotifierCustomizerRef,
};
use common_meta::node_expiry_listener::NodeExpiryListener;
use common_meta::peer::Peer;
use common_meta::region_keeper::MemoryRegionKeeperRef;
use common_meta::wal_options_allocator::WalOptionsAllocatorRef;
@@ -151,6 +152,8 @@ pub struct MetasrvOptions {
    #[cfg(feature = "pg_kvbackend")]
    /// Lock id for the meta kv election. Only effective when using pg_kvbackend.
    pub meta_election_lock_id: u64,
    #[serde(with = "humantime_serde")]
    pub node_max_idle_time: Duration,
}

const DEFAULT_METASRV_ADDR_PORT: &str = "3002";
@@ -192,6 +195,7 @@ impl Default for MetasrvOptions {
            meta_table_name: DEFAULT_META_TABLE_NAME.to_string(),
            #[cfg(feature = "pg_kvbackend")]
            meta_election_lock_id: DEFAULT_META_ELECTION_LOCK_ID,
            node_max_idle_time: Duration::from_secs(24 * 60 * 60),
        }
    }
}
@@ -442,6 +446,10 @@ impl Metasrv {
        leadership_change_notifier.add_listener(self.wal_options_allocator.clone());
        leadership_change_notifier
            .add_listener(Arc::new(ProcedureManagerListenerAdapter(procedure_manager)));
        leadership_change_notifier.add_listener(Arc::new(NodeExpiryListener::new(
            self.options.node_max_idle_time,
            self.in_memory.clone(),
        )));
        if let Some(region_supervisor_ticker) = &self.region_supervisor_ticker {
            leadership_change_notifier.add_listener(region_supervisor_ticker.clone() as _);
        }

@@ -68,13 +68,15 @@ impl heartbeat_server::Heartbeat for Metasrv {
                    };

                    if pusher_id.is_none() {
                        pusher_id = register_pusher(&handler_group, header, tx.clone()).await;
                        pusher_id =
                            Some(register_pusher(&handler_group, header, tx.clone()).await);
                    }
                    if let Some(k) = &pusher_id {
                        METRIC_META_HEARTBEAT_RECV.with_label_values(&[&k.to_string()]);
                    } else {
                        METRIC_META_HEARTBEAT_RECV.with_label_values(&["none"]);
                    }

                    let res = handler_group
                        .handle(req, ctx.clone())
                        .await
@@ -173,13 +175,13 @@ async fn register_pusher(
    handler_group: &HeartbeatHandlerGroup,
    header: &RequestHeader,
    sender: Sender<std::result::Result<HeartbeatResponse, tonic::Status>>,
) -> Option<PusherId> {
) -> PusherId {
    let role = header.role();
    let id = get_node_id(header);
    let pusher_id = PusherId::new(role, id);
    let pusher = Pusher::new(sender, header);
    handler_group.register_pusher(pusher_id, pusher).await;
    Some(pusher_id)
    pusher_id
}

#[cfg(test)]

@@ -17,13 +17,15 @@ use std::time::Duration;

use api::v1::meta::{
    procedure_service_server, DdlTaskRequest as PbDdlTaskRequest,
    DdlTaskResponse as PbDdlTaskResponse, MigrateRegionRequest, MigrateRegionResponse,
    DdlTaskResponse as PbDdlTaskResponse, Error, MigrateRegionRequest, MigrateRegionResponse,
    ProcedureDetailRequest, ProcedureDetailResponse, ProcedureStateResponse, QueryProcedureRequest,
    ResponseHeader,
};
use common_meta::ddl::ExecutorContext;
use common_meta::rpc::ddl::{DdlTask, SubmitDdlTaskRequest};
use common_meta::rpc::procedure;
use snafu::{ensure, OptionExt, ResultExt};
use common_telemetry::warn;
use snafu::{OptionExt, ResultExt};
use tonic::{Request, Response};

use super::GrpcResult;
@@ -37,6 +39,16 @@ impl procedure_service_server::ProcedureService for Metasrv {
        &self,
        request: Request<QueryProcedureRequest>,
    ) -> GrpcResult<ProcedureStateResponse> {
        if !self.is_leader() {
            let resp = ProcedureStateResponse {
                header: Some(ResponseHeader::failed(0, Error::is_not_leader())),
                ..Default::default()
            };

            warn!("The current meta is not leader, but a `query procedure state` request has reached the meta. Detail: {:?}.", request);
            return Ok(Response::new(resp));
        }

        let QueryProcedureRequest { header, pid, .. } = request.into_inner();
        let _header = header.context(error::MissingRequestHeaderSnafu)?;
        let pid = pid.context(error::MissingRequiredParameterSnafu { param: "pid" })?;
@@ -57,6 +69,16 @@ impl procedure_service_server::ProcedureService for Metasrv {
    }

    async fn ddl(&self, request: Request<PbDdlTaskRequest>) -> GrpcResult<PbDdlTaskResponse> {
        if !self.is_leader() {
            let resp = PbDdlTaskResponse {
                header: Some(ResponseHeader::failed(0, Error::is_not_leader())),
                ..Default::default()
            };

            warn!("The current meta is not leader, but a `ddl` request has reached the meta. Detail: {:?}.", request);
            return Ok(Response::new(resp));
        }

        let PbDdlTaskRequest {
            header,
            query_context,
@@ -99,12 +121,15 @@ impl procedure_service_server::ProcedureService for Metasrv {
        &self,
        request: Request<MigrateRegionRequest>,
    ) -> GrpcResult<MigrateRegionResponse> {
        ensure!(
            self.meta_peer_client().is_leader(),
            error::UnexpectedSnafu {
                violated: "Trying to submit a region migration procedure to non-leader meta server"
            }
        );
        if !self.is_leader() {
            let resp = MigrateRegionResponse {
                header: Some(ResponseHeader::failed(0, Error::is_not_leader())),
                ..Default::default()
            };

            warn!("The current meta is not leader, but a `migrate` request has reached the meta. Detail: {:?}.", request);
            return Ok(Response::new(resp));
        }

        let MigrateRegionRequest {
            header,
@@ -150,6 +175,16 @@ impl procedure_service_server::ProcedureService for Metasrv {
        &self,
        request: Request<ProcedureDetailRequest>,
    ) -> GrpcResult<ProcedureDetailResponse> {
        if !self.is_leader() {
            let resp = ProcedureDetailResponse {
                header: Some(ResponseHeader::failed(0, Error::is_not_leader())),
                ..Default::default()
            };

            warn!("The current meta is not leader, but a `procedure details` request has reached the meta. Detail: {:?}.", request);
            return Ok(Response::new(resp));
        }

        let ProcedureDetailRequest { header } = request.into_inner();
        let _header = header.context(error::MissingRequestHeaderSnafu)?;
        let metas = self

@@ -142,6 +142,7 @@ impl DataRegion {
            c.column_id = new_column_id_start + delta as u32;
            c.column_schema.set_nullable();
            match index_options {
                IndexOptions::None => {}
                IndexOptions::Inverted => {
                    c.column_schema.set_inverted_index(true);
                }

@@ -21,7 +21,7 @@ use api::v1::SemanticType;
use common_telemetry::info;
use common_time::{Timestamp, FOREVER};
use datatypes::data_type::ConcreteDataType;
use datatypes::schema::ColumnSchema;
use datatypes::schema::{ColumnSchema, SkippingIndexOptions};
use datatypes::value::Value;
use mito2::engine::MITO_ENGINE_NAME;
use object_store::util::join_dir;
@@ -55,6 +55,8 @@ use crate::error::{
use crate::metrics::PHYSICAL_REGION_COUNT;
use crate::utils::{self, to_data_region_id, to_metadata_region_id};

const DEFAULT_TABLE_ID_SKIPPING_INDEX_GRANULARITY: u32 = 1024;

impl MetricEngineInner {
    pub async fn create_regions(
        &self,
@@ -440,6 +442,7 @@ impl MetricEngineInner {
    ///
    /// Return `[table_id_col, tsid_col]`
    fn internal_column_metadata() -> [ColumnMetadata; 2] {
        // Safety: BloomFilter is a valid skipping index type
        let metric_name_col = ColumnMetadata {
            column_id: ReservedColumnId::table_id(),
            semantic_type: SemanticType::Tag,
@@ -448,7 +451,11 @@ impl MetricEngineInner {
                ConcreteDataType::uint32_datatype(),
                false,
            )
            .with_inverted_index(true),
            .with_skipping_options(SkippingIndexOptions {
                granularity: DEFAULT_TABLE_ID_SKIPPING_INDEX_GRANULARITY,
                index_type: datatypes::schema::SkippingIndexType::BloomFilter,
            })
            .unwrap(),
        };
        let tsid_col = ColumnMetadata {
            column_id: ReservedColumnId::tsid(),
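A hedged standalone sketch of the same skipping-index setup on a ColumnSchema (the column name here is invented; the types come from the datatypes crate as imported above):

    use datatypes::data_type::ConcreteDataType;
    use datatypes::schema::{ColumnSchema, SkippingIndexOptions, SkippingIndexType};

    // BloomFilter skipping index at granularity 1024, as the metric
    // engine now configures for its reserved table-id tag column.
    let column = ColumnSchema::new("table_id", ConcreteDataType::uint32_datatype(), false)
        .with_skipping_options(SkippingIndexOptions {
            granularity: 1024,
            index_type: SkippingIndexType::BloomFilter,
        })
        .unwrap();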
@@ -30,9 +30,10 @@ impl MetricEngineInner {
    pub async fn drop_region(
        &self,
        region_id: RegionId,
        _req: RegionDropRequest,
        req: RegionDropRequest,
    ) -> Result<AffectedRows> {
        let data_region_id = utils::to_data_region_id(region_id);
        let fast_path = req.fast_path;

        // enclose the guard in a block to prevent the guard from polluting the async context
        let (is_physical_region, is_physical_region_busy) = {
@@ -52,7 +53,7 @@ impl MetricEngineInner {

        if is_physical_region {
            // check that no logical region relates to this physical region
            if is_physical_region_busy {
            if is_physical_region_busy && !fast_path {
                // reject if there is any present logical region
                return Err(PhysicalRegionBusySnafu {
                    region_id: data_region_id,
@@ -60,9 +61,21 @@ impl MetricEngineInner {
                .build());
            }

            self.drop_physical_region(data_region_id).await
            return self.drop_physical_region(data_region_id).await;
        }

        if fast_path {
            // For the fast path, we don't delete the metadata in the metadata region;
            // we only remove the logical region from the engine state.
            //
            // The drop database procedure will ensure the metadata region and data region are dropped eventually.
            self.state
                .write()
                .unwrap()
                .remove_logical_region(region_id)?;

            Ok(0)
        } else {
            // cannot merge these two `if`, otherwise the type checker will complain
            let metadata_region_id = self
                .state
                .read()
@@ -87,13 +100,16 @@ impl MetricEngineInner {
            // Since the physical regions are going to be dropped, we don't need to
            // update the contents in the metadata region.
            self.mito
                .handle_request(data_region_id, RegionRequest::Drop(RegionDropRequest {}))
                .handle_request(
                    data_region_id,
                    RegionRequest::Drop(RegionDropRequest { fast_path: false }),
                )
                .await
                .with_context(|_| CloseMitoRegionSnafu { region_id })?;
            self.mito
                .handle_request(
                    metadata_region_id,
                    RegionRequest::Drop(RegionDropRequest {}),
                    RegionRequest::Drop(RegionDropRequest { fast_path: false }),
                )
                .await
                .with_context(|_| CloseMitoRegionSnafu { region_id })?;

@@ -40,6 +40,7 @@ pub struct PhysicalRegionOptions {
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
pub enum IndexOptions {
    #[default]
    None,
    Inverted,
    Skipping {
        granularity: u32,
@@ -16,7 +16,7 @@ api.workspace = true
aquamarine.workspace = true
async-channel = "1.9"
async-stream.workspace = true
async-trait = "0.1"
async-trait.workspace = true
bytemuck.workspace = true
bytes.workspace = true
common-base.workspace = true
Some files were not shown because too many files have changed in this diff.