Mirror of https://github.com/GreptimeTeam/greptimedb.git, synced 2026-01-07 13:52:59 +00:00

Compare commits: release/v0 ... feat/objbe

45 Commits
Commits (by SHA1):
0d5b423eb7, 26bdb6a413, 2fe21469f8, 3aa67c7af4, e0d3e6ae97, 2ce476dc42, 69a816fa0c, dcf5a62014, f3aa967aae, 93e8510b2a,
53c58494fd, 741c5e2fb1, d68215dc88, bcd63fdb87, f4c527cddf, 8da5949fc5, db6a63ef6c, f166b93b02, 904d560175, 765d1277ee,
ccf42a9d97, 71e2fb895f, c9671fd669, b5efc75aab, c1d18d9980, 5d9faaaf39, 538875abee, 5ed09c4584, 3f6a41eac5, ff0dcf12c5,
5b1fca825a, 7bd108e2be, 286f225e50, 4f988b5ba9, 500d0852eb, 8d05fb3503, d7b6718be0, 6f0783e17e, d69e93b91a, 76083892cd,
7981c06989, 97bb1519f8, 1d8c9c1843, 71007e200c, a0ff9e751e
.github/CODEOWNERS (2 changes)

@@ -4,7 +4,7 @@

 * @GreptimeTeam/db-approver

-## [Module] Databse Engine
+## [Module] Database Engine
 /src/index @zhongzc
 /src/mito2 @evenyag @v0y4g3r @waynexia
 /src/query @evenyag
(File-name headers for the following hunks were not captured in this view; the hunks appear in page order up to .github/actions/start-runner/action.yml.)

@@ -41,7 +41,14 @@ runs:
 username: ${{ inputs.dockerhub-image-registry-username }}
 password: ${{ inputs.dockerhub-image-registry-token }}

-- name: Build and push dev-builder-ubuntu image
+- name: Set up qemu for multi-platform builds
+uses: docker/setup-qemu-action@v3
+with:
+platforms: linux/amd64,linux/arm64
+# The latest version will lead to segmentation fault.
+image: tonistiigi/binfmt:qemu-v7.0.0-28
+
+- name: Build and push dev-builder-ubuntu image # Build image for amd64 and arm64 platform.
 shell: bash
 if: ${{ inputs.build-dev-builder-ubuntu == 'true' }}
 run: |

@@ -52,7 +59,7 @@ runs:
 IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
 DEV_BUILDER_IMAGE_TAG=${{ inputs.version }}

-- name: Build and push dev-builder-centos image
+- name: Build and push dev-builder-centos image # Only build image for amd64 platform.
 shell: bash
 if: ${{ inputs.build-dev-builder-centos == 'true' }}
 run: |

@@ -69,8 +76,7 @@ runs:
 run: |
 make dev-builder \
 BASE_IMAGE=android \
+BUILDX_MULTI_PLATFORM_BUILD=amd64 \
 IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
 IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
-DEV_BUILDER_IMAGE_TAG=${{ inputs.version }} && \
+DEV_BUILDER_IMAGE_TAG=${{ inputs.version }}
-
-docker push ${{ inputs.dockerhub-image-registry }}/${{ inputs.dockerhub-image-namespace }}/dev-builder-android:${{ inputs.version }}
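The pinned binfmt image above can also be registered outside of the workflow with a plain `docker run`; a minimal sketch assuming a local Docker daemon (the CI itself relies on `docker/setup-qemu-action@v3` with the same image tag):

```bash
# Register qemu binfmt handlers for the arm64 cross-build, pinning the same
# image tag the workflow pins to avoid the segmentation fault noted in the diff.
docker run --privileged --rm tonistiigi/binfmt:qemu-v7.0.0-28 --install arm64
```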
@@ -52,7 +52,7 @@ runs:
 uses: ./.github/actions/build-greptime-binary
 with:
 base-image: ubuntu
-features: servers/dashboard,pg_kvbackend
+features: servers/dashboard
 cargo-profile: ${{ inputs.cargo-profile }}
 artifacts-dir: greptime-linux-${{ inputs.arch }}-${{ inputs.version }}
 version: ${{ inputs.version }}

@@ -70,7 +70,7 @@
 if: ${{ inputs.arch == 'amd64' && inputs.dev-mode == 'false' }} # Builds greptime for centos if the host machine is amd64.
 with:
 base-image: centos
-features: servers/dashboard,pg_kvbackend
+features: servers/dashboard
 cargo-profile: ${{ inputs.cargo-profile }}
 artifacts-dir: greptime-linux-${{ inputs.arch }}-centos-${{ inputs.version }}
 version: ${{ inputs.version }}
@@ -47,7 +47,6 @@ runs:
 shell: pwsh
 run: make test sqlness-test
 env:
-RUSTUP_WINDOWS_PATH_ADD_BIN: 1 # Workaround for https://github.com/nextest-rs/nextest/issues/1493
 RUST_BACKTRACE: 1
 SQLNESS_OPTS: "--preserve-state"
@@ -64,11 +64,11 @@ inputs:
 upload-max-retry-times:
 description: Max retry times for uploading artifacts to S3
 required: false
-default: "20"
+default: "30"
 upload-retry-timeout:
 description: Timeout for uploading artifacts to S3
 required: false
-default: "30" # minutes
+default: "120" # minutes
 runs:
 using: composite
 steps:
@@ -8,15 +8,15 @@ inputs:
 default: 2
 description: "Number of Datanode replicas"
 meta-replicas:
-default: 1
+default: 2
 description: "Number of Metasrv replicas"
 image-registry:
 default: "docker.io"
 description: "Image registry"
 image-repository:
 default: "greptime/greptimedb"
 description: "Image repository"
 image-tag:
 default: "latest"
 description: 'Image tag'
 etcd-endpoints:

@@ -32,12 +32,12 @@ runs:
 steps:
 - name: Install GreptimeDB operator
 uses: nick-fields/retry@v3
 with:
 timeout_minutes: 3
 max_attempts: 3
 shell: bash
 command: |
 helm repo add greptime https://greptimeteam.github.io/helm-charts/
 helm repo update
 helm upgrade \
 --install \

@@ -48,10 +48,10 @@ runs:
 --wait-for-jobs
 - name: Install GreptimeDB cluster
 shell: bash
 run: |
 helm upgrade \
 --install my-greptimedb \
---set meta.etcdEndpoints=${{ inputs.etcd-endpoints }} \
+--set meta.backendStorage.etcd.endpoints=${{ inputs.etcd-endpoints }} \
 --set meta.enableRegionFailover=${{ inputs.enable-region-failover }} \
 --set image.registry=${{ inputs.image-registry }} \
 --set image.repository=${{ inputs.image-repository }} \

@@ -59,7 +59,7 @@ runs:
 --set base.podTemplate.main.resources.requests.cpu=50m \
 --set base.podTemplate.main.resources.requests.memory=256Mi \
 --set base.podTemplate.main.resources.limits.cpu=2000m \
---set base.podTemplate.main.resources.limits.memory=2Gi \
+--set base.podTemplate.main.resources.limits.memory=3Gi \
 --set frontend.replicas=${{ inputs.frontend-replicas }} \
 --set datanode.replicas=${{ inputs.datanode-replicas }} \
 --set meta.replicas=${{ inputs.meta-replicas }} \

@@ -72,7 +72,7 @@ runs:
 - name: Wait for GreptimeDB
 shell: bash
 run: |
 while true; do
 PHASE=$(kubectl -n my-greptimedb get gtc my-greptimedb -o jsonpath='{.status.clusterPhase}')
 if [ "$PHASE" == "Running" ]; then
 echo "Cluster is ready"

@@ -86,10 +86,10 @@ runs:
 - name: Print GreptimeDB info
 if: always()
 shell: bash
 run: |
 kubectl get all --show-labels -n my-greptimedb
 - name: Describe Nodes
 if: always()
 shell: bash
 run: |
 kubectl describe nodes
@@ -2,13 +2,14 @@ meta:
 configData: |-
 [runtime]
 global_rt_size = 4

 [wal]
 provider = "kafka"
 broker_endpoints = ["kafka.kafka-cluster.svc.cluster.local:9092"]
 num_topics = 3
+auto_prune_interval = "30s"
+trigger_flush_threshold = 100

 [datanode]
 [datanode.client]
 timeout = "120s"

@@ -21,7 +22,7 @@ datanode:
 [wal]
 provider = "kafka"
 broker_endpoints = ["kafka.kafka-cluster.svc.cluster.local:9092"]
-linger = "2ms"
+overwrite_entry_start_id = true
 frontend:
 configData: |-
 [runtime]
.github/actions/start-runner/action.yml (2 changes)

@@ -56,7 +56,7 @@ runs:

 - name: Start EC2 runner
 if: startsWith(inputs.runner, 'ec2')
-uses: machulav/ec2-github-runner@v2
+uses: machulav/ec2-github-runner@v2.3.8
 id: start-linux-arm64-ec2-runner
 with:
 mode: start
.github/actions/stop-runner/action.yml (2 changes)

@@ -33,7 +33,7 @@ runs:

 - name: Stop EC2 runner
 if: ${{ inputs.label && inputs.ec2-instance-id }}
-uses: machulav/ec2-github-runner@v2
+uses: machulav/ec2-github-runner@v2.3.8
 with:
 mode: stop
 label: ${{ inputs.label }}
.github/labeler.yaml (new file, 15 lines)

@@ -0,0 +1,15 @@
+ci:
+- changed-files:
+  - any-glob-to-any-file: .github/**
+
+docker:
+- changed-files:
+  - any-glob-to-any-file: docker/**
+
+documentation:
+- changed-files:
+  - any-glob-to-any-file: docs/**
+
+dashboard:
+- changed-files:
+  - any-glob-to-any-file: grafana/**
.github/scripts/check-version.sh (new executable file, 42 lines)

@@ -0,0 +1,42 @@
+#!/bin/bash
+
+# Get current version
+CURRENT_VERSION=$1
+if [ -z "$CURRENT_VERSION" ]; then
+  echo "Error: Failed to get current version"
+  exit 1
+fi
+
+# Get the latest version from GitHub Releases
+API_RESPONSE=$(curl -s "https://api.github.com/repos/GreptimeTeam/greptimedb/releases/latest")
+
+if [ -z "$API_RESPONSE" ] || [ "$(echo "$API_RESPONSE" | jq -r '.message')" = "Not Found" ]; then
+  echo "Error: Failed to fetch latest version from GitHub"
+  exit 1
+fi
+
+# Get the latest version
+LATEST_VERSION=$(echo "$API_RESPONSE" | jq -r '.tag_name')
+
+if [ -z "$LATEST_VERSION" ] || [ "$LATEST_VERSION" = "null" ]; then
+  echo "Error: No valid version found in GitHub releases"
+  exit 1
+fi
+
+# Cleaned up version number format (removed possible 'v' prefix and -nightly suffix)
+CLEAN_CURRENT=$(echo "$CURRENT_VERSION" | sed 's/^v//' | sed 's/-nightly-.*//')
+CLEAN_LATEST=$(echo "$LATEST_VERSION" | sed 's/^v//' | sed 's/-nightly-.*//')
+
+echo "Current version: $CLEAN_CURRENT"
+echo "Latest release version: $CLEAN_LATEST"
+
+# Use sort -V to compare versions
+HIGHER_VERSION=$(printf "%s\n%s" "$CLEAN_CURRENT" "$CLEAN_LATEST" | sort -V | tail -n1)
+
+if [ "$HIGHER_VERSION" = "$CLEAN_CURRENT" ]; then
+  echo "Current version ($CLEAN_CURRENT) is NEWER than or EQUAL to latest ($CLEAN_LATEST)"
+  echo "should-push-latest-tag=true" >> $GITHUB_OUTPUT
+else
+  echo "Current version ($CLEAN_CURRENT) is OLDER than latest ($CLEAN_LATEST)"
+  echo "should-push-latest-tag=false" >> $GITHUB_OUTPUT
+fi
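A minimal sketch of how the new check-version.sh script might be invoked, assuming a release job passes the computed version as the single argument (the calling workflow is not part of this diff, and `v0.12.0` is an illustrative value; `GITHUB_OUTPUT` is normally provided by the Actions runtime):

```bash
# Requires curl and jq, plus network access to api.github.com.
export GITHUB_OUTPUT="${GITHUB_OUTPUT:-/tmp/github_output}"  # set automatically in CI
./.github/scripts/check-version.sh v0.12.0
cat "$GITHUB_OUTPUT"  # prints should-push-latest-tag=true or should-push-latest-tag=false
```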
.github/scripts/create-version.sh (29 changes)

@@ -8,24 +8,25 @@ set -e
 # - If it's a nightly build, the version is 'nightly-YYYYMMDD-$(git rev-parse --short HEAD)', like 'nightly-20230712-e5b243c'.
 # create_version ${GIHUB_EVENT_NAME} ${NEXT_RELEASE_VERSION} ${NIGHTLY_RELEASE_PREFIX}
 function create_version() {
-# Read from envrionment variables.
+# Read from environment variables.
 if [ -z "$GITHUB_EVENT_NAME" ]; then
-echo "GITHUB_EVENT_NAME is empty"
+echo "GITHUB_EVENT_NAME is empty" >&2
 exit 1
 fi

 if [ -z "$NEXT_RELEASE_VERSION" ]; then
-echo "NEXT_RELEASE_VERSION is empty"
-exit 1
+echo "NEXT_RELEASE_VERSION is empty, use version from Cargo.toml" >&2
+# NOTE: Need a `v` prefix for the version string.
+export NEXT_RELEASE_VERSION=v$(grep '^version = ' Cargo.toml | cut -d '"' -f 2 | head -n 1)
 fi

 if [ -z "$NIGHTLY_RELEASE_PREFIX" ]; then
-echo "NIGHTLY_RELEASE_PREFIX is empty"
+echo "NIGHTLY_RELEASE_PREFIX is empty" >&2
 exit 1
 fi

 # Reuse $NEXT_RELEASE_VERSION to identify whether it's a nightly build.
-# It will be like 'nigtly-20230808-7d0d8dc6'.
+# It will be like 'nightly-20230808-7d0d8dc6'.
 if [ "$NEXT_RELEASE_VERSION" = nightly ]; then
 echo "$NIGHTLY_RELEASE_PREFIX-$(date "+%Y%m%d")-$(git rev-parse --short HEAD)"
 exit 0

@@ -35,7 +36,7 @@ function create_version() {
 # It will be like 'dev-2023080819-f0e7216c'.
 if [ "$NEXT_RELEASE_VERSION" = dev ]; then
 if [ -z "$COMMIT_SHA" ]; then
-echo "COMMIT_SHA is empty in dev build"
+echo "COMMIT_SHA is empty in dev build" >&2
 exit 1
 fi
 echo "dev-$(date "+%Y%m%d-%s")-$(echo "$COMMIT_SHA" | cut -c1-8)"

@@ -45,7 +46,7 @@ function create_version() {
 # Note: Only output 'version=xxx' to stdout when everything is ok, so that it can be used in GitHub Actions Outputs.
 if [ "$GITHUB_EVENT_NAME" = push ]; then
 if [ -z "$GITHUB_REF_NAME" ]; then
-echo "GITHUB_REF_NAME is empty in push event"
+echo "GITHUB_REF_NAME is empty in push event" >&2
 exit 1
 fi
 echo "$GITHUB_REF_NAME"

@@ -54,15 +55,15 @@ function create_version() {
 elif [ "$GITHUB_EVENT_NAME" = schedule ]; then
 echo "$NEXT_RELEASE_VERSION-$NIGHTLY_RELEASE_PREFIX-$(date "+%Y%m%d")"
 else
-echo "Unsupported GITHUB_EVENT_NAME: $GITHUB_EVENT_NAME"
+echo "Unsupported GITHUB_EVENT_NAME: $GITHUB_EVENT_NAME" >&2
 exit 1
 fi
 }

 # You can run as following examples:
-# GITHUB_EVENT_NAME=push NEXT_RELEASE_VERSION=v0.4.0 NIGHTLY_RELEASE_PREFIX=nigtly GITHUB_REF_NAME=v0.3.0 ./create-version.sh
-# GITHUB_EVENT_NAME=workflow_dispatch NEXT_RELEASE_VERSION=v0.4.0 NIGHTLY_RELEASE_PREFIX=nigtly ./create-version.sh
-# GITHUB_EVENT_NAME=schedule NEXT_RELEASE_VERSION=v0.4.0 NIGHTLY_RELEASE_PREFIX=nigtly ./create-version.sh
-# GITHUB_EVENT_NAME=schedule NEXT_RELEASE_VERSION=nightly NIGHTLY_RELEASE_PREFIX=nigtly ./create-version.sh
-# GITHUB_EVENT_NAME=workflow_dispatch COMMIT_SHA=f0e7216c4bb6acce9b29a21ec2d683be2e3f984a NEXT_RELEASE_VERSION=dev NIGHTLY_RELEASE_PREFIX=nigtly ./create-version.sh
+# GITHUB_EVENT_NAME=push NEXT_RELEASE_VERSION=v0.4.0 NIGHTLY_RELEASE_PREFIX=nightly GITHUB_REF_NAME=v0.3.0 ./create-version.sh
+# GITHUB_EVENT_NAME=workflow_dispatch NEXT_RELEASE_VERSION=v0.4.0 NIGHTLY_RELEASE_PREFIX=nightly ./create-version.sh
+# GITHUB_EVENT_NAME=schedule NEXT_RELEASE_VERSION=v0.4.0 NIGHTLY_RELEASE_PREFIX=nightly ./create-version.sh
+# GITHUB_EVENT_NAME=schedule NEXT_RELEASE_VERSION=nightly NIGHTLY_RELEASE_PREFIX=nightly ./create-version.sh
+# GITHUB_EVENT_NAME=workflow_dispatch COMMIT_SHA=f0e7216c4bb6acce9b29a21ec2d683be2e3f984a NEXT_RELEASE_VERSION=dev NIGHTLY_RELEASE_PREFIX=nightly ./create-version.sh
 create_version
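The Cargo.toml fallback introduced above can be exercised on its own; a minimal sketch with an illustrative version value:

```bash
# Derive the release version from Cargo.toml when NEXT_RELEASE_VERSION is unset,
# prepending the `v` prefix exactly as the new branch of create-version.sh does.
NEXT_RELEASE_VERSION=v$(grep '^version = ' Cargo.toml | cut -d '"' -f 2 | head -n 1)
echo "$NEXT_RELEASE_VERSION"  # e.g. v0.12.0 when Cargo.toml contains version = "0.12.0"
```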
.github/scripts/deploy-greptimedb.sh (6 changes)

@@ -10,7 +10,7 @@ GREPTIMEDB_IMAGE_TAG=${GREPTIMEDB_IMAGE_TAG:-latest}
 ETCD_CHART="oci://registry-1.docker.io/bitnamicharts/etcd"
 GREPTIME_CHART="https://greptimeteam.github.io/helm-charts/"

-# Ceate a cluster with 1 control-plane node and 5 workers.
+# Create a cluster with 1 control-plane node and 5 workers.
 function create_kind_cluster() {
 cat <<EOF | kind create cluster --name "${CLUSTER}" --image kindest/node:"$KUBERNETES_VERSION" --config=-
 kind: Cluster

@@ -68,7 +68,7 @@ function deploy_greptimedb_cluster() {

 helm install "$cluster_name" greptime/greptimedb-cluster \
 --set image.tag="$GREPTIMEDB_IMAGE_TAG" \
---set meta.etcdEndpoints="etcd.$install_namespace:2379" \
+--set meta.backendStorage.etcd.endpoints="etcd.$install_namespace:2379" \
 -n "$install_namespace"

 # Wait for greptimedb cluster to be ready.

@@ -103,7 +103,7 @@ function deploy_greptimedb_cluster_with_s3_storage() {

 helm install "$cluster_name" greptime/greptimedb-cluster -n "$install_namespace" \
 --set image.tag="$GREPTIMEDB_IMAGE_TAG" \
---set meta.etcdEndpoints="etcd.$install_namespace:2379" \
+--set meta.backendStorage.etcd.endpoints="etcd.$install_namespace:2379" \
 --set storage.s3.bucket="$AWS_CI_TEST_BUCKET" \
 --set storage.s3.region="$AWS_REGION" \
 --set storage.s3.root="$DATA_ROOT" \
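A minimal sketch of the renamed chart value used above, assuming the `greptime` Helm repo is already added and an etcd service is reachable (the release name matches the one used in the CI actions; the endpoint and namespace are illustrative):

```bash
# etcd endpoints now live under meta.backendStorage.etcd.endpoints
# instead of the old meta.etcdEndpoints key.
helm upgrade --install my-greptimedb greptime/greptimedb-cluster \
  --set meta.backendStorage.etcd.endpoints="etcd.etcd-cluster.svc.cluster.local:2379" \
  -n my-greptimedb
```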
.github/scripts/update-dev-builder-version.sh (new executable file, 37 lines)

@@ -0,0 +1,37 @@
+#!/bin/bash
+
+DEV_BUILDER_IMAGE_TAG=$1
+
+update_dev_builder_version() {
+  if [ -z "$DEV_BUILDER_IMAGE_TAG" ]; then
+    echo "Error: Should specify the dev-builder image tag"
+    exit 1
+  fi
+
+  # Configure Git configs.
+  git config --global user.email greptimedb-ci@greptime.com
+  git config --global user.name greptimedb-ci
+
+  # Checkout a new branch.
+  BRANCH_NAME="ci/update-dev-builder-$(date +%Y%m%d%H%M%S)"
+  git checkout -b $BRANCH_NAME
+
+  # Update the dev-builder image tag in the Makefile.
+  sed -i "s/DEV_BUILDER_IMAGE_TAG ?=.*/DEV_BUILDER_IMAGE_TAG ?= ${DEV_BUILDER_IMAGE_TAG}/g" Makefile
+
+  # Commit the changes.
+  git add Makefile
+  git commit -m "ci: update dev-builder image tag"
+  git push origin $BRANCH_NAME
+
+  # Create a Pull Request.
+  gh pr create \
+    --title "ci: update dev-builder image tag" \
+    --body "This PR updates the dev-builder image tag" \
+    --base main \
+    --head $BRANCH_NAME \
+    --reviewer zyy17 \
+    --reviewer daviderli614
+}
+
+update_dev_builder_version
.github/scripts/update-helm-charts-version.sh (new executable file, 46 lines)

@@ -0,0 +1,46 @@
+#!/bin/bash
+
+set -e
+
+VERSION=${VERSION}
+GITHUB_TOKEN=${GITHUB_TOKEN}
+
+update_helm_charts_version() {
+  # Configure Git configs.
+  git config --global user.email update-helm-charts-version@greptime.com
+  git config --global user.name update-helm-charts-version
+
+  # Clone helm-charts repository.
+  git clone "https://x-access-token:${GITHUB_TOKEN}@github.com/GreptimeTeam/helm-charts.git"
+  cd helm-charts
+
+  # Set default remote for gh CLI
+  gh repo set-default GreptimeTeam/helm-charts
+
+  # Checkout a new branch.
+  BRANCH_NAME="chore/greptimedb-${VERSION}"
+  git checkout -b $BRANCH_NAME
+
+  # Update version.
+  make update-version CHART=greptimedb-cluster VERSION=${VERSION}
+  make update-version CHART=greptimedb-standalone VERSION=${VERSION}
+
+  # Update docs.
+  make docs
+
+  # Commit the changes.
+  git add .
+  git commit -s -m "chore: Update GreptimeDB version to ${VERSION}"
+  git push origin $BRANCH_NAME
+
+  # Create a Pull Request.
+  gh pr create \
+    --title "chore: Update GreptimeDB version to ${VERSION}" \
+    --body "This PR updates the GreptimeDB version." \
+    --base main \
+    --head $BRANCH_NAME \
+    --reviewer zyy17 \
+    --reviewer daviderli614
+}
+
+update_helm_charts_version
.github/scripts/update-homebrew-greptme-version.sh (new executable file, 42 lines)

@@ -0,0 +1,42 @@
+#!/bin/bash
+
+set -e
+
+VERSION=${VERSION}
+GITHUB_TOKEN=${GITHUB_TOKEN}
+
+update_homebrew_greptime_version() {
+  # Configure Git configs.
+  git config --global user.email update-greptime-version@greptime.com
+  git config --global user.name update-greptime-version
+
+  # Clone helm-charts repository.
+  git clone "https://x-access-token:${GITHUB_TOKEN}@github.com/GreptimeTeam/homebrew-greptime.git"
+  cd homebrew-greptime
+
+  # Set default remote for gh CLI
+  gh repo set-default GreptimeTeam/homebrew-greptime
+
+  # Checkout a new branch.
+  BRANCH_NAME="chore/greptimedb-${VERSION}"
+  git checkout -b $BRANCH_NAME
+
+  # Update version.
+  make update-greptime-version VERSION=${VERSION}
+
+  # Commit the changes.
+  git add .
+  git commit -s -m "chore: Update GreptimeDB version to ${VERSION}"
+  git push origin $BRANCH_NAME
+
+  # Create a Pull Request.
+  gh pr create \
+    --title "chore: Update GreptimeDB version to ${VERSION}" \
+    --body "This PR updates the GreptimeDB version." \
+    --base main \
+    --head $BRANCH_NAME \
+    --reviewer zyy17 \
+    --reviewer daviderli614
+}
+
+update_homebrew_greptime_version
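Both `update-helm-charts-version.sh` and `update-homebrew-greptme-version.sh` read `VERSION` and `GITHUB_TOKEN` from the environment; a hypothetical local invocation, assuming an authenticated `gh` CLI and an illustrative version value:

```bash
VERSION=v0.12.0 GITHUB_TOKEN="$(gh auth token)" \
  ./.github/scripts/update-helm-charts-version.sh
```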
.github/scripts/upload-artifacts-to-s3.sh (2 changes)

@@ -41,7 +41,7 @@ function upload_artifacts() {
 # Updates the latest version information in AWS S3 if UPDATE_VERSION_INFO is true.
 function update_version_info() {
 if [ "$UPDATE_VERSION_INFO" == "true" ]; then
-# If it's the officail release(like v1.0.0, v1.0.1, v1.0.2, etc.), update latest-version.txt.
+# If it's the official release(like v1.0.0, v1.0.1, v1.0.2, etc.), update latest-version.txt.
 if [[ "$VERSION" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
 echo "Updating latest-version.txt"
 echo "$VERSION" > latest-version.txt
.github/workflows/apidoc.yml (2 changes)

@@ -14,7 +14,7 @@ name: Build API docs

 jobs:
 apidoc:
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
 steps:
 - uses: actions/checkout@v4
 with:
.github/workflows/dev-build.yml (36 changes)

@@ -16,11 +16,11 @@ on:
 description: The runner uses to build linux-amd64 artifacts
 default: ec2-c6i.4xlarge-amd64
 options:
-- ubuntu-20.04
-- ubuntu-20.04-8-cores
-- ubuntu-20.04-16-cores
-- ubuntu-20.04-32-cores
-- ubuntu-20.04-64-cores
+- ubuntu-22.04
+- ubuntu-22.04-8-cores
+- ubuntu-22.04-16-cores
+- ubuntu-22.04-32-cores
+- ubuntu-22.04-64-cores
 - ec2-c6i.xlarge-amd64 # 4C8G
 - ec2-c6i.2xlarge-amd64 # 8C16G
 - ec2-c6i.4xlarge-amd64 # 16C32G

@@ -55,6 +55,11 @@ on:
 description: Build and push images to DockerHub and ACR
 required: false
 default: true
+upload_artifacts_to_s3:
+type: boolean
+description: Whether upload artifacts to s3
+required: false
+default: false
 cargo_profile:
 type: choice
 description: The cargo profile to use in building GreptimeDB.

@@ -83,7 +88,7 @@ jobs:
 allocate-runners:
 name: Allocate runners
 if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
 outputs:
 linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }}
 linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }}

@@ -218,7 +223,7 @@ jobs:
 build-linux-amd64-artifacts,
 build-linux-arm64-artifacts,
 ]
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
 outputs:
 build-result: ${{ steps.set-build-result.outputs.build-result }}
 steps:

@@ -239,6 +244,13 @@ jobs:
 push-latest-tag: false # Don't push the latest tag to registry.
 dev-mode: true # Only build the standard images.

+- name: Echo Docker image tag to step summary
+run: |
+echo "## Docker Image Tag" >> $GITHUB_STEP_SUMMARY
+echo "Image Tag: \`${{ needs.allocate-runners.outputs.version }}\`" >> $GITHUB_STEP_SUMMARY
+echo "Full Image Name: \`docker.io/${{ vars.IMAGE_NAMESPACE }}/${{ vars.DEV_BUILD_IMAGE_NAME }}:${{ needs.allocate-runners.outputs.version }}\`" >> $GITHUB_STEP_SUMMARY
+echo "Pull Command: \`docker pull docker.io/${{ vars.IMAGE_NAMESPACE }}/${{ vars.DEV_BUILD_IMAGE_NAME }}:${{ needs.allocate-runners.outputs.version }}\`" >> $GITHUB_STEP_SUMMARY
+
 - name: Set build result
 id: set-build-result
 run: |

@@ -251,7 +263,7 @@ jobs:
 allocate-runners,
 release-images-to-dockerhub,
 ]
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
 continue-on-error: true
 steps:
 - uses: actions/checkout@v4

@@ -274,7 +286,7 @@ jobs:
 aws-cn-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
 aws-cn-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
 aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
-upload-to-s3: false
+upload-to-s3: ${{ inputs.upload_artifacts_to_s3 }}
 dev-mode: true # Only build the standard images(exclude centos images).
 push-latest-tag: false # Don't push the latest tag to registry.
 update-version-info: false # Don't update the version info in S3.

@@ -283,7 +295,7 @@ jobs:
 name: Stop linux-amd64 runner
 # Only run this job when the runner is allocated.
 if: ${{ always() }}
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
 needs: [
 allocate-runners,
 build-linux-amd64-artifacts,

@@ -309,7 +321,7 @@ jobs:
 name: Stop linux-arm64 runner
 # Only run this job when the runner is allocated.
 if: ${{ always() }}
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
 needs: [
 allocate-runners,
 build-linux-arm64-artifacts,

@@ -337,7 +349,7 @@ jobs:
 needs: [
 release-images-to-dockerhub
 ]
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
 permissions:
 issues: write
.github/workflows/develop.yml (80 changes)

@@ -22,8 +22,9 @@ concurrency:

 jobs:
 check-typos-and-docs:
+if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
 name: Check typos and docs
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
 steps:
 - uses: actions/checkout@v4
 with:

@@ -36,7 +37,8 @@ jobs:
 || (echo "'config/config.md' is not up-to-date, please run 'make config-docs'." && exit 1)

 license-header-check:
-runs-on: ubuntu-20.04
+if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
+runs-on: ubuntu-latest
 name: Check License Header
 steps:
 - uses: actions/checkout@v4

@@ -45,11 +47,12 @@ jobs:
 - uses: korandoru/hawkeye@v5

 check:
+if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
 name: Check
 runs-on: ${{ matrix.os }}
 strategy:
 matrix:
-os: [ ubuntu-20.04 ]
+os: [ ubuntu-latest ]
 timeout-minutes: 60
 steps:
 - uses: actions/checkout@v4

@@ -71,8 +74,9 @@ jobs:
 run: cargo check --locked --workspace --all-targets

 toml:
+if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
 name: Toml Check
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
 timeout-minutes: 60
 steps:
 - uses: actions/checkout@v4

@@ -85,11 +89,12 @@ jobs:
 run: taplo format --check

 build:
+if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
 name: Build GreptimeDB binaries
 runs-on: ${{ matrix.os }}
 strategy:
 matrix:
-os: [ ubuntu-20.04 ]
+os: [ ubuntu-latest ]
 timeout-minutes: 60
 steps:
 - uses: actions/checkout@v4

@@ -127,6 +132,7 @@ jobs:
 version: current

 fuzztest:
+if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
 name: Fuzz Test
 needs: build
 runs-on: ubuntu-latest

@@ -183,11 +189,13 @@ jobs:
 max-total-time: 120

 unstable-fuzztest:
+if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
 name: Unstable Fuzz Test
 needs: build-greptime-ci
 runs-on: ubuntu-latest
 timeout-minutes: 60
 strategy:
+fail-fast: false
 matrix:
 target: [ "unstable_fuzz_create_table_standalone" ]
 steps:

@@ -215,12 +223,12 @@ jobs:
 run: |
 sudo apt update && sudo apt install -y libfuzzer-14-dev
 cargo install cargo-fuzz cargo-gc-bin --force
-- name: Download pre-built binariy
+- name: Download pre-built binary
 uses: actions/download-artifact@v4
 with:
 name: bin
 path: .
-- name: Unzip bianry
+- name: Unzip binary
 run: |
 tar -xvf ./bin.tar.gz
 rm ./bin.tar.gz

@@ -242,13 +250,19 @@ jobs:
 name: unstable-fuzz-logs
 path: /tmp/unstable-greptime/
 retention-days: 3
+- name: Describe pods
+if: failure()
+shell: bash
+run: |
+kubectl describe pod -n my-greptimedb

 build-greptime-ci:
+if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
 name: Build GreptimeDB binary (profile-CI)
 runs-on: ${{ matrix.os }}
 strategy:
 matrix:
-os: [ ubuntu-20.04 ]
+os: [ ubuntu-latest ]
 timeout-minutes: 60
 steps:
 - uses: actions/checkout@v4

@@ -267,7 +281,7 @@ jobs:
 - name: Install cargo-gc-bin
 shell: bash
 run: cargo install cargo-gc-bin --force
-- name: Build greptime bianry
+- name: Build greptime binary
 shell: bash
 # `cargo gc` will invoke `cargo build` with specified args
 run: cargo gc --profile ci -- --bin greptime --features pg_kvbackend

@@ -285,11 +299,13 @@ jobs:
 version: current

 distributed-fuzztest:
+if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
 name: Fuzz Test (Distributed, ${{ matrix.mode.name }}, ${{ matrix.target }})
 runs-on: ubuntu-latest
 needs: build-greptime-ci
 timeout-minutes: 60
 strategy:
+fail-fast: false
 matrix:
 target: [ "fuzz_create_table", "fuzz_alter_table", "fuzz_create_database", "fuzz_create_logical_table", "fuzz_alter_logical_table", "fuzz_insert", "fuzz_insert_logical_table" ]
 mode:

@@ -319,9 +335,9 @@ jobs:
 name: Setup Minio
 uses: ./.github/actions/setup-minio
 - if: matrix.mode.kafka
-name: Setup Kafka cluser
+name: Setup Kafka cluster
 uses: ./.github/actions/setup-kafka-cluster
-- name: Setup Etcd cluser
+- name: Setup Etcd cluster
 uses: ./.github/actions/setup-etcd-cluster
 # Prepares for fuzz tests
 - uses: arduino/setup-protoc@v3

@@ -394,6 +410,11 @@ jobs:
 shell: bash
 run: |
 kubectl describe nodes
+- name: Describe pod
+if: failure()
+shell: bash
+run: |
+kubectl describe pod -n my-greptimedb
 - name: Export kind logs
 if: failure()
 shell: bash

@@ -416,11 +437,13 @@ jobs:
 docker system prune -f

 distributed-fuzztest-with-chaos:
+if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
 name: Fuzz Test with Chaos (Distributed, ${{ matrix.mode.name }}, ${{ matrix.target }})
 runs-on: ubuntu-latest
 needs: build-greptime-ci
 timeout-minutes: 60
 strategy:
+fail-fast: false
 matrix:
 target: ["fuzz_migrate_mito_regions", "fuzz_migrate_metric_regions", "fuzz_failover_mito_regions", "fuzz_failover_metric_regions"]
 mode:

@@ -465,9 +488,9 @@ jobs:
 name: Setup Minio
 uses: ./.github/actions/setup-minio
 - if: matrix.mode.kafka
-name: Setup Kafka cluser
+name: Setup Kafka cluster
 uses: ./.github/actions/setup-kafka-cluster
-- name: Setup Etcd cluser
+- name: Setup Etcd cluster
 uses: ./.github/actions/setup-etcd-cluster
 # Prepares for fuzz tests
 - uses: arduino/setup-protoc@v3

@@ -541,6 +564,11 @@ jobs:
 shell: bash
 run: |
 kubectl describe nodes
+- name: Describe pods
+if: failure()
+shell: bash
+run: |
+kubectl describe pod -n my-greptimedb
 - name: Export kind logs
 if: failure()
 shell: bash

@@ -563,12 +591,14 @@ jobs:
 docker system prune -f

 sqlness:
+if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
 name: Sqlness Test (${{ matrix.mode.name }})
 needs: build
 runs-on: ${{ matrix.os }}
 strategy:
+fail-fast: false
 matrix:
-os: [ ubuntu-20.04 ]
+os: [ ubuntu-latest ]
 mode:
 - name: "Basic"
 opts: ""

@@ -576,7 +606,7 @@ jobs:
 - name: "Remote WAL"
 opts: "-w kafka -k 127.0.0.1:9092"
 kafka: true
-- name: "Pg Kvbackend"
+- name: "PostgreSQL KvBackend"
 opts: "--setup-pg"
 kafka: false
 timeout-minutes: 60

@@ -606,8 +636,9 @@ jobs:
 retention-days: 3

 fmt:
+if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
 name: Rustfmt
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
 timeout-minutes: 60
 steps:
 - uses: actions/checkout@v4

@@ -623,8 +654,9 @@ jobs:
 run: make fmt-check

 clippy:
+if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
 name: Clippy
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
 timeout-minutes: 60
 steps:
 - uses: actions/checkout@v4

@@ -648,6 +680,7 @@ jobs:
 run: make clippy

 conflict-check:
+if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
 name: Check for conflict
 runs-on: ubuntu-latest
 steps:

@@ -658,7 +691,7 @@ jobs:
 uses: olivernybroe/action-conflict-finder@v4.0

 test:
-if: github.event_name != 'merge_group'
+if: ${{ github.repository == 'GreptimeTeam/greptimedb' && github.event_name != 'merge_group' }}
 runs-on: ubuntu-22.04-arm
 timeout-minutes: 60
 needs: [conflict-check, clippy, fmt]

@@ -673,7 +706,7 @@ jobs:
 - name: Install toolchain
 uses: actions-rust-lang/setup-rust-toolchain@v1
 with:
 cache: false
 - name: Rust Cache
 uses: Swatinem/rust-cache@v2
 with:

@@ -704,13 +737,14 @@ jobs:
 GT_MINIO_ENDPOINT_URL: http://127.0.0.1:9000
 GT_ETCD_ENDPOINTS: http://127.0.0.1:2379
 GT_POSTGRES_ENDPOINTS: postgres://greptimedb:admin@127.0.0.1:5432/postgres
+GT_MYSQL_ENDPOINTS: mysql://greptimedb:admin@127.0.0.1:3306/mysql
 GT_KAFKA_ENDPOINTS: 127.0.0.1:9092
 GT_KAFKA_SASL_ENDPOINTS: 127.0.0.1:9093
 UNITTEST_LOG_DIR: "__unittest_logs"

 coverage:
-if: github.event_name == 'merge_group'
+if: ${{ github.repository == 'GreptimeTeam/greptimedb' && github.event_name == 'merge_group' }}
-runs-on: ubuntu-20.04-8-cores
+runs-on: ubuntu-22.04-8-cores
 timeout-minutes: 60
 steps:
 - uses: actions/checkout@v4

@@ -755,6 +789,7 @@ jobs:
 GT_MINIO_ENDPOINT_URL: http://127.0.0.1:9000
 GT_ETCD_ENDPOINTS: http://127.0.0.1:2379
 GT_POSTGRES_ENDPOINTS: postgres://greptimedb:admin@127.0.0.1:5432/postgres
+GT_MYSQL_ENDPOINTS: mysql://greptimedb:admin@127.0.0.1:3306/mysql
 GT_KAFKA_ENDPOINTS: 127.0.0.1:9092
 GT_KAFKA_SASL_ENDPOINTS: 127.0.0.1:9093
 UNITTEST_LOG_DIR: "__unittest_logs"

@@ -768,9 +803,10 @@ jobs:
 verbose: true

 # compat:
+# if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
 # name: Compatibility Test
 # needs: build
-# runs-on: ubuntu-20.04
+# runs-on: ubuntu-22.04
 # timeout-minutes: 60
 # steps:
 # - uses: actions/checkout@v4
.github/workflows/docbot.yml (6 changes)

@@ -3,9 +3,13 @@ on:
 pull_request_target:
 types: [opened, edited]

+concurrency:
+group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
+cancel-in-progress: true
+
 jobs:
 docbot:
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
 permissions:
 pull-requests: write
 contents: read
.github/workflows/docs.yml (16 changes)

@@ -31,7 +31,7 @@ name: CI
 jobs:
 typos:
 name: Spell Check with Typos
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
 steps:
 - uses: actions/checkout@v4
 with:

@@ -39,7 +39,7 @@ jobs:
 - uses: crate-ci/typos@master

 license-header-check:
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
 name: Check License Header
 steps:
 - uses: actions/checkout@v4

@@ -49,29 +49,29 @@ jobs:

 check:
 name: Check
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
 steps:
 - run: 'echo "No action required"'

 fmt:
 name: Rustfmt
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
 steps:
 - run: 'echo "No action required"'

 clippy:
 name: Clippy
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
 steps:
 - run: 'echo "No action required"'

 coverage:
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
 steps:
 - run: 'echo "No action required"'

 test:
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
 steps:
 - run: 'echo "No action required"'

@@ -80,7 +80,7 @@ jobs:
 runs-on: ${{ matrix.os }}
 strategy:
 matrix:
-os: [ ubuntu-20.04 ]
+os: [ ubuntu-latest ]
 mode:
 - name: "Basic"
 - name: "Remote WAL"
.github/workflows/nightly-build.yml (22 changes)

@@ -14,11 +14,11 @@ on:
 description: The runner uses to build linux-amd64 artifacts
 default: ec2-c6i.4xlarge-amd64
 options:
-- ubuntu-20.04
-- ubuntu-20.04-8-cores
-- ubuntu-20.04-16-cores
-- ubuntu-20.04-32-cores
-- ubuntu-20.04-64-cores
+- ubuntu-22.04
+- ubuntu-22.04-8-cores
+- ubuntu-22.04-16-cores
+- ubuntu-22.04-32-cores
+- ubuntu-22.04-64-cores
 - ec2-c6i.xlarge-amd64 # 4C8G
 - ec2-c6i.2xlarge-amd64 # 8C16G
 - ec2-c6i.4xlarge-amd64 # 16C32G

@@ -70,7 +70,7 @@ jobs:
 allocate-runners:
 name: Allocate runners
 if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
 outputs:
 linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }}
 linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }}

@@ -182,7 +182,7 @@ jobs:
 build-linux-amd64-artifacts,
 build-linux-arm64-artifacts,
 ]
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
 outputs:
 nightly-build-result: ${{ steps.set-nightly-build-result.outputs.nightly-build-result }}
 steps:

@@ -214,7 +214,7 @@ jobs:
 allocate-runners,
 release-images-to-dockerhub,
 ]
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
 # When we push to ACR, it's easy to fail due to some unknown network issues.
 # However, we don't want to fail the whole workflow because of this.
 # The ACR have daily sync with DockerHub, so don't worry about the image not being updated.

@@ -249,7 +249,7 @@ jobs:
 name: Stop linux-amd64 runner
 # Only run this job when the runner is allocated.
 if: ${{ always() }}
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
 needs: [
 allocate-runners,
 build-linux-amd64-artifacts,

@@ -275,7 +275,7 @@ jobs:
 name: Stop linux-arm64 runner
 # Only run this job when the runner is allocated.
 if: ${{ always() }}
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
 needs: [
 allocate-runners,
 build-linux-arm64-artifacts,

@@ -303,7 +303,7 @@ jobs:
 needs: [
 release-images-to-dockerhub
 ]
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
 permissions:
 issues: write
 env:
.github/workflows/nightly-ci.yml (17 changes, vendored)

@@ -13,7 +13,7 @@ jobs:
   sqlness-test:
     name: Run sqlness test
     if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
-    runs-on: ubuntu-22.04
+    runs-on: ubuntu-latest
     steps:
       - name: Checkout
         uses: actions/checkout@v4
@@ -107,7 +107,6 @@ jobs:
       CARGO_BUILD_RUSTFLAGS: "-C linker=lld-link"
       RUST_BACKTRACE: 1
       CARGO_INCREMENTAL: 0
-      RUSTUP_WINDOWS_PATH_ADD_BIN: 1 # Workaround for https://github.com/nextest-rs/nextest/issues/1493
       GT_S3_BUCKET: ${{ vars.AWS_CI_TEST_BUCKET }}
       GT_S3_ACCESS_KEY_ID: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
       GT_S3_ACCESS_KEY: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
@@ -118,22 +117,22 @@ jobs:
     name: Run clean build on Linux
     runs-on: ubuntu-latest
     if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
-    timeout-minutes: 60
+    timeout-minutes: 45
     steps:
       - uses: actions/checkout@v4
         with:
           fetch-depth: 0
           persist-credentials: false
-      - uses: cachix/install-nix-action@v27
-        with:
-          nix_path: nixpkgs=channel:nixos-24.11
-      - run: nix develop --command cargo build
+      - uses: cachix/install-nix-action@v31
+      - run: nix develop --command cargo check --bin greptime
+        env:
+          CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=mold"

   check-status:
     name: Check status
     needs: [sqlness-test, sqlness-windows, test-on-windows]
     if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     outputs:
       check-result: ${{ steps.set-check-result.outputs.check-result }}
     steps:
@@ -146,7 +145,7 @@ jobs:
     if: ${{ github.repository == 'GreptimeTeam/greptimedb' && always() }} # Not requiring successful dependent jobs, always run.
     name: Send notification to Greptime team
     needs: [check-status]
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     env:
       SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
     steps:
.github/workflows/pr-labeling.yaml (new file, 42 changes, vendored)

@@ -0,0 +1,42 @@
+name: 'PR Labeling'
+
+on:
+  pull_request_target:
+    types:
+      - opened
+      - synchronize
+      - reopened
+
+permissions:
+  contents: read
+  pull-requests: write
+  issues: write
+
+jobs:
+  labeler:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout sources
+        uses: actions/checkout@v4
+
+      - uses: actions/labeler@v5
+        with:
+          configuration-path: ".github/labeler.yaml"
+          repo-token: "${{ secrets.GITHUB_TOKEN }}"
+
+  size-label:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: pascalgn/size-label-action@v0.5.5
+        env:
+          GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
+        with:
+          sizes: >
+            {
+              "0": "XS",
+              "100": "S",
+              "300": "M",
+              "1000": "L",
+              "1500": "XL",
+              "2000": "XXL"
+            }
@@ -24,12 +24,20 @@ on:
         description: Release dev-builder-android image
         required: false
         default: false
+      update_dev_builder_image_tag:
+        type: boolean
+        description: Update the DEV_BUILDER_IMAGE_TAG in Makefile and create a PR
+        required: false
+        default: false

 jobs:
   release-dev-builder-images:
     name: Release dev builder images
-    if: ${{ inputs.release_dev_builder_ubuntu_image || inputs.release_dev_builder_centos_image || inputs.release_dev_builder_android_image }} # Only manually trigger this job.
-    runs-on: ubuntu-20.04-16-cores
+    # The jobs are triggered by the following events:
+    # 1. Manually triggered workflow_dispatch event
+    # 2. Push event when the PR that modifies the `rust-toolchain.toml` or `docker/dev-builder/**` is merged to main
+    if: ${{ github.event_name == 'push' || inputs.release_dev_builder_ubuntu_image || inputs.release_dev_builder_centos_image || inputs.release_dev_builder_android_image }}
+    runs-on: ubuntu-latest
     outputs:
       version: ${{ steps.set-version.outputs.version }}
     steps:
@@ -57,13 +65,13 @@ jobs:
           version: ${{ env.VERSION }}
           dockerhub-image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
           dockerhub-image-registry-token: ${{ secrets.DOCKERHUB_TOKEN }}
-          build-dev-builder-ubuntu: ${{ inputs.release_dev_builder_ubuntu_image }}
-          build-dev-builder-centos: ${{ inputs.release_dev_builder_centos_image }}
-          build-dev-builder-android: ${{ inputs.release_dev_builder_android_image }}
+          build-dev-builder-ubuntu: ${{ inputs.release_dev_builder_ubuntu_image || github.event_name == 'push' }}
+          build-dev-builder-centos: ${{ inputs.release_dev_builder_centos_image || github.event_name == 'push' }}
+          build-dev-builder-android: ${{ inputs.release_dev_builder_android_image || github.event_name == 'push' }}

   release-dev-builder-images-ecr:
     name: Release dev builder images to AWS ECR
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     needs: [
       release-dev-builder-images
     ]
@@ -85,7 +93,7 @@ jobs:

       - name: Push dev-builder-ubuntu image
         shell: bash
-        if: ${{ inputs.release_dev_builder_ubuntu_image }}
+        if: ${{ inputs.release_dev_builder_ubuntu_image || github.event_name == 'push' }}
         env:
           IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
           IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
@@ -106,7 +114,7 @@ jobs:

       - name: Push dev-builder-centos image
         shell: bash
-        if: ${{ inputs.release_dev_builder_centos_image }}
+        if: ${{ inputs.release_dev_builder_centos_image || github.event_name == 'push' }}
         env:
           IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
           IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
@@ -127,7 +135,7 @@ jobs:

       - name: Push dev-builder-android image
         shell: bash
-        if: ${{ inputs.release_dev_builder_android_image }}
+        if: ${{ inputs.release_dev_builder_android_image || github.event_name == 'push' }}
         env:
           IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
           IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
@@ -148,7 +156,7 @@ jobs:

   release-dev-builder-images-cn: # Note: Be careful issue: https://github.com/containers/skopeo/issues/1874 and we decide to use the latest stable skopeo container.
     name: Release dev builder images to CN region
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     needs: [
       release-dev-builder-images
     ]
@@ -162,7 +170,7 @@ jobs:

       - name: Push dev-builder-ubuntu image
         shell: bash
-        if: ${{ inputs.release_dev_builder_ubuntu_image }}
+        if: ${{ inputs.release_dev_builder_ubuntu_image || github.event_name == 'push' }}
         env:
           IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
           IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
@@ -176,7 +184,7 @@ jobs:

       - name: Push dev-builder-centos image
         shell: bash
-        if: ${{ inputs.release_dev_builder_centos_image }}
+        if: ${{ inputs.release_dev_builder_centos_image || github.event_name == 'push' }}
         env:
           IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
           IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
@@ -190,7 +198,7 @@ jobs:

       - name: Push dev-builder-android image
         shell: bash
-        if: ${{ inputs.release_dev_builder_android_image }}
+        if: ${{ inputs.release_dev_builder_android_image || github.event_name == 'push' }}
         env:
           IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
           IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
@@ -201,3 +209,24 @@ jobs:
           quay.io/skopeo/stable:latest \
           copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-android:$IMAGE_VERSION \
           docker://$ACR_IMAGE_REGISTRY/$IMAGE_NAMESPACE/dev-builder-android:$IMAGE_VERSION
+
+  update-dev-builder-image-tag:
+    name: Update dev-builder image tag
+    runs-on: ubuntu-latest
+    permissions:
+      contents: write
+      pull-requests: write
+    if: ${{ github.event_name == 'push' || inputs.update_dev_builder_image_tag }}
+    needs: [
+      release-dev-builder-images
+    ]
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+
+      - name: Update dev-builder image tag
+        shell: bash
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        run: |
+          ./.github/scripts/update-dev-builder-version.sh ${{ needs.release-dev-builder-images.outputs.version }}
.github/workflows/release.yml (101 changes, vendored)

@@ -18,11 +18,11 @@ on:
         description: The runner uses to build linux-amd64 artifacts
         default: ec2-c6i.4xlarge-amd64
         options:
-          - ubuntu-20.04
-          - ubuntu-20.04-8-cores
-          - ubuntu-20.04-16-cores
-          - ubuntu-20.04-32-cores
-          - ubuntu-20.04-64-cores
+          - ubuntu-22.04
+          - ubuntu-22.04-8-cores
+          - ubuntu-22.04-16-cores
+          - ubuntu-22.04-32-cores
+          - ubuntu-22.04-64-cores
           - ec2-c6i.xlarge-amd64 # 4C8G
           - ec2-c6i.2xlarge-amd64 # 8C16G
           - ec2-c6i.4xlarge-amd64 # 16C32G
@@ -88,16 +88,14 @@ env:
   # Controls whether to run tests, include unit-test, integration-test and sqlness.
   DISABLE_RUN_TESTS: ${{ inputs.skip_test || vars.DEFAULT_SKIP_TEST }}

-  # The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nigthly-20230313;
+  # The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nightly-20230313;
   NIGHTLY_RELEASE_PREFIX: nightly
-  # Note: The NEXT_RELEASE_VERSION should be modified manually by every formal release.
-  NEXT_RELEASE_VERSION: v0.12.0

 jobs:
   allocate-runners:
     name: Allocate runners
     if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     outputs:
       linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }}
       linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }}
@@ -112,6 +110,8 @@ jobs:

       # The 'version' use as the global tag name of the release workflow.
       version: ${{ steps.create-version.outputs.version }}

+      should-push-latest-tag: ${{ steps.check-version.outputs.should-push-latest-tag }}
     steps:
       - name: Checkout
         uses: actions/checkout@v4
@@ -126,7 +126,7 @@ jobs:

       # The create-version will create a global variable named 'version' in the global workflows.
       # - If it's a tag push release, the version is the tag name(${{ github.ref_name }});
-      # - If it's a scheduled release, the version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-$buildTime', like v0.2.0-nigthly-20230313;
+      # - If it's a scheduled release, the version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-$buildTime', like v0.2.0-nightly-20230313;
       # - If it's a manual release, the version is '${{ env.NEXT_RELEASE_VERSION }}-<short-git-sha>-YYYYMMDDSS', like v0.2.0-e5b243c-2023071245;
       - name: Create version
         id: create-version
@@ -135,9 +135,13 @@ jobs:
         env:
           GITHUB_EVENT_NAME: ${{ github.event_name }}
           GITHUB_REF_NAME: ${{ github.ref_name }}
-          NEXT_RELEASE_VERSION: ${{ env.NEXT_RELEASE_VERSION }}
           NIGHTLY_RELEASE_PREFIX: ${{ env.NIGHTLY_RELEASE_PREFIX }}

+      - name: Check version
+        id: check-version
+        run: |
+          ./.github/scripts/check-version.sh "${{ steps.create-version.outputs.version }}"
+
       - name: Allocate linux-amd64 runner
         if: ${{ inputs.build_linux_amd64_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
         uses: ./.github/actions/start-runner
@@ -299,7 +303,7 @@ jobs:
       build-linux-amd64-artifacts,
       build-linux-arm64-artifacts,
     ]
-    runs-on: ubuntu-2004-16-cores
+    runs-on: ubuntu-latest
     outputs:
       build-image-result: ${{ steps.set-build-image-result.outputs.build-image-result }}
     steps:
@@ -317,7 +321,7 @@ jobs:
           image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
           image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }}
           version: ${{ needs.allocate-runners.outputs.version }}
-          push-latest-tag: true
+          push-latest-tag: ${{ needs.allocate-runners.outputs.should-push-latest-tag == 'true' && github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' }}

       - name: Set build image result
         id: set-build-image-result
@@ -335,7 +339,7 @@ jobs:
       build-windows-artifacts,
       release-images-to-dockerhub,
     ]
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest-16-cores
     # When we push to ACR, it's easy to fail due to some unknown network issues.
     # However, we don't want to fail the whole workflow because of this.
     # The ACR have daily sync with DockerHub, so don't worry about the image not being updated.
@@ -364,7 +368,7 @@ jobs:
           dev-mode: false
           upload-to-s3: true
           update-version-info: true
-          push-latest-tag: true
+          push-latest-tag: ${{ needs.allocate-runners.outputs.should-push-latest-tag == 'true' && github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' }}

   publish-github-release:
     name: Create GitHub release and upload artifacts
@@ -377,7 +381,7 @@ jobs:
       build-windows-artifacts,
       release-images-to-dockerhub,
     ]
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v4
         with:
@@ -391,12 +395,12 @@ jobs:

   ### Stop runners ###
   # It's very necessary to split the job of releasing runners into 'stop-linux-amd64-runner' and 'stop-linux-arm64-runner'.
-  # Because we can terminate the specified EC2 instance immediately after the job is finished without uncessary waiting.
+  # Because we can terminate the specified EC2 instance immediately after the job is finished without unnecessary waiting.
   stop-linux-amd64-runner: # It's always run as the last job in the workflow to make sure that the runner is released.
     name: Stop linux-amd64 runner
     # Only run this job when the runner is allocated.
     if: ${{ always() }}
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     needs: [
       allocate-runners,
       build-linux-amd64-artifacts,
@@ -422,7 +426,7 @@ jobs:
     name: Stop linux-arm64 runner
     # Only run this job when the runner is allocated.
     if: ${{ always() }}
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     needs: [
       allocate-runners,
       build-linux-arm64-artifacts,
@@ -444,11 +448,11 @@ jobs:
           aws-region: ${{ vars.EC2_RUNNER_REGION }}
           github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}

-  bump-doc-version:
-    name: Bump doc version
+  bump-downstream-repo-versions:
+    name: Bump downstream repo versions
     if: ${{ github.event_name == 'push' || github.event_name == 'schedule' }}
-    needs: [allocate-runners]
-    runs-on: ubuntu-20.04
+    needs: [allocate-runners, publish-github-release]
+    runs-on: ubuntu-latest
     # Permission reference: https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs
     permissions:
       issues: write # Allows the action to create issues for cyborg.
@@ -459,13 +463,58 @@ jobs:
           fetch-depth: 0
           persist-credentials: false
       - uses: ./.github/actions/setup-cyborg
-      - name: Bump doc version
+      - name: Bump downstream repo versions
         working-directory: cyborg
-        run: pnpm tsx bin/bump-doc-version.ts
+        run: pnpm tsx bin/bump-versions.ts
         env:
+          TARGET_REPOS: website,docs,demo
           VERSION: ${{ needs.allocate-runners.outputs.version }}
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          WEBSITE_REPO_TOKEN: ${{ secrets.WEBSITE_REPO_TOKEN }}
           DOCS_REPO_TOKEN: ${{ secrets.DOCS_REPO_TOKEN }}
+          DEMO_REPO_TOKEN: ${{ secrets.DEMO_REPO_TOKEN }}
+
+  bump-helm-charts-version:
+    name: Bump helm charts version
+    if: ${{ github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' }}
+    needs: [allocate-runners, publish-github-release]
+    runs-on: ubuntu-latest
+    permissions:
+      contents: write
+      pull-requests: write
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+
+      - name: Bump helm charts version
+        env:
+          GITHUB_TOKEN: ${{ secrets.HELM_CHARTS_REPO_TOKEN }}
+          VERSION: ${{ needs.allocate-runners.outputs.version }}
+        run: |
+          ./.github/scripts/update-helm-charts-version.sh
+
+  bump-homebrew-greptime-version:
+    name: Bump homebrew greptime version
+    if: ${{ github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' }}
+    needs: [allocate-runners, publish-github-release]
+    runs-on: ubuntu-latest
+    permissions:
+      contents: write
+      pull-requests: write
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+
+      - name: Bump homebrew greptime version
+        env:
+          GITHUB_TOKEN: ${{ secrets.HOMEBREW_GREPTIME_REPO_TOKEN }}
+          VERSION: ${{ needs.allocate-runners.outputs.version }}
+        run: |
+          ./.github/scripts/update-homebrew-greptme-version.sh
+
   notification:
     if: ${{ github.repository == 'GreptimeTeam/greptimedb' && (github.event_name == 'push' || github.event_name == 'schedule') && always() }}
@@ -475,7 +524,7 @@ jobs:
       build-macos-artifacts,
       build-windows-artifacts,
     ]
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     # Permission reference: https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs
     permissions:
       issues: write # Allows the action to create issues for cyborg.
.github/workflows/semantic-pull-request.yml (13 changes, vendored)

@@ -7,14 +7,21 @@ on:
       - reopened
       - edited

+concurrency:
+  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
+  cancel-in-progress: true
+
+permissions:
+  issues: write
+  contents: write
+  pull-requests: write
+
 jobs:
   check:
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     timeout-minutes: 10
     steps:
       - uses: actions/checkout@v4
-        with:
-          persist-credentials: false
       - uses: ./.github/actions/setup-cyborg
       - name: Check Pull Request
         working-directory: cyborg
.gitignore (7 changes, vendored)

@@ -54,3 +54,10 @@ tests-fuzz/corpus/
 # Nix
 .direnv
 .envrc
+
+## default data home
+greptimedb_data
+
+# github
+!/.github
+
AUTHOR.md (21 changes)

@@ -3,30 +3,28 @@
 ## Individual Committers (in alphabetical order)

 * [CookiePieWw](https://github.com/CookiePieWw)
-* [KKould](https://github.com/KKould)
-* [NiwakaDev](https://github.com/NiwakaDev)
 * [etolbakov](https://github.com/etolbakov)
 * [irenjj](https://github.com/irenjj)
-* [tisonkun](https://github.com/tisonkun)
+* [KKould](https://github.com/KKould)
 * [Lanqing Yang](https://github.com/lyang24)
+* [NiwakaDev](https://github.com/NiwakaDev)
+* [tisonkun](https://github.com/tisonkun)


 ## Team Members (in alphabetical order)

-* [Breeze-P](https://github.com/Breeze-P)
-* [GrepTime](https://github.com/GrepTime)
-* [MichaelScofield](https://github.com/MichaelScofield)
-* [Wenjie0329](https://github.com/Wenjie0329)
-* [WenyXu](https://github.com/WenyXu)
-* [ZonaHex](https://github.com/ZonaHex)
 * [apdong2022](https://github.com/apdong2022)
 * [beryl678](https://github.com/beryl678)
+* [Breeze-P](https://github.com/Breeze-P)
 * [daviderli614](https://github.com/daviderli614)
 * [discord9](https://github.com/discord9)
 * [evenyag](https://github.com/evenyag)
 * [fengjiachun](https://github.com/fengjiachun)
 * [fengys1996](https://github.com/fengys1996)
+* [GrepTime](https://github.com/GrepTime)
 * [holalengyu](https://github.com/holalengyu)
 * [killme2008](https://github.com/killme2008)
+* [MichaelScofield](https://github.com/MichaelScofield)
 * [nicecui](https://github.com/nicecui)
 * [paomian](https://github.com/paomian)
 * [shuiyisong](https://github.com/shuiyisong)
@@ -34,11 +32,14 @@
 * [sunng87](https://github.com/sunng87)
 * [v0y4g3r](https://github.com/v0y4g3r)
 * [waynexia](https://github.com/waynexia)
+* [Wenjie0329](https://github.com/Wenjie0329)
+* [WenyXu](https://github.com/WenyXu)
 * [xtang](https://github.com/xtang)
 * [zhaoyingnan01](https://github.com/zhaoyingnan01)
 * [zhongzc](https://github.com/zhongzc)
+* [ZonaHex](https://github.com/ZonaHex)
 * [zyy17](https://github.com/zyy17)

 ## All Contributors

-[](https://github.com/GreptimeTeam/greptimedb/graphs/contributors)
+To see the full list of contributors, please visit our [Contributors page](https://github.com/GreptimeTeam/greptimedb/graphs/contributors)
Cargo.lock (235 changes, generated)

@@ -185,7 +185,7 @@ checksum = "d301b3b94cb4b2f23d7917810addbbaff90738e0ca2be692bd027e70d7e0330c"
 name = "api"
-version = "0.12.0"
+version = "0.12.2"
@@ -432,7 +432,7 @@ dependencies = [
- "indexmap 2.6.0",
+ "indexmap 2.7.1",
@@ -710,7 +710,7 @@ dependencies = [
 name = "auth"
-version = "0.12.0"
+version = "0.12.2"
@@ -1324,7 +1324,7 @@ dependencies = [
 name = "cache"
-version = "0.12.0"
+version = "0.12.2"
@@ -1348,7 +1348,7 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"
 name = "catalog"
-version = "0.12.0"
+version = "0.12.2"
@@ -1475,7 +1475,7 @@ version = "0.13.7"
- "indexmap 2.6.0",
+ "indexmap 2.7.1",
@@ -1661,7 +1661,7 @@ checksum = "1462739cb27611015575c0c11df5df7601141071f07518d56fcc1be504cbec97"
 name = "cli"
-version = "0.12.0"
+version = "0.12.2"
@@ -1703,7 +1703,7 @@ dependencies = [
- "substrait 0.12.0",
+ "substrait 0.12.2",
@@ -1712,7 +1712,7 @@ dependencies = [
 name = "client"
-version = "0.12.0"
+version = "0.12.2"
@@ -1739,7 +1739,7 @@ dependencies = [
- "substrait 0.12.0",
+ "substrait 0.12.2",
@@ -1780,7 +1780,7 @@ dependencies = [
 name = "cmd"
-version = "0.12.0"
+version = "0.12.2"
@@ -1791,6 +1791,7 @@ dependencies = [
+ "colored",
@@ -1825,7 +1826,10 @@ dependencies = [
+ "object-store",
+ "parquet",
+ "pprof",
@@ -1841,7 +1845,7 @@ dependencies = [
- "substrait 0.12.0",
+ "substrait 0.12.2",
@@ -1858,6 +1862,16 @@ version = "1.0.2"
+[[package]]
+name = "colored"
+version = "2.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "117725a109d387c937a1533ce01b450cbde6b88abceea8473c4d7a85853cda3c"
+dependencies = [
+ "lazy_static",
+ "windows-sys 0.59.0",
+]
@@ -1887,7 +1901,7 @@ checksum = "55b672471b4e9f9e95499ea597ff64941a309b2cdbffcc46f2cc5e2d971fd335"
 name = "common-base"
-version = "0.12.0"
+version = "0.12.2"
@@ -1909,11 +1923,11 @@ dependencies = [
 name = "common-catalog"
-version = "0.12.0"
+version = "0.12.2"
 name = "common-config"
-version = "0.12.0"
+version = "0.12.2"
@@ -1938,7 +1952,7 @@ dependencies = [
 name = "common-datasource"
-version = "0.12.0"
+version = "0.12.2"
@@ -1974,7 +1988,7 @@ dependencies = [
 name = "common-decimal"
-version = "0.12.0"
+version = "0.12.2"
@@ -1987,7 +2001,7 @@ dependencies = [
 name = "common-error"
-version = "0.12.0"
+version = "0.12.2"
@@ -1997,7 +2011,7 @@ dependencies = [
 name = "common-frontend"
-version = "0.12.0"
+version = "0.12.2"
@@ -2007,12 +2021,14 @@ dependencies = [
 name = "common-function"
-version = "0.12.0"
+version = "0.12.2"
 dependencies = [
+ "ahash 0.8.11",
+ "bincode",
@@ -2030,6 +2046,7 @@ dependencies = [
+ "hyperloglogplus",
@@ -2046,12 +2063,13 @@ dependencies = [
+ "uddsketch",
 name = "common-greptimedb-telemetry"
-version = "0.12.0"
+version = "0.12.2"
@@ -2068,7 +2086,7 @@ dependencies = [
 name = "common-grpc"
-version = "0.12.0"
+version = "0.12.2"
@@ -2096,7 +2114,7 @@ dependencies = [
 name = "common-grpc-expr"
-version = "0.12.0"
+version = "0.12.2"
@@ -2115,7 +2133,7 @@ dependencies = [
 name = "common-macro"
-version = "0.12.0"
+version = "0.12.2"
@@ -2129,7 +2147,7 @@ dependencies = [
 name = "common-mem-prof"
-version = "0.12.0"
+version = "0.12.2"
@@ -2142,7 +2160,7 @@ dependencies = [
 name = "common-meta"
-version = "0.12.0"
+version = "0.12.2"
@@ -2202,7 +2220,7 @@ dependencies = [
 name = "common-options"
-version = "0.12.0"
+version = "0.12.2"
@@ -2211,11 +2229,11 @@ dependencies = [
 name = "common-plugins"
-version = "0.12.0"
+version = "0.12.2"
 name = "common-pprof"
-version = "0.12.0"
+version = "0.12.2"
@@ -2227,7 +2245,7 @@ dependencies = [
 name = "common-procedure"
-version = "0.12.0"
+version = "0.12.2"
@@ -2254,7 +2272,7 @@ dependencies = [
 name = "common-procedure-test"
-version = "0.12.0"
+version = "0.12.2"
@@ -2262,7 +2280,7 @@ dependencies = [
 name = "common-query"
-version = "0.12.0"
+version = "0.12.2"
@@ -2288,7 +2306,7 @@ dependencies = [
 name = "common-recordbatch"
-version = "0.12.0"
+version = "0.12.2"
@@ -2307,7 +2325,7 @@ dependencies = [
 name = "common-runtime"
-version = "0.12.0"
+version = "0.12.2"
@@ -2337,7 +2355,7 @@ dependencies = [
 name = "common-telemetry"
-version = "0.12.0"
+version = "0.12.2"
@@ -2365,7 +2383,7 @@ dependencies = [
 name = "common-test-util"
-version = "0.12.0"
+version = "0.12.2"
@@ -2377,7 +2395,7 @@ dependencies = [
 name = "common-time"
-version = "0.12.0"
+version = "0.12.2"
@@ -2395,7 +2413,7 @@ dependencies = [
 name = "common-version"
-version = "0.12.0"
+version = "0.12.2"
@@ -2405,7 +2423,7 @@ dependencies = [
 name = "common-wal"
-version = "0.12.0"
+version = "0.12.2"
@@ -2972,7 +2990,7 @@ dependencies = [
- "indexmap 2.6.0",
+ "indexmap 2.7.1",
@@ -3032,7 +3050,7 @@ dependencies = [
- "indexmap 2.6.0",
+ "indexmap 2.7.1",
@@ -3154,7 +3172,7 @@ dependencies = [
- "indexmap 2.6.0",
+ "indexmap 2.7.1",
@@ -3205,7 +3223,7 @@ dependencies = [
- "indexmap 2.6.0",
+ "indexmap 2.7.1",
@@ -3230,7 +3248,7 @@ dependencies = [
- "indexmap 2.6.0",
+ "indexmap 2.7.1",
@@ -3289,7 +3307,7 @@ dependencies = [
- "indexmap 2.6.0",
+ "indexmap 2.7.1",
@@ -3309,7 +3327,7 @@ dependencies = [
- "indexmap 2.6.0",
+ "indexmap 2.7.1",
@@ -3336,7 +3354,7 @@ dependencies = [
 name = "datanode"
-version = "0.12.0"
+version = "0.12.2"
@@ -3388,7 +3406,7 @@ dependencies = [
- "substrait 0.12.0",
+ "substrait 0.12.2",
@@ -3397,7 +3415,7 @@ dependencies = [
 name = "datatypes"
-version = "0.12.0"
+version = "0.12.2"
@@ -4041,7 +4059,7 @@ dependencies = [
 name = "file-engine"
-version = "0.12.0"
+version = "0.12.2"
@@ -4151,7 +4169,7 @@ checksum = "8bf7cc16383c4b8d58b9905a8509f02926ce3058053c056376248d958c9df1e8"
 name = "flow"
-version = "0.12.0"
+version = "0.12.2"
@@ -4212,7 +4230,7 @@ dependencies = [
- "substrait 0.12.0",
+ "substrait 0.12.2",
@@ -4267,7 +4285,7 @@ checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa"
 name = "frontend"
-version = "0.12.0"
+version = "0.12.2"
@@ -4695,7 +4713,7 @@ dependencies = [
 name = "greptime-proto"
 version = "0.1.0"
-source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=a25adc8a01340231121646d8f0a29d0e92f45461#a25adc8a01340231121646d8f0a29d0e92f45461"
+source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=072ce580502e015df1a6b03a185b60309a7c2a7a#072ce580502e015df1a6b03a185b60309a7c2a7a"
@@ -4718,7 +4736,7 @@ dependencies = [
- "indexmap 2.6.0",
+ "indexmap 2.7.1",
@@ -4737,7 +4755,7 @@ dependencies = [
- "indexmap 2.6.0",
+ "indexmap 2.7.1",
@@ -5287,6 +5305,15 @@ dependencies = [
+[[package]]
+name = "hyperloglogplus"
+version = "0.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "621debdf94dcac33e50475fdd76d34d5ea9c0362a834b9db08c3024696c1fbe3"
+dependencies = [
+ "serde",
+]
@@ -5526,7 +5553,7 @@ dependencies = [
 name = "index"
-version = "0.12.0"
+version = "0.12.2"
@@ -5575,9 +5602,9 @@ dependencies = [
 name = "indexmap"
-version = "2.6.0"
+version = "2.7.1"
-checksum = "707907fe3c25f5424cce2cb7e1cbcafee6bdbe735ca90ef77c29e84591e5b9da"
+checksum = "8c9c992b02b5b4c94ea26e32fe5bccb7aa7d9f390ab5c1221ff895bc7ea8b652"
@@ -5591,7 +5618,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
- "indexmap 2.6.0",
+ "indexmap 2.7.1",
@@ -5938,7 +5965,7 @@ version = "0.4.2"
- "indexmap 2.6.0",
+ "indexmap 2.7.1",
@@ -6318,7 +6345,7 @@ checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24"
 name = "log-query"
-version = "0.12.0"
+version = "0.12.2"
@@ -6330,7 +6357,7 @@ dependencies = [
 name = "log-store"
-version = "0.12.0"
+version = "0.12.2"
@@ -6421,7 +6448,7 @@ dependencies = [
- "indexmap 2.6.0",
+ "indexmap 2.7.1",
@@ -6623,7 +6650,7 @@ dependencies = [
 name = "meta-client"
-version = "0.12.0"
+version = "0.12.2"
@@ -6650,7 +6677,7 @@ dependencies = [
 name = "meta-srv"
-version = "0.12.0"
+version = "0.12.2"
@@ -6736,7 +6763,7 @@ dependencies = [
 name = "metric-engine"
-version = "0.12.0"
+version = "0.12.2"
@@ -6834,7 +6861,7 @@ dependencies = [
 name = "mito2"
-version = "0.12.0"
+version = "0.12.2"
@@ -7531,7 +7558,7 @@ dependencies = [
 name = "object-store"
-version = "0.12.0"
+version = "0.12.2"
@@ -7662,7 +7689,7 @@ checksum = "1e32339a5dc40459130b3bd269e9892439f55b33e772d2a9d402a789baaf4e8a"
- "indexmap 2.6.0",
+ "indexmap 2.7.1",
@@ -7780,7 +7807,7 @@ dependencies = [
 name = "operator"
-version = "0.12.0"
+version = "0.12.2"
@@ -7828,7 +7855,7 @@ dependencies = [
  "sql",
  "sqlparser 0.52.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=71dd86058d2af97b9925093d40c4e03360403170)",
|
"sqlparser 0.52.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=71dd86058d2af97b9925093d40c4e03360403170)",
|
||||||
"store-api",
|
"store-api",
|
||||||
"substrait 0.12.0",
|
"substrait 0.12.2",
|
||||||
"table",
|
"table",
|
||||||
"tokio",
|
"tokio",
|
||||||
"tokio-util",
|
"tokio-util",
|
||||||
@@ -8065,7 +8092,7 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "partition"
|
name = "partition"
|
||||||
version = "0.12.0"
|
version = "0.12.2"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"api",
|
"api",
|
||||||
"async-trait",
|
"async-trait",
|
||||||
@@ -8234,7 +8261,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
|||||||
checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db"
|
checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"fixedbitset",
|
"fixedbitset",
|
||||||
"indexmap 2.6.0",
|
"indexmap 2.7.1",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
@@ -8333,7 +8360,7 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "pipeline"
|
name = "pipeline"
|
||||||
version = "0.12.0"
|
version = "0.12.2"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"ahash 0.8.11",
|
"ahash 0.8.11",
|
||||||
"api",
|
"api",
|
||||||
@@ -8473,7 +8500,7 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "plugins"
|
name = "plugins"
|
||||||
version = "0.12.0"
|
version = "0.12.2"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"auth",
|
"auth",
|
||||||
"clap 4.5.19",
|
"clap 4.5.19",
|
||||||
@@ -8735,7 +8762,7 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "promql"
|
name = "promql"
|
||||||
version = "0.12.0"
|
version = "0.12.2"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"ahash 0.8.11",
|
"ahash 0.8.11",
|
||||||
"async-trait",
|
"async-trait",
|
||||||
@@ -8980,7 +9007,7 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "puffin"
|
name = "puffin"
|
||||||
version = "0.12.0"
|
version = "0.12.2"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"async-compression 0.4.13",
|
"async-compression 0.4.13",
|
||||||
"async-trait",
|
"async-trait",
|
||||||
@@ -9021,7 +9048,7 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "query"
|
name = "query"
|
||||||
version = "0.12.0"
|
version = "0.12.2"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"ahash 0.8.11",
|
"ahash 0.8.11",
|
||||||
"api",
|
"api",
|
||||||
@@ -9086,7 +9113,7 @@ dependencies = [
|
|||||||
"sqlparser 0.52.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=71dd86058d2af97b9925093d40c4e03360403170)",
|
"sqlparser 0.52.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=71dd86058d2af97b9925093d40c4e03360403170)",
|
||||||
"statrs",
|
"statrs",
|
||||||
"store-api",
|
"store-api",
|
||||||
"substrait 0.12.0",
|
"substrait 0.12.2",
|
||||||
"table",
|
"table",
|
||||||
"tokio",
|
"tokio",
|
||||||
"tokio-stream",
|
"tokio-stream",
|
||||||
@@ -10325,7 +10352,7 @@ version = "1.0.137"
|
|||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "930cfb6e6abf99298aaad7d29abbef7a9999a9a8806a40088f55f0dcec03146b"
|
checksum = "930cfb6e6abf99298aaad7d29abbef7a9999a9a8806a40088f55f0dcec03146b"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"indexmap 2.6.0",
|
"indexmap 2.7.1",
|
||||||
"itoa",
|
"itoa",
|
||||||
"memchr",
|
"memchr",
|
||||||
"ryu",
|
"ryu",
|
||||||
@@ -10396,7 +10423,7 @@ dependencies = [
|
|||||||
"chrono",
|
"chrono",
|
||||||
"hex",
|
"hex",
|
||||||
"indexmap 1.9.3",
|
"indexmap 1.9.3",
|
||||||
"indexmap 2.6.0",
|
"indexmap 2.7.1",
|
||||||
"serde",
|
"serde",
|
||||||
"serde_derive",
|
"serde_derive",
|
||||||
"serde_json",
|
"serde_json",
|
||||||
@@ -10422,7 +10449,7 @@ version = "0.9.34+deprecated"
|
|||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47"
|
checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"indexmap 2.6.0",
|
"indexmap 2.7.1",
|
||||||
"itoa",
|
"itoa",
|
||||||
"ryu",
|
"ryu",
|
||||||
"serde",
|
"serde",
|
||||||
@@ -10431,7 +10458,7 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "servers"
|
name = "servers"
|
||||||
version = "0.12.0"
|
version = "0.12.2"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"ahash 0.8.11",
|
"ahash 0.8.11",
|
||||||
"api",
|
"api",
|
||||||
@@ -10483,6 +10510,7 @@ dependencies = [
|
|||||||
"humantime",
|
"humantime",
|
||||||
"humantime-serde",
|
"humantime-serde",
|
||||||
"hyper 1.4.1",
|
"hyper 1.4.1",
|
||||||
|
"indexmap 2.7.1",
|
||||||
"influxdb_line_protocol",
|
"influxdb_line_protocol",
|
||||||
"itertools 0.10.5",
|
"itertools 0.10.5",
|
||||||
"json5",
|
"json5",
|
||||||
@@ -10547,7 +10575,7 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "session"
|
name = "session"
|
||||||
version = "0.12.0"
|
version = "0.12.2"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"api",
|
"api",
|
||||||
"arc-swap",
|
"arc-swap",
|
||||||
@@ -10856,7 +10884,7 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "sql"
|
name = "sql"
|
||||||
version = "0.12.0"
|
version = "0.12.2"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"api",
|
"api",
|
||||||
"chrono",
|
"chrono",
|
||||||
@@ -10893,12 +10921,12 @@ dependencies = [
|
|||||||
[[package]]
|
[[package]]
|
||||||
name = "sqlness"
|
name = "sqlness"
|
||||||
version = "0.6.1"
|
version = "0.6.1"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "git+https://github.com/CeresDB/sqlness.git?rev=bb91f31ff58993e07ea89845791235138283a24c#bb91f31ff58993e07ea89845791235138283a24c"
|
||||||
checksum = "308a7338f2211813d6e9da117e9b9b7aee5d072872d11a934002fd2bd4ab5276"
|
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"async-trait",
|
"async-trait",
|
||||||
"derive_builder 0.11.2",
|
"derive_builder 0.11.2",
|
||||||
"duration-str",
|
"duration-str",
|
||||||
|
"futures",
|
||||||
"minijinja",
|
"minijinja",
|
||||||
"prettydiff",
|
"prettydiff",
|
||||||
"regex",
|
"regex",
|
||||||
@@ -10910,7 +10938,7 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "sqlness-runner"
|
name = "sqlness-runner"
|
||||||
version = "0.12.0"
|
version = "0.12.2"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"async-trait",
|
"async-trait",
|
||||||
"clap 4.5.19",
|
"clap 4.5.19",
|
||||||
@@ -10924,6 +10952,7 @@ dependencies = [
|
|||||||
"hex",
|
"hex",
|
||||||
"local-ip-address",
|
"local-ip-address",
|
||||||
"mysql",
|
"mysql",
|
||||||
|
"num_cpus",
|
||||||
"reqwest",
|
"reqwest",
|
||||||
"serde",
|
"serde",
|
||||||
"serde_json",
|
"serde_json",
|
||||||
@@ -11023,7 +11052,7 @@ dependencies = [
|
|||||||
"futures-util",
|
"futures-util",
|
||||||
"hashbrown 0.15.2",
|
"hashbrown 0.15.2",
|
||||||
"hashlink",
|
"hashlink",
|
||||||
"indexmap 2.6.0",
|
"indexmap 2.7.1",
|
||||||
"log",
|
"log",
|
||||||
"memchr",
|
"memchr",
|
||||||
"once_cell",
|
"once_cell",
|
||||||
@@ -11226,7 +11255,7 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "store-api"
|
name = "store-api"
|
||||||
version = "0.12.0"
|
version = "0.12.2"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"api",
|
"api",
|
||||||
"aquamarine",
|
"aquamarine",
|
||||||
@@ -11356,7 +11385,7 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "substrait"
|
name = "substrait"
|
||||||
version = "0.12.0"
|
version = "0.12.2"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"async-trait",
|
"async-trait",
|
||||||
"bytes",
|
"bytes",
|
||||||
@@ -11537,7 +11566,7 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "table"
|
name = "table"
|
||||||
version = "0.12.0"
|
version = "0.12.2"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"api",
|
"api",
|
||||||
"async-trait",
|
"async-trait",
|
||||||
@@ -11788,7 +11817,7 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76"
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "tests-fuzz"
|
name = "tests-fuzz"
|
||||||
version = "0.12.0"
|
version = "0.12.2"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"arbitrary",
|
"arbitrary",
|
||||||
"async-trait",
|
"async-trait",
|
||||||
@@ -11832,7 +11861,7 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "tests-integration"
|
name = "tests-integration"
|
||||||
version = "0.12.0"
|
version = "0.12.2"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"api",
|
"api",
|
||||||
"arrow-flight",
|
"arrow-flight",
|
||||||
@@ -11898,7 +11927,7 @@ dependencies = [
|
|||||||
"sql",
|
"sql",
|
||||||
"sqlx",
|
"sqlx",
|
||||||
"store-api",
|
"store-api",
|
||||||
"substrait 0.12.0",
|
"substrait 0.12.2",
|
||||||
"table",
|
"table",
|
||||||
"tempfile",
|
"tempfile",
|
||||||
"time",
|
"time",
|
||||||
@@ -12319,7 +12348,7 @@ version = "0.19.15"
|
|||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421"
|
checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"indexmap 2.6.0",
|
"indexmap 2.7.1",
|
||||||
"toml_datetime",
|
"toml_datetime",
|
||||||
"winnow 0.5.40",
|
"winnow 0.5.40",
|
||||||
]
|
]
|
||||||
@@ -12330,7 +12359,7 @@ version = "0.22.22"
|
|||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5"
|
checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"indexmap 2.6.0",
|
"indexmap 2.7.1",
|
||||||
"serde",
|
"serde",
|
||||||
"serde_spanned",
|
"serde_spanned",
|
||||||
"toml_datetime",
|
"toml_datetime",
|
||||||
@@ -12468,7 +12497,7 @@ dependencies = [
|
|||||||
"futures-core",
|
"futures-core",
|
||||||
"futures-util",
|
"futures-util",
|
||||||
"hdrhistogram",
|
"hdrhistogram",
|
||||||
"indexmap 2.6.0",
|
"indexmap 2.7.1",
|
||||||
"pin-project-lite",
|
"pin-project-lite",
|
||||||
"slab",
|
"slab",
|
||||||
"sync_wrapper 1.0.1",
|
"sync_wrapper 1.0.1",
|
||||||
@@ -12956,6 +12985,14 @@ version = "0.1.7"
|
|||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "2896d95c02a80c6d6a5d6e953d479f5ddf2dfdb6a244441010e373ac0fb88971"
|
checksum = "2896d95c02a80c6d6a5d6e953d479f5ddf2dfdb6a244441010e373ac0fb88971"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "uddsketch"
|
||||||
|
version = "0.1.0"
|
||||||
|
source = "git+https://github.com/GreptimeTeam/timescaledb-toolkit.git?rev=84828fe8fb494a6a61412a3da96517fc80f7bb20#84828fe8fb494a6a61412a3da96517fc80f7bb20"
|
||||||
|
dependencies = [
|
||||||
|
"serde",
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "unescaper"
|
name = "unescaper"
|
||||||
version = "0.1.5"
|
version = "0.1.5"
|
||||||
|
|||||||
@@ -67,7 +67,7 @@ members = [
resolver = "2"

[workspace.package]
-version = "0.12.0"
+version = "0.12.2"
edition = "2021"
license = "Apache-2.0"

@@ -129,7 +129,7 @@ etcd-client = "0.14"
fst = "0.4.7"
futures = "0.3"
futures-util = "0.3"
-greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "a25adc8a01340231121646d8f0a29d0e92f45461" }
+greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "072ce580502e015df1a6b03a185b60309a7c2a7a" }
hex = "0.4"
http = "1"
humantime = "2.1"
@@ -319,6 +319,7 @@
| `selector` | String | `round_robin` | Datanode selector type.<br/>- `round_robin` (default value)<br/>- `lease_based`<br/>- `load_based`<br/>For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector". |
| `use_memory_store` | Bool | `false` | Store data in memory. |
| `enable_region_failover` | Bool | `false` | Whether to enable region failover.<br/>This feature is only available on GreptimeDB running on cluster mode and<br/>- Using Remote WAL<br/>- Using shared storage (e.g., s3). |
+| `node_max_idle_time` | String | `24hours` | Max allowed idle time before removing node info from metasrv memory. |
| `enable_telemetry` | Bool | `true` | Whether to enable greptimedb telemetry. Enabled by default. |
| `runtime` | -- | -- | The runtime options. |
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
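For illustration, the metasrv options documented in the table above map onto a TOML config such as the minimal sketch below (values are the documented defaults; the snippet itself is not part of this diff):

# Minimal metasrv option sketch (hypothetical values, following the table above).
use_memory_store = false
enable_region_failover = false
# New option introduced by this change: evict node info that stays idle longer than this.
node_max_idle_time = "24hours"
enable_telemetry = true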
@@ -231,6 +231,7 @@ overwrite_entry_start_id = false
# secret_access_key = "123456"
# endpoint = "https://s3.amazonaws.com"
# region = "us-west-2"
+# enable_virtual_host_style = false

# Example of using Oss as the storage.
# [storage]
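For reference, a minimal sketch of an uncommented S3 storage section using the new flag (bucket, credentials and endpoint are placeholders, not taken from this diff):

[storage]
type = "S3"
bucket = "my-bucket"
root = "greptimedb"
access_key_id = "test"
secret_access_key = "123456"
endpoint = "https://s3.amazonaws.com"
region = "us-west-2"
# Use bucket.host-style request URLs instead of path-style requests.
enable_virtual_host_style = true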
@@ -50,6 +50,9 @@ use_memory_store = false
## - Using shared storage (e.g., s3).
enable_region_failover = false

+## Max allowed idle time before removing node info from metasrv memory.
+node_max_idle_time = "24hours"
+
## Whether to enable greptimedb telemetry. Enabled by default.
#+ enable_telemetry = true

@@ -318,6 +318,7 @@ retry_delay = "500ms"
# secret_access_key = "123456"
# endpoint = "https://s3.amazonaws.com"
# region = "us-west-2"
+# enable_virtual_host_style = false

# Example of using Oss as the storage.
# [storage]
156 cyborg/bin/bump-versions.ts Normal file
@@ -0,0 +1,156 @@
/*
 * Copyright 2023 Greptime Team
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import * as core from "@actions/core";
import {obtainClient} from "@/common";

interface RepoConfig {
  tokenEnv: string;
  repo: string;
  workflowLogic: (version: string) => [string, string] | null;
}

const REPO_CONFIGS: Record<string, RepoConfig> = {
  website: {
    tokenEnv: "WEBSITE_REPO_TOKEN",
    repo: "website",
    workflowLogic: (version: string) => {
      // Skip nightly versions for website
      if (version.includes('nightly')) {
        console.log('Nightly version detected for website, skipping workflow trigger.');
        return null;
      }
      return ['bump-patch-version.yml', version];
    }
  },
  demo: {
    tokenEnv: "DEMO_REPO_TOKEN",
    repo: "demo-scene",
    workflowLogic: (version: string) => {
      // Skip nightly versions for demo
      if (version.includes('nightly')) {
        console.log('Nightly version detected for demo, skipping workflow trigger.');
        return null;
      }
      return ['bump-patch-version.yml', version];
    }
  },
  docs: {
    tokenEnv: "DOCS_REPO_TOKEN",
    repo: "docs",
    workflowLogic: (version: string) => {
      // Check if it's a nightly version
      if (version.includes('nightly')) {
        return ['bump-nightly-version.yml', version];
      }

      const parts = version.split('.');
      if (parts.length !== 3) {
        throw new Error('Invalid version format');
      }

      // If patch version (last number) is 0, it's a major version
      // Return only major.minor version
      if (parts[2] === '0') {
        return ['bump-version.yml', `${parts[0]}.${parts[1]}`];
      }

      // Otherwise it's a patch version, use full version
      return ['bump-patch-version.yml', version];
    }
  }
};

async function triggerWorkflow(repoConfig: RepoConfig, workflowId: string, version: string) {
  const client = obtainClient(repoConfig.tokenEnv);
  try {
    await client.rest.actions.createWorkflowDispatch({
      owner: "GreptimeTeam",
      repo: repoConfig.repo,
      workflow_id: workflowId,
      ref: "main",
      inputs: {
        version,
      },
    });
    console.log(`Successfully triggered ${workflowId} workflow for ${repoConfig.repo} with version ${version}`);
  } catch (error) {
    core.setFailed(`Failed to trigger workflow for ${repoConfig.repo}: ${error.message}`);
    throw error;
  }
}

async function processRepo(repoName: string, version: string) {
  const repoConfig = REPO_CONFIGS[repoName];
  if (!repoConfig) {
    throw new Error(`Unknown repository: ${repoName}`);
  }

  try {
    const workflowResult = repoConfig.workflowLogic(version);
    if (workflowResult === null) {
      // Skip this repo (e.g., nightly version for website)
      return;
    }

    const [workflowId, apiVersion] = workflowResult;
    await triggerWorkflow(repoConfig, workflowId, apiVersion);
  } catch (error) {
    core.setFailed(`Error processing ${repoName} with version ${version}: ${error.message}`);
    throw error;
  }
}

async function main() {
  const version = process.env.VERSION;
  if (!version) {
    core.setFailed("VERSION environment variable is required");
    process.exit(1);
  }

  // Remove 'v' prefix if exists
  const cleanVersion = version.startsWith('v') ? version.slice(1) : version;

  // Get target repositories from environment variable
  // Default to both if not specified
  const targetRepos = process.env.TARGET_REPOS?.split(',').map(repo => repo.trim()) || ['website', 'docs'];

  console.log(`Processing version ${cleanVersion} for repositories: ${targetRepos.join(', ')}`);

  const errors: string[] = [];

  // Process each repository
  for (const repo of targetRepos) {
    try {
      await processRepo(repo, cleanVersion);
    } catch (error) {
      errors.push(`${repo}: ${error.message}`);
    }
  }

  if (errors.length > 0) {
    core.setFailed(`Failed to process some repositories: ${errors.join('; ')}`);
    process.exit(1);
  }

  console.log('All repositories processed successfully');
}

// Execute main function
main().catch((error) => {
  core.setFailed(`Unexpected error: ${error.message}`);
  process.exit(1);
});
@@ -55,12 +55,25 @@ async function main() {
  await client.rest.issues.addLabels({
    owner, repo, issue_number: number, labels: [labelDocsRequired],
  })
+
+  // Get available assignees for the docs repo
+  const assigneesResponse = await docsClient.rest.issues.listAssignees({
+    owner: 'GreptimeTeam',
+    repo: 'docs',
+  })
+  const validAssignees = assigneesResponse.data.map(assignee => assignee.login)
+  core.info(`Available assignees: ${validAssignees.join(', ')}`)
+
+  // Check if the actor is a valid assignee, otherwise fallback to fengjiachun
+  const assignee = validAssignees.includes(actor) ? actor : 'fengjiachun'
+  core.info(`Assigning issue to: ${assignee}`)
+
  await docsClient.rest.issues.create({
    owner: 'GreptimeTeam',
    repo: 'docs',
    title: `Update docs for ${title}`,
    body: `A document change request is generated from ${html_url}`,
-    assignee: actor,
+    assignee: assignee,
  }).then((res) => {
    core.info(`Created issue ${res.data}`)
  })
@@ -53,6 +53,54 @@ get_arch_type() {
  esac
}

+# Verify SHA256 checksum
+verify_sha256() {
+  file="$1"
+  expected_sha256="$2"
+
+  if command -v sha256sum >/dev/null 2>&1; then
+    actual_sha256=$(sha256sum "$file" | cut -d' ' -f1)
+  elif command -v shasum >/dev/null 2>&1; then
+    actual_sha256=$(shasum -a 256 "$file" | cut -d' ' -f1)
+  else
+    echo "Warning: No SHA256 verification tool found (sha256sum or shasum). Skipping checksum verification."
+    return 0
+  fi
+
+  if [ "$actual_sha256" = "$expected_sha256" ]; then
+    echo "SHA256 checksum verified successfully."
+    return 0
+  else
+    echo "Error: SHA256 checksum verification failed!"
+    echo "Expected: $expected_sha256"
+    echo "Actual: $actual_sha256"
+    return 1
+  fi
+}
+
+# Prompt for user confirmation (compatible with different shells)
+prompt_confirmation() {
+  message="$1"
+  printf "%s (y/N): " "$message"
+
+  # Try to read user input, fallback if read fails
+  answer=""
+  if read answer </dev/tty 2>/dev/null; then
+    case "$answer" in
+      [Yy]|[Yy][Ee][Ss])
+        return 0
+        ;;
+      *)
+        return 1
+        ;;
+    esac
+  else
+    echo ""
+    echo "Cannot read user input. Defaulting to No."
+    return 1
+  fi
+}
+
download_artifact() {
  if [ -n "${OS_TYPE}" ] && [ -n "${ARCH_TYPE}" ]; then
    # Use the latest stable released version.

@@ -71,17 +119,104 @@ download_artifact() {
    fi

    echo "Downloading ${BIN}, OS: ${OS_TYPE}, Arch: ${ARCH_TYPE}, Version: ${VERSION}"
-    PACKAGE_NAME="${BIN}-${OS_TYPE}-${ARCH_TYPE}-${VERSION}.tar.gz"
+    PKG_NAME="${BIN}-${OS_TYPE}-${ARCH_TYPE}-${VERSION}"
+    PACKAGE_NAME="${PKG_NAME}.tar.gz"
+    SHA256_FILE="${PKG_NAME}.sha256sum"
+
    if [ -n "${PACKAGE_NAME}" ]; then
-      wget "https://github.com/${GITHUB_ORG}/${GITHUB_REPO}/releases/download/${VERSION}/${PACKAGE_NAME}"
+      # Check if files already exist and prompt for override
+      if [ -f "${PACKAGE_NAME}" ]; then
+        echo "File ${PACKAGE_NAME} already exists."
+        if prompt_confirmation "Do you want to override it?"; then
+          echo "Overriding existing file..."
+          rm -f "${PACKAGE_NAME}"
+        else
+          echo "Skipping download. Using existing file."
+        fi
+      fi
+
+      if [ -f "${BIN}" ]; then
+        echo "Binary ${BIN} already exists."
+        if prompt_confirmation "Do you want to override it?"; then
+          echo "Will override existing binary..."
+          rm -f "${BIN}"
+        else
+          echo "Installation cancelled."
+          exit 0
+        fi
+      fi
+
+      # Download package if not exists
+      if [ ! -f "${PACKAGE_NAME}" ]; then
+        echo "Downloading ${PACKAGE_NAME}..."
+        # Use curl instead of wget for better compatibility
+        if command -v curl >/dev/null 2>&1; then
+          if ! curl -L -o "${PACKAGE_NAME}" "https://github.com/${GITHUB_ORG}/${GITHUB_REPO}/releases/download/${VERSION}/${PACKAGE_NAME}"; then
+            echo "Error: Failed to download ${PACKAGE_NAME}"
+            exit 1
+          fi
+        elif command -v wget >/dev/null 2>&1; then
+          if ! wget -O "${PACKAGE_NAME}" "https://github.com/${GITHUB_ORG}/${GITHUB_REPO}/releases/download/${VERSION}/${PACKAGE_NAME}"; then
+            echo "Error: Failed to download ${PACKAGE_NAME}"
+            exit 1
+          fi
+        else
+          echo "Error: Neither curl nor wget is available for downloading."
+          exit 1
+        fi
+      fi
+
+      # Download and verify SHA256 checksum
+      echo "Downloading SHA256 checksum..."
+      sha256_download_success=0
+      if command -v curl >/dev/null 2>&1; then
+        if curl -L -s -o "${SHA256_FILE}" "https://github.com/${GITHUB_ORG}/${GITHUB_REPO}/releases/download/${VERSION}/${SHA256_FILE}" 2>/dev/null; then
+          sha256_download_success=1
+        fi
+      elif command -v wget >/dev/null 2>&1; then
+        if wget -q -O "${SHA256_FILE}" "https://github.com/${GITHUB_ORG}/${GITHUB_REPO}/releases/download/${VERSION}/${SHA256_FILE}" 2>/dev/null; then
+          sha256_download_success=1
+        fi
+      fi
+
+      if [ $sha256_download_success -eq 1 ] && [ -f "${SHA256_FILE}" ]; then
+        expected_sha256=$(cat "${SHA256_FILE}" | cut -d' ' -f1)
+        if [ -n "$expected_sha256" ]; then
+          if ! verify_sha256 "${PACKAGE_NAME}" "${expected_sha256}"; then
+            echo "SHA256 verification failed. Removing downloaded file."
+            rm -f "${PACKAGE_NAME}" "${SHA256_FILE}"
+            exit 1
+          fi
+        else
+          echo "Warning: Could not parse SHA256 checksum from file."
+        fi
+        rm -f "${SHA256_FILE}"
+      else
+        echo "Warning: Could not download SHA256 checksum file. Skipping verification."
+      fi
+
      # Extract the binary and clean the rest.
-      tar xvf "${PACKAGE_NAME}" && \
-        mv "${PACKAGE_NAME%.tar.gz}/${BIN}" "${PWD}" && \
-        rm -r "${PACKAGE_NAME}" && \
-        rm -r "${PACKAGE_NAME%.tar.gz}" && \
-        echo "Run './${BIN} --help' to get started"
+      echo "Extracting ${PACKAGE_NAME}..."
+      if ! tar xf "${PACKAGE_NAME}"; then
+        echo "Error: Failed to extract ${PACKAGE_NAME}"
+        exit 1
+      fi
+
+      # Find the binary in the extracted directory
+      extracted_dir="${PACKAGE_NAME%.tar.gz}"
+      if [ -f "${extracted_dir}/${BIN}" ]; then
+        mv "${extracted_dir}/${BIN}" "${PWD}/"
+        rm -f "${PACKAGE_NAME}"
+        rm -rf "${extracted_dir}"
+        chmod +x "${BIN}"
+        echo "Installation completed successfully!"
+        echo "Run './${BIN} --help' to get started"
+      else
+        echo "Error: Binary ${BIN} not found in extracted archive"
+        rm -f "${PACKAGE_NAME}"
+        rm -rf "${extracted_dir}"
+        exit 1
+      fi
    fi
  fi
}
@@ -9,6 +9,10 @@ default-run = "greptime"
name = "greptime"
path = "src/bin/greptime.rs"

+[[bin]]
+name = "objbench"
+path = "src/bin/objbench.rs"
+
[features]
default = ["servers/pprof", "servers/mem-prof"]
tokio-console = ["common-telemetry/tokio-console"]

@@ -20,6 +24,7 @@ workspace = true
async-trait.workspace = true
auth.workspace = true
base64.workspace = true
+colored = "2.0"
cache.workspace = true
catalog.workspace = true
chrono.workspace = true

@@ -55,6 +60,9 @@ futures.workspace = true
human-panic = "2.0"
humantime.workspace = true
lazy_static.workspace = true
+object-store.workspace = true
+parquet = "53"
+pprof = "0.14"
meta-client.workspace = true
meta-srv.workspace = true
metric-engine.workspace = true
@@ -21,6 +21,8 @@ use cmd::{cli, datanode, flownode, frontend, metasrv, standalone, App};
use common_version::version;
use servers::install_ring_crypto_provider;

+pub mod objbench;
+
#[derive(Parser)]
#[command(name = "greptime", author, version, long_version = version(), about)]
#[command(propagate_version = true)]
602
src/cmd/src/bin/objbench.rs
Normal file
602
src/cmd/src/bin/objbench.rs
Normal file
@@ -0,0 +1,602 @@
|
|||||||
|
// Copyright 2025 Greptime Team
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
use std::path::{Path, PathBuf};
|
||||||
|
use std::time::Instant;
|
||||||
|
|
||||||
|
use clap::Parser;
|
||||||
|
use cmd::error::{self, Result};
|
||||||
|
use colored::Colorize;
|
||||||
|
use datanode::config::ObjectStoreConfig;
|
||||||
|
use mito2::config::{FulltextIndexConfig, MitoConfig, Mode};
|
||||||
|
use mito2::read::Source;
|
||||||
|
use mito2::sst::file::{FileHandle, FileId, FileMeta};
|
||||||
|
use mito2::sst::file_purger::{FilePurger, FilePurgerRef, PurgeRequest};
|
||||||
|
use mito2::sst::parquet::{WriteOptions, PARQUET_METADATA_KEY};
|
||||||
|
use mito2::{build_access_layer, Metrics, OperationType, SstWriteRequest};
|
||||||
|
use object_store::ObjectStore;
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
use store_api::metadata::{RegionMetadata, RegionMetadataRef};
|
||||||
|
|
||||||
|
#[tokio::main]
|
||||||
|
pub async fn main() {
|
||||||
|
// common_telemetry::init_default_ut_logging();
|
||||||
|
let cmd = Command::parse();
|
||||||
|
if let Err(e) = cmd.run().await {
|
||||||
|
eprintln!("{}: {}", "Error".red().bold(), e);
|
||||||
|
std::process::exit(1);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
|
||||||
|
#[serde(default)]
|
||||||
|
pub struct StorageConfigWrapper {
|
||||||
|
storage: StorageConfig,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Storage engine config
|
||||||
|
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
|
||||||
|
#[serde(default)]
|
||||||
|
pub struct StorageConfig {
|
||||||
|
/// The working directory of database
|
||||||
|
pub data_home: String,
|
||||||
|
#[serde(flatten)]
|
||||||
|
pub store: ObjectStoreConfig,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Parser)]
|
||||||
|
pub struct Command {
|
||||||
|
/// Path to the object-store config file (TOML). Must deserialize into datanode::config::ObjectStoreConfig.
|
||||||
|
#[clap(long, value_name = "FILE")]
|
||||||
|
pub config: PathBuf,
|
||||||
|
|
||||||
|
/// Source SST file path in object-store (e.g. "region_dir/<uuid>.parquet").
|
||||||
|
#[clap(long, value_name = "PATH")]
|
||||||
|
pub source: String,
|
||||||
|
|
||||||
|
/// Target SST file path in object-store; its parent directory is used as destination region dir.
|
||||||
|
#[clap(long, value_name = "PATH")]
|
||||||
|
pub target: String,
|
||||||
|
|
||||||
|
/// Verbose output
|
||||||
|
#[clap(short, long, default_value_t = false)]
|
||||||
|
pub verbose: bool,
|
||||||
|
|
||||||
|
/// Output file path for pprof flamegraph (enables profiling)
|
||||||
|
#[clap(long, value_name = "FILE")]
|
||||||
|
pub pprof_file: Option<PathBuf>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Command {
|
||||||
|
pub async fn run(&self) -> Result<()> {
|
||||||
|
if self.verbose {
|
||||||
|
common_telemetry::init_default_ut_logging();
|
||||||
|
}
|
||||||
|
|
||||||
|
println!("{}", "Starting objbench...".cyan().bold());
|
||||||
|
|
||||||
|
// Build object store from config
|
||||||
|
let cfg_str = std::fs::read_to_string(&self.config).map_err(|e| {
|
||||||
|
error::IllegalConfigSnafu {
|
||||||
|
msg: format!("failed to read config {}: {e}", self.config.display()),
|
||||||
|
}
|
||||||
|
.build()
|
||||||
|
})?;
|
||||||
|
let store_cfg: StorageConfigWrapper = toml::from_str(&cfg_str).map_err(|e| {
|
||||||
|
error::IllegalConfigSnafu {
|
||||||
|
msg: format!("failed to parse config {}: {e}", self.config.display()),
|
||||||
|
}
|
||||||
|
.build()
|
||||||
|
})?;
|
||||||
|
|
||||||
|
let object_store = build_object_store(&store_cfg.storage).await?;
|
||||||
|
println!("{} Object store initialized", "✓".green());
|
||||||
|
|
||||||
|
// Prepare source identifiers
|
||||||
|
let (src_region_dir, src_file_id) = split_sst_path(&self.source)?;
|
||||||
|
println!("{} Source path parsed: {}", "✓".green(), self.source);
|
||||||
|
|
||||||
|
// Load parquet metadata to extract RegionMetadata and file stats
|
||||||
|
println!("{}", "Loading parquet metadata...".yellow());
|
||||||
|
let file_size = object_store
|
||||||
|
.stat(&self.source)
|
||||||
|
.await
|
||||||
|
.map_err(|e| {
|
||||||
|
error::IllegalConfigSnafu {
|
||||||
|
msg: format!("stat failed: {e}"),
|
||||||
|
}
|
||||||
|
.build()
|
||||||
|
})?
|
||||||
|
.content_length();
|
||||||
|
let parquet_meta = load_parquet_metadata(object_store.clone(), &self.source, file_size)
|
||||||
|
.await
|
||||||
|
.map_err(|e| {
|
||||||
|
error::IllegalConfigSnafu {
|
||||||
|
msg: format!("read parquet metadata failed: {e}"),
|
||||||
|
}
|
||||||
|
.build()
|
||||||
|
})?;
|
||||||
|
|
||||||
|
let region_meta = extract_region_metadata(&self.source, &parquet_meta)?;
|
||||||
|
let num_rows = parquet_meta.file_metadata().num_rows() as u64;
|
||||||
|
let num_row_groups = parquet_meta.num_row_groups() as u64;
|
||||||
|
|
||||||
|
println!(
|
||||||
|
"{} Metadata loaded - rows: {}, size: {} bytes",
|
||||||
|
"✓".green(),
|
||||||
|
num_rows,
|
||||||
|
file_size
|
||||||
|
);
|
||||||
|
|
||||||
|
// Build a FileHandle for the source file
|
||||||
|
let file_meta = FileMeta {
|
||||||
|
region_id: region_meta.region_id,
|
||||||
|
file_id: src_file_id,
|
||||||
|
time_range: Default::default(),
|
||||||
|
level: 0,
|
||||||
|
file_size,
|
||||||
|
available_indexes: Default::default(),
|
||||||
|
index_file_size: 0,
|
||||||
|
num_rows,
|
||||||
|
num_row_groups,
|
||||||
|
sequence: None,
|
||||||
|
};
|
||||||
|
let src_handle = FileHandle::new(file_meta, new_noop_file_purger());
|
||||||
|
|
||||||
|
// Build the reader for a single file via ParquetReaderBuilder
|
||||||
|
println!("{}", "Building reader...".yellow());
|
||||||
|
let (_src_access_layer, _cache_manager) =
|
||||||
|
build_access_layer_simple(src_region_dir.clone(), object_store.clone()).await?;
|
||||||
|
let reader_build_start = Instant::now();
|
||||||
|
let reader = mito2::sst::parquet::reader::ParquetReaderBuilder::new(
|
||||||
|
src_region_dir.clone(),
|
||||||
|
src_handle.clone(),
|
||||||
|
object_store.clone(),
|
||||||
|
)
|
||||||
|
.expected_metadata(Some(region_meta.clone()))
|
||||||
|
.build()
|
||||||
|
.await
|
||||||
|
.map_err(|e| {
|
||||||
|
error::IllegalConfigSnafu {
|
||||||
|
msg: format!("build reader failed: {e}"),
|
||||||
|
}
|
||||||
|
.build()
|
||||||
|
})?;
|
||||||
|
|
||||||
|
let reader_build_elapsed = reader_build_start.elapsed();
|
||||||
|
let total_rows = reader.parquet_metadata().file_metadata().num_rows();
|
||||||
|
println!("{} Reader built in {:?}", "✓".green(), reader_build_elapsed);
|
||||||
|
|
||||||
|
// Prepare target access layer for writing
|
||||||
|
println!("{}", "Preparing target access layer...".yellow());
|
||||||
|
let (tgt_access_layer, tgt_cache_manager) =
|
||||||
|
build_access_layer_simple(self.target.clone(), object_store.clone()).await?;
|
||||||
|
|
||||||
|
// Build write request
|
||||||
|
let fulltext_index_config = FulltextIndexConfig {
|
||||||
|
create_on_compaction: Mode::Disable,
|
||||||
|
..Default::default()
|
||||||
|
};
|
||||||
|
let write_opts = WriteOptions::default();
|
||||||
|
let write_req = SstWriteRequest {
|
||||||
|
op_type: OperationType::Compact,
|
||||||
|
metadata: region_meta,
|
||||||
|
source: Source::Reader(Box::new(reader)),
|
||||||
|
cache_manager: tgt_cache_manager,
|
||||||
|
storage: None,
|
||||||
|
max_sequence: None,
|
||||||
|
index_options: Default::default(),
|
||||||
|
inverted_index_config: MitoConfig::default().inverted_index,
|
||||||
|
fulltext_index_config,
|
||||||
|
bloom_filter_index_config: MitoConfig::default().bloom_filter_index,
|
||||||
|
};
|
||||||
|
|
||||||
|
// Write SST
|
||||||
|
println!("{}", "Writing SST...".yellow());
|
||||||
|
let mut metrics = Metrics::default();
|
||||||
|
|
||||||
|
// Start profiling if pprof_file is specified
|
||||||
|
#[cfg(unix)]
|
||||||
|
let profiler_guard = if self.pprof_file.is_some() {
|
||||||
|
println!("{} Starting profiling...", "⚡".yellow());
|
||||||
|
Some(
|
||||||
|
pprof::ProfilerGuardBuilder::default()
|
||||||
|
.frequency(99)
|
||||||
|
.blocklist(&["libc", "libgcc", "pthread", "vdso"])
|
||||||
|
.build()
|
||||||
|
.map_err(|e| {
|
||||||
|
error::IllegalConfigSnafu {
|
||||||
|
msg: format!("Failed to start profiler: {e}"),
|
||||||
|
}
|
||||||
|
.build()
|
||||||
|
})?,
|
||||||
|
)
|
||||||
|
} else {
|
||||||
|
None
|
||||||
|
};
|
||||||
|
|
||||||
|
#[cfg(not(unix))]
|
||||||
|
if self.pprof_file.is_some() {
|
||||||
|
eprintln!(
|
||||||
|
"{}: Profiling is not supported on this platform",
|
||||||
|
"Warning".yellow()
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
let write_start = Instant::now();
|
||||||
|
let infos = tgt_access_layer
|
||||||
|
.write_sst(write_req, &write_opts, &mut metrics)
|
||||||
|
.await
|
||||||
|
.map_err(|e| {
|
||||||
|
error::IllegalConfigSnafu {
|
||||||
|
msg: format!("write_sst failed: {e}"),
|
||||||
|
}
|
||||||
|
.build()
|
||||||
|
})?;
|
||||||
|
|
||||||
|
let write_elapsed = write_start.elapsed();
|
||||||
|
|
||||||
|
// Stop profiling and generate flamegraph if enabled
|
||||||
|
#[cfg(unix)]
|
||||||
|
if let (Some(guard), Some(pprof_file)) = (profiler_guard, &self.pprof_file) {
|
||||||
|
println!("{} Generating flamegraph...", "🔥".yellow());
|
||||||
|
match guard.report().build() {
|
||||||
|
Ok(report) => {
|
||||||
|
let mut flamegraph_data = Vec::new();
|
||||||
|
if let Err(e) = report.flamegraph(&mut flamegraph_data) {
|
||||||
|
eprintln!(
|
||||||
|
"{}: Failed to generate flamegraph: {}",
|
||||||
|
"Warning".yellow(),
|
||||||
|
e
|
||||||
|
);
|
||||||
|
} else if let Err(e) = std::fs::write(pprof_file, flamegraph_data) {
|
||||||
|
eprintln!(
|
||||||
|
"{}: Failed to write flamegraph to {}: {}",
|
||||||
|
"Warning".yellow(),
|
||||||
|
pprof_file.display(),
|
||||||
|
e
|
||||||
|
);
|
||||||
|
} else {
|
||||||
|
println!(
|
||||||
|
"{} Flamegraph saved to {}",
|
||||||
|
"✓".green(),
|
||||||
|
pprof_file.display().to_string().cyan()
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
eprintln!(
|
||||||
|
"{}: Failed to generate pprof report: {}",
|
||||||
|
"Warning".yellow(),
|
||||||
|
e
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
assert_eq!(infos.len(), 1);
|
||||||
|
let dst_file_id = infos[0].file_id;
|
||||||
|
let dst_file_path = format!("{}{}", self.target, dst_file_id.as_parquet(),);
|
||||||
|
|
||||||
|
// Report results with ANSI colors
|
||||||
|
println!("\n{} {}", "Write complete!".green().bold(), "✓".green());
|
||||||
|
println!(" {}: {}", "Destination file".bold(), dst_file_path.cyan());
|
||||||
|
println!(" {}: {}", "Rows".bold(), total_rows.to_string().cyan());
|
||||||
|
println!(
|
||||||
|
" {}: {}",
|
||||||
|
"File size".bold(),
|
||||||
|
format!("{} bytes", file_size).cyan()
|
||||||
|
);
|
||||||
|
println!(
|
||||||
|
" {}: {:?}",
|
||||||
|
"Reader build time".bold(),
|
||||||
|
reader_build_elapsed
|
||||||
|
);
|
||||||
|
println!(" {}: {:?}", "Total time".bold(), write_elapsed);
|
||||||
|
|
||||||
|
// Print metrics in a formatted way
|
||||||
|
println!(
|
||||||
|
" {}: {:?}, sum: {:?}",
|
||||||
|
"Metrics".bold(),
|
||||||
|
metrics,
|
||||||
|
metrics.sum()
|
||||||
|
);
|
||||||
|
|
||||||
|
// Print infos
|
||||||
|
println!(" {}: {:?}", "Index".bold(), infos[0].index_metadata);
|
||||||
|
|
||||||
|
// Cleanup
|
||||||
|
println!("\n{}", "Cleaning up...".yellow());
|
||||||
|
object_store.delete(&dst_file_path).await.map_err(|e| {
|
||||||
|
error::IllegalConfigSnafu {
|
||||||
|
msg: format!("Failed to delete dest file {}: {}", dst_file_path, e),
|
||||||
|
}
|
||||||
|
.build()
|
||||||
|
})?;
|
||||||
|
println!("{} Temporary file deleted", "✓".green());
|
||||||
|
|
||||||
|
println!("\n{}", "Benchmark completed successfully!".green().bold());
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn split_sst_path(path: &str) -> Result<(String, FileId)> {
|
||||||
|
let p = Path::new(path);
|
||||||
|
let file_name = p.file_name().and_then(|s| s.to_str()).ok_or_else(|| {
|
||||||
|
error::IllegalConfigSnafu {
|
||||||
|
msg: "invalid source path".to_string(),
|
||||||
|
}
|
||||||
|
.build()
|
||||||
|
})?;
|
||||||
|
let uuid_str = file_name.strip_suffix(".parquet").ok_or_else(|| {
|
||||||
|
error::IllegalConfigSnafu {
|
||||||
|
msg: "expect .parquet file".to_string(),
|
||||||
|
}
|
||||||
|
.build()
|
||||||
|
})?;
|
||||||
|
let file_id = FileId::parse_str(uuid_str).map_err(|e| {
|
||||||
|
error::IllegalConfigSnafu {
|
||||||
|
msg: format!("invalid file id: {e}"),
|
||||||
|
}
|
||||||
|
.build()
|
||||||
|
})?;
|
||||||
|
let parent = p
|
||||||
|
.parent()
|
||||||
|
.and_then(|s| s.to_str())
|
||||||
|
.unwrap_or("")
|
||||||
|
.to_string();
|
||||||
|
Ok((parent, file_id))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn extract_region_metadata(
|
||||||
|
file_path: &str,
|
||||||
|
meta: &parquet::file::metadata::ParquetMetaData,
|
||||||
|
) -> Result<RegionMetadataRef> {
|
||||||
|
use parquet::format::KeyValue;
|
||||||
|
let kvs: Option<&Vec<KeyValue>> = meta.file_metadata().key_value_metadata();
|
||||||
|
let Some(kvs) = kvs else {
|
||||||
|
return Err(error::IllegalConfigSnafu {
|
||||||
|
msg: format!("{file_path}: missing parquet key_value metadata"),
|
||||||
|
}
|
||||||
|
.build());
|
||||||
|
};
|
||||||
|
let json = kvs
|
||||||
|
.iter()
|
||||||
|
.find(|kv| kv.key == PARQUET_METADATA_KEY)
|
||||||
|
.and_then(|kv| kv.value.as_ref())
|
||||||
|
.ok_or_else(|| {
|
||||||
|
error::IllegalConfigSnafu {
|
||||||
|
msg: format!("{file_path}: key {PARQUET_METADATA_KEY} not found or empty"),
|
||||||
|
}
|
||||||
|
.build()
|
||||||
|
})?;
|
||||||
|
let region: RegionMetadata = RegionMetadata::from_json(json).map_err(|e| {
|
||||||
|
error::IllegalConfigSnafu {
|
||||||
|
msg: format!("invalid region metadata json: {e}"),
|
||||||
|
}
|
||||||
|
.build()
|
||||||
|
})?;
|
||||||
|
Ok(std::sync::Arc::new(region))
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn build_object_store(sc: &StorageConfig) -> Result<ObjectStore> {
|
||||||
|
use datanode::config::ObjectStoreConfig::*;
|
||||||
|
let oss = &sc.store;
|
||||||
|
match oss {
|
||||||
|
File(_) => {
|
||||||
|
use object_store::services::Fs;
|
||||||
|
let builder = Fs::default().root(&sc.data_home);
|
||||||
|
Ok(ObjectStore::new(builder)
|
||||||
|
.map_err(|e| {
|
||||||
|
error::IllegalConfigSnafu {
|
||||||
|
msg: format!("init fs backend failed: {e}"),
|
||||||
|
}
|
||||||
|
.build()
|
||||||
|
})?
|
||||||
|
.finish())
|
||||||
|
}
|
||||||
|
S3(s3) => {
|
||||||
|
use common_base::secrets::ExposeSecret;
|
||||||
|
use object_store::services::S3;
|
||||||
|
use object_store::util;
|
||||||
|
let root = util::normalize_dir(&s3.root);
|
||||||
|
let mut builder = S3::default()
|
||||||
|
.root(&root)
|
||||||
|
.bucket(&s3.bucket)
|
||||||
|
.access_key_id(s3.access_key_id.expose_secret())
|
||||||
|
.secret_access_key(s3.secret_access_key.expose_secret());
|
||||||
|
if let Some(ep) = &s3.endpoint {
|
||||||
|
builder = builder.endpoint(ep);
|
||||||
|
}
|
||||||
|
if let Some(region) = &s3.region {
|
||||||
|
builder = builder.region(region);
|
||||||
|
}
|
||||||
|
if s3.enable_virtual_host_style {
|
||||||
|
builder = builder.enable_virtual_host_style();
|
||||||
|
}
|
||||||
|
Ok(ObjectStore::new(builder)
|
||||||
|
.map_err(|e| {
|
||||||
|
error::IllegalConfigSnafu {
|
||||||
|
msg: format!("init s3 backend failed: {e}"),
|
||||||
|
}
|
||||||
|
.build()
|
||||||
|
})?
|
||||||
|
.finish())
|
||||||
|
}
|
||||||
|
Oss(oss) => {
|
||||||
|
use common_base::secrets::ExposeSecret;
|
||||||
|
use object_store::services::Oss;
|
||||||
|
use object_store::util;
|
||||||
|
let root = util::normalize_dir(&oss.root);
|
||||||
|
let builder = Oss::default()
|
||||||
|
.root(&root)
|
||||||
|
.bucket(&oss.bucket)
|
||||||
|
.endpoint(&oss.endpoint)
|
||||||
|
.access_key_id(oss.access_key_id.expose_secret())
|
||||||
|
.access_key_secret(oss.access_key_secret.expose_secret());
|
||||||
|
Ok(ObjectStore::new(builder)
|
||||||
|
.map_err(|e| {
|
||||||
|
error::IllegalConfigSnafu {
|
||||||
|
msg: format!("init oss backend failed: {e}"),
|
||||||
|
}
|
||||||
|
.build()
|
||||||
|
})?
|
||||||
|
.finish())
|
||||||
|
}
|
||||||
|
Azblob(az) => {
            use common_base::secrets::ExposeSecret;
            use object_store::services::Azblob;
            use object_store::util;
            let root = util::normalize_dir(&az.root);
            let mut builder = Azblob::default()
                .root(&root)
                .container(&az.container)
                .endpoint(&az.endpoint)
                .account_name(az.account_name.expose_secret())
                .account_key(az.account_key.expose_secret());
            if let Some(token) = &az.sas_token {
                builder = builder.sas_token(token);
            }
            Ok(ObjectStore::new(builder)
                .map_err(|e| {
                    error::IllegalConfigSnafu {
                        msg: format!("init azblob backend failed: {e}"),
                    }
                    .build()
                })?
                .finish())
        }
        Gcs(gcs) => {
            use common_base::secrets::ExposeSecret;
            use object_store::services::Gcs;
            use object_store::util;
            let root = util::normalize_dir(&gcs.root);
            let builder = Gcs::default()
                .root(&root)
                .bucket(&gcs.bucket)
                .scope(&gcs.scope)
                .credential_path(gcs.credential_path.expose_secret())
                .credential(gcs.credential.expose_secret())
                .endpoint(&gcs.endpoint);
            Ok(ObjectStore::new(builder)
                .map_err(|e| {
                    error::IllegalConfigSnafu {
                        msg: format!("init gcs backend failed: {e}"),
                    }
                    .build()
                })?
                .finish())
        }
    }
}

async fn build_access_layer_simple(
    region_dir: String,
    object_store: ObjectStore,
) -> Result<(
    std::sync::Arc<mito2::AccessLayer>,
    std::sync::Arc<mito2::CacheManager>,
)> {
    // Minimal index aux path setup
    let mut mito_cfg = MitoConfig::default();
    // Use a temporary directory as aux path
    let data_home = std::env::temp_dir().join("greptime_objbench");
    let _ = std::fs::create_dir_all(&data_home);
    let _ = mito_cfg.index.sanitize(
        data_home.to_str().unwrap_or("/tmp"),
        &mito_cfg.inverted_index,
    );
    let access_layer = build_access_layer(&region_dir, object_store, &mito_cfg)
        .await
        .map_err(|e| {
            error::IllegalConfigSnafu {
                msg: format!("build_access_layer failed: {e}"),
            }
            .build()
        })?;
    Ok((
        access_layer,
        std::sync::Arc::new(mito2::CacheManager::default()),
    ))
}

fn new_noop_file_purger() -> FilePurgerRef {
    #[derive(Debug)]
    struct Noop;
    impl FilePurger for Noop {
        fn send_request(&self, _request: PurgeRequest) {}
    }
    std::sync::Arc::new(Noop)
}

async fn load_parquet_metadata(
    object_store: ObjectStore,
    path: &str,
    file_size: u64,
) -> std::result::Result<
    parquet::file::metadata::ParquetMetaData,
    Box<dyn std::error::Error + Send + Sync>,
> {
    use parquet::file::metadata::ParquetMetaDataReader;
    use parquet::file::FOOTER_SIZE;
    let actual_size = if file_size == 0 {
        object_store.stat(path).await?.content_length()
    } else {
        file_size
    };
    if actual_size < FOOTER_SIZE as u64 {
        return Err("file too small".into());
    }
    let prefetch: u64 = 64 * 1024;
    let start = actual_size.saturating_sub(prefetch);
    let buffer = object_store
        .read_with(path)
        .range(start..actual_size)
        .await?
        .to_vec();
    let buffer_len = buffer.len();
    let mut footer = [0; 8];
    footer.copy_from_slice(&buffer[buffer_len - FOOTER_SIZE..]);
    let metadata_len = ParquetMetaDataReader::decode_footer(&footer)? as u64;
    if actual_size - (FOOTER_SIZE as u64) < metadata_len {
        return Err("invalid footer/metadata length".into());
    }
    if (metadata_len as usize) <= buffer_len - FOOTER_SIZE {
        let metadata_start = buffer_len - metadata_len as usize - FOOTER_SIZE;
        let meta = ParquetMetaDataReader::decode_metadata(
            &buffer[metadata_start..buffer_len - FOOTER_SIZE],
        )?;
        Ok(meta)
    } else {
        let metadata_start = actual_size - metadata_len - FOOTER_SIZE as u64;
        let data = object_store
            .read_with(path)
            .range(metadata_start..(actual_size - FOOTER_SIZE as u64))
            .await?
            .to_vec();
        let meta = ParquetMetaDataReader::decode_metadata(&data)?;
        Ok(meta)
    }
}

#[cfg(test)]
mod tests {
    use super::StorageConfigWrapper;

    #[test]
    fn test_decode() {
        let cfg = std::fs::read_to_string("/home/lei/datanode-bulk.toml").unwrap();
        let storage: StorageConfigWrapper = toml::from_str(&cfg).unwrap();
        println!("{:?}", storage);
    }
}
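For orientation only (not part of the diff): a minimal sketch of how `load_parquet_metadata` would be driven. The caller name and the SST path are hypothetical; passing `0` as the size makes the helper stat the object first.

// Sketch only: hypothetical caller of the helper above.
async fn print_row_groups(
    store: ObjectStore,
) -> std::result::Result<(), Box<dyn std::error::Error + Send + Sync>> {
    // `0` means "size unknown"; the helper will stat the object itself.
    let meta = load_parquet_metadata(store, "region_dir/example.parquet", 0).await?;
    println!("row groups: {}", meta.num_row_groups());
    Ok(())
}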
@@ -12,9 +12,11 @@ default = ["geo"]
geo = ["geohash", "h3o", "s2", "wkt", "geo-types", "dep:geo"]

[dependencies]
ahash = "0.8"
api.workspace = true
arc-swap = "1.0"
async-trait.workspace = true
bincode = "1.3"
common-base.workspace = true
common-catalog.workspace = true
common-error.workspace = true
@@ -32,6 +34,7 @@ geo = { version = "0.29", optional = true }
geo-types = { version = "0.7", optional = true }
geohash = { version = "0.13", optional = true }
h3o = { version = "0.6", optional = true }
hyperloglogplus = "0.4"
jsonb.workspace = true
nalgebra.workspace = true
num = "0.4"
@@ -47,6 +50,7 @@ sql.workspace = true
statrs = "0.16"
store-api.workspace = true
table.workspace = true
uddsketch = { git = "https://github.com/GreptimeTeam/timescaledb-toolkit.git", rev = "84828fe8fb494a6a61412a3da96517fc80f7bb20" }
wkt = { version = "0.11", optional = true }

[dev-dependencies]
src/common/function/src/aggr.rs (new file, 20 lines)
@@ -0,0 +1,20 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

mod hll;
mod uddsketch_state;

pub(crate) use hll::HllStateType;
pub use hll::{HllState, HLL_MERGE_NAME, HLL_NAME};
pub use uddsketch_state::{UddSketchState, UDDSKETCH_STATE_NAME};
src/common/function/src/aggr/hll.rs (new file, 319 lines)
@@ -0,0 +1,319 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Arc;

use common_query::prelude::*;
use common_telemetry::trace;
use datafusion::arrow::array::ArrayRef;
use datafusion::common::cast::{as_binary_array, as_string_array};
use datafusion::common::not_impl_err;
use datafusion::error::{DataFusionError, Result as DfResult};
use datafusion::logical_expr::function::AccumulatorArgs;
use datafusion::logical_expr::{Accumulator as DfAccumulator, AggregateUDF};
use datafusion::prelude::create_udaf;
use datatypes::arrow::datatypes::DataType;
use hyperloglogplus::{HyperLogLog, HyperLogLogPlus};

use crate::utils::FixedRandomState;

pub const HLL_NAME: &str = "hll";
pub const HLL_MERGE_NAME: &str = "hll_merge";

const DEFAULT_PRECISION: u8 = 14;

pub(crate) type HllStateType = HyperLogLogPlus<String, FixedRandomState>;

pub struct HllState {
    hll: HllStateType,
}

impl std::fmt::Debug for HllState {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "HllState<Opaque>")
    }
}

impl Default for HllState {
    fn default() -> Self {
        Self::new()
    }
}

impl HllState {
    pub fn new() -> Self {
        Self {
            // Safety: the DEFAULT_PRECISION is fixed and valid
            hll: HllStateType::new(DEFAULT_PRECISION, FixedRandomState::new()).unwrap(),
        }
    }

    /// Create a UDF for the `hll` function.
    ///
    /// `hll` accepts a string column and aggregates the
    /// values into a HyperLogLog state.
    pub fn state_udf_impl() -> AggregateUDF {
        create_udaf(
            HLL_NAME,
            vec![DataType::Utf8],
            Arc::new(DataType::Binary),
            Volatility::Immutable,
            Arc::new(Self::create_accumulator),
            Arc::new(vec![DataType::Binary]),
        )
    }

    /// Create a UDF for the `hll_merge` function.
    ///
    /// `hll_merge` accepts a binary column of states generated by `hll`
    /// and merges them into a single state.
    pub fn merge_udf_impl() -> AggregateUDF {
        create_udaf(
            HLL_MERGE_NAME,
            vec![DataType::Binary],
            Arc::new(DataType::Binary),
            Volatility::Immutable,
            Arc::new(Self::create_merge_accumulator),
            Arc::new(vec![DataType::Binary]),
        )
    }

    fn update(&mut self, value: &str) {
        self.hll.insert(value);
    }

    fn merge(&mut self, raw: &[u8]) {
        if let Ok(serialized) = bincode::deserialize::<HllStateType>(raw) {
            if let Ok(()) = self.hll.merge(&serialized) {
                return;
            }
        }
        trace!("Warning: Failed to merge HyperLogLog from {:?}", raw);
    }

    fn create_accumulator(acc_args: AccumulatorArgs) -> DfResult<Box<dyn DfAccumulator>> {
        let data_type = acc_args.exprs[0].data_type(acc_args.schema)?;

        match data_type {
            DataType::Utf8 => Ok(Box::new(HllState::new())),
            other => not_impl_err!("{HLL_NAME} does not support data type: {other}"),
        }
    }

    fn create_merge_accumulator(acc_args: AccumulatorArgs) -> DfResult<Box<dyn DfAccumulator>> {
        let data_type = acc_args.exprs[0].data_type(acc_args.schema)?;

        match data_type {
            DataType::Binary => Ok(Box::new(HllState::new())),
            other => not_impl_err!("{HLL_MERGE_NAME} does not support data type: {other}"),
        }
    }
}

impl DfAccumulator for HllState {
    fn update_batch(&mut self, values: &[ArrayRef]) -> DfResult<()> {
        let array = &values[0];

        match array.data_type() {
            DataType::Utf8 => {
                let string_array = as_string_array(array)?;
                for value in string_array.iter().flatten() {
                    self.update(value);
                }
            }
            DataType::Binary => {
                let binary_array = as_binary_array(array)?;
                for v in binary_array.iter().flatten() {
                    self.merge(v);
                }
            }
            _ => {
                return not_impl_err!(
                    "HLL functions do not support data type: {}",
                    array.data_type()
                )
            }
        }

        Ok(())
    }

    fn evaluate(&mut self) -> DfResult<ScalarValue> {
        Ok(ScalarValue::Binary(Some(
            bincode::serialize(&self.hll).map_err(|e| {
                DataFusionError::Internal(format!("Failed to serialize HyperLogLog: {}", e))
            })?,
        )))
    }

    fn size(&self) -> usize {
        std::mem::size_of_val(&self.hll)
    }

    fn state(&mut self) -> DfResult<Vec<ScalarValue>> {
        Ok(vec![ScalarValue::Binary(Some(
            bincode::serialize(&self.hll).map_err(|e| {
                DataFusionError::Internal(format!("Failed to serialize HyperLogLog: {}", e))
            })?,
        ))])
    }

    fn merge_batch(&mut self, states: &[ArrayRef]) -> DfResult<()> {
        let array = &states[0];
        let binary_array = as_binary_array(array)?;
        for v in binary_array.iter().flatten() {
            self.merge(v);
        }

        Ok(())
    }
}

#[cfg(test)]
mod tests {
    use datafusion::arrow::array::{BinaryArray, StringArray};

    use super::*;

    #[test]
    fn test_hll_basic() {
        let mut state = HllState::new();
        state.update("1");
        state.update("2");
        state.update("3");

        let result = state.evaluate().unwrap();
        if let ScalarValue::Binary(Some(bytes)) = result {
            let mut hll: HllStateType = bincode::deserialize(&bytes).unwrap();
            assert_eq!(hll.count().trunc() as u32, 3);
        } else {
            panic!("Expected binary scalar value");
        }
    }

    #[test]
    fn test_hll_roundtrip() {
        let mut state = HllState::new();
        state.update("1");
        state.update("2");

        // Serialize
        let serialized = state.evaluate().unwrap();

        // Create new state and merge the serialized data
        let mut new_state = HllState::new();
        if let ScalarValue::Binary(Some(bytes)) = &serialized {
            new_state.merge(bytes);

            // Verify the merged state matches original
            let result = new_state.evaluate().unwrap();
            if let ScalarValue::Binary(Some(new_bytes)) = result {
                let mut original: HllStateType = bincode::deserialize(bytes).unwrap();
                let mut merged: HllStateType = bincode::deserialize(&new_bytes).unwrap();
                assert_eq!(original.count(), merged.count());
            } else {
                panic!("Expected binary scalar value");
            }
        } else {
            panic!("Expected binary scalar value");
        }
    }

    #[test]
    fn test_hll_batch_update() {
        let mut state = HllState::new();

        // Test string values
        let str_values = vec!["a", "b", "c", "d", "e", "f", "g", "h", "i"];
        let str_array = Arc::new(StringArray::from(str_values)) as ArrayRef;
        state.update_batch(&[str_array]).unwrap();

        let result = state.evaluate().unwrap();
        if let ScalarValue::Binary(Some(bytes)) = result {
            let mut hll: HllStateType = bincode::deserialize(&bytes).unwrap();
            assert_eq!(hll.count().trunc() as u32, 9);
        } else {
            panic!("Expected binary scalar value");
        }
    }

    #[test]
    fn test_hll_merge_batch() {
        let mut state1 = HllState::new();
        state1.update("1");
        let state1_binary = state1.evaluate().unwrap();

        let mut state2 = HllState::new();
        state2.update("2");
        let state2_binary = state2.evaluate().unwrap();

        let mut merged_state = HllState::new();
        if let (ScalarValue::Binary(Some(bytes1)), ScalarValue::Binary(Some(bytes2))) =
            (&state1_binary, &state2_binary)
        {
            let binary_array = Arc::new(BinaryArray::from(vec![
                bytes1.as_slice(),
                bytes2.as_slice(),
            ])) as ArrayRef;
            merged_state.merge_batch(&[binary_array]).unwrap();

            let result = merged_state.evaluate().unwrap();
            if let ScalarValue::Binary(Some(bytes)) = result {
                let mut hll: HllStateType = bincode::deserialize(&bytes).unwrap();
                assert_eq!(hll.count().trunc() as u32, 2);
            } else {
                panic!("Expected binary scalar value");
            }
        } else {
            panic!("Expected binary scalar values");
        }
    }

    #[test]
    fn test_hll_merge_function() {
        // Create two HLL states with different values
        let mut state1 = HllState::new();
        state1.update("1");
        state1.update("2");
        let state1_binary = state1.evaluate().unwrap();

        let mut state2 = HllState::new();
        state2.update("2");
        state2.update("3");
        let state2_binary = state2.evaluate().unwrap();

        // Create a merge state and merge both states
        let mut merge_state = HllState::new();
        if let (ScalarValue::Binary(Some(bytes1)), ScalarValue::Binary(Some(bytes2))) =
            (&state1_binary, &state2_binary)
        {
            let binary_array = Arc::new(BinaryArray::from(vec![
                bytes1.as_slice(),
                bytes2.as_slice(),
            ])) as ArrayRef;
            merge_state.update_batch(&[binary_array]).unwrap();

            let result = merge_state.evaluate().unwrap();
            if let ScalarValue::Binary(Some(bytes)) = result {
                let mut hll: HllStateType = bincode::deserialize(&bytes).unwrap();
                // Should have 3 unique values: "1", "2", "3"
                assert_eq!(hll.count().trunc() as u32, 3);
            } else {
                panic!("Expected binary scalar value");
            }
        } else {
            panic!("Expected binary scalar values");
        }
    }
}
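A minimal sketch (not from the commits) of how these UDAFs could be wired into a plain DataFusion `SessionContext`; inside GreptimeDB the registration goes through its own function registry instead, so treat this as an assumption about the intended usage.

use datafusion::execution::context::SessionContext;

// Sketch only: register both aggregates so SQL can call `hll(col)` and
// `hll_merge(state)` directly.
fn register_hll_udafs(ctx: &SessionContext) {
    ctx.register_udaf(HllState::state_udf_impl());
    ctx.register_udaf(HllState::merge_udf_impl());
}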
src/common/function/src/aggr/uddsketch_state.rs (new file, 307 lines)
@@ -0,0 +1,307 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Arc;

use common_query::prelude::*;
use common_telemetry::trace;
use datafusion::common::cast::{as_binary_array, as_primitive_array};
use datafusion::common::not_impl_err;
use datafusion::error::{DataFusionError, Result as DfResult};
use datafusion::logical_expr::function::AccumulatorArgs;
use datafusion::logical_expr::{Accumulator as DfAccumulator, AggregateUDF};
use datafusion::physical_plan::expressions::Literal;
use datafusion::prelude::create_udaf;
use datatypes::arrow::array::ArrayRef;
use datatypes::arrow::datatypes::{DataType, Float64Type};
use uddsketch::{SketchHashKey, UDDSketch};

pub const UDDSKETCH_STATE_NAME: &str = "uddsketch_state";

#[derive(Debug)]
pub struct UddSketchState {
    uddsketch: UDDSketch,
}

impl UddSketchState {
    pub fn new(bucket_size: u64, error_rate: f64) -> Self {
        Self {
            uddsketch: UDDSketch::new(bucket_size, error_rate),
        }
    }

    pub fn udf_impl() -> AggregateUDF {
        create_udaf(
            UDDSKETCH_STATE_NAME,
            vec![DataType::Int64, DataType::Float64, DataType::Float64],
            Arc::new(DataType::Binary),
            Volatility::Immutable,
            Arc::new(|args| {
                let (bucket_size, error_rate) = downcast_accumulator_args(args)?;
                Ok(Box::new(UddSketchState::new(bucket_size, error_rate)))
            }),
            Arc::new(vec![DataType::Binary]),
        )
    }

    fn update(&mut self, value: f64) {
        self.uddsketch.add_value(value);
    }

    fn merge(&mut self, raw: &[u8]) {
        if let Ok(uddsketch) = bincode::deserialize::<UDDSketch>(raw) {
            if uddsketch.count() != 0 {
                self.uddsketch.merge_sketch(&uddsketch);
            }
        } else {
            trace!("Warning: Failed to deserialize UDDSketch from {:?}", raw);
        }
    }
}

fn downcast_accumulator_args(args: AccumulatorArgs) -> DfResult<(u64, f64)> {
    let bucket_size = match args.exprs[0]
        .as_any()
        .downcast_ref::<Literal>()
        .map(|lit| lit.value())
    {
        Some(ScalarValue::Int64(Some(value))) => *value as u64,
        _ => {
            return not_impl_err!(
                "{} not supported for bucket size: {}",
                UDDSKETCH_STATE_NAME,
                &args.exprs[0]
            )
        }
    };

    let error_rate = match args.exprs[1]
        .as_any()
        .downcast_ref::<Literal>()
        .map(|lit| lit.value())
    {
        Some(ScalarValue::Float64(Some(value))) => *value,
        _ => {
            return not_impl_err!(
                "{} not supported for error rate: {}",
                UDDSKETCH_STATE_NAME,
                &args.exprs[1]
            )
        }
    };

    Ok((bucket_size, error_rate))
}

impl DfAccumulator for UddSketchState {
    fn update_batch(&mut self, values: &[ArrayRef]) -> DfResult<()> {
        let array = &values[2]; // the third column is data value
        let f64_array = as_primitive_array::<Float64Type>(array)?;
        for v in f64_array.iter().flatten() {
            self.update(v);
        }

        Ok(())
    }

    fn evaluate(&mut self) -> DfResult<ScalarValue> {
        Ok(ScalarValue::Binary(Some(
            bincode::serialize(&self.uddsketch).map_err(|e| {
                DataFusionError::Internal(format!("Failed to serialize UDDSketch: {}", e))
            })?,
        )))
    }

    fn size(&self) -> usize {
        // Base size of UDDSketch struct fields
        let mut total_size = std::mem::size_of::<f64>() * 3 + // alpha, gamma, values_sum
            std::mem::size_of::<u32>() + // compactions
            std::mem::size_of::<u64>() * 2; // max_buckets, num_values

        // Size of buckets (SketchHashMap)
        // Each bucket entry contains:
        // - SketchHashKey (enum with i64/Zero/Invalid variants)
        // - SketchHashEntry (count: u64, next: SketchHashKey)
        let bucket_entry_size = std::mem::size_of::<SketchHashKey>() + // key
            std::mem::size_of::<u64>() + // count
            std::mem::size_of::<SketchHashKey>(); // next

        total_size += self.uddsketch.current_buckets_count() * bucket_entry_size;

        total_size
    }

    fn state(&mut self) -> DfResult<Vec<ScalarValue>> {
        Ok(vec![ScalarValue::Binary(Some(
            bincode::serialize(&self.uddsketch).map_err(|e| {
                DataFusionError::Internal(format!("Failed to serialize UDDSketch: {}", e))
            })?,
        ))])
    }

    fn merge_batch(&mut self, states: &[ArrayRef]) -> DfResult<()> {
        let array = &states[0];
        let binary_array = as_binary_array(array)?;
        for v in binary_array.iter().flatten() {
            self.merge(v);
        }

        Ok(())
    }
}

#[cfg(test)]
mod tests {
    use datafusion::arrow::array::{BinaryArray, Float64Array};

    use super::*;

    #[test]
    fn test_uddsketch_state_basic() {
        let mut state = UddSketchState::new(10, 0.01);
        state.update(1.0);
        state.update(2.0);
        state.update(3.0);

        let result = state.evaluate().unwrap();
        if let ScalarValue::Binary(Some(bytes)) = result {
            let deserialized: UDDSketch = bincode::deserialize(&bytes).unwrap();
            assert_eq!(deserialized.count(), 3);
        } else {
            panic!("Expected binary scalar value");
        }
    }

    #[test]
    fn test_uddsketch_state_roundtrip() {
        let mut state = UddSketchState::new(10, 0.01);
        state.update(1.0);
        state.update(2.0);

        // Serialize
        let serialized = state.evaluate().unwrap();

        // Create new state and merge the serialized data
        let mut new_state = UddSketchState::new(10, 0.01);
        if let ScalarValue::Binary(Some(bytes)) = &serialized {
            new_state.merge(bytes);

            // Verify the merged state matches original by comparing deserialized values
            let original_sketch: UDDSketch = bincode::deserialize(bytes).unwrap();
            let new_result = new_state.evaluate().unwrap();
            if let ScalarValue::Binary(Some(new_bytes)) = new_result {
                let new_sketch: UDDSketch = bincode::deserialize(&new_bytes).unwrap();
                assert_eq!(original_sketch.count(), new_sketch.count());
                assert_eq!(original_sketch.sum(), new_sketch.sum());
                assert_eq!(original_sketch.mean(), new_sketch.mean());
                assert_eq!(original_sketch.max_error(), new_sketch.max_error());
                // Compare a few quantiles to ensure statistical equivalence
                for q in [0.1, 0.5, 0.9].iter() {
                    assert!(
                        (original_sketch.estimate_quantile(*q) - new_sketch.estimate_quantile(*q))
                            .abs()
                            < 1e-10,
                        "Quantile {} mismatch: original={}, new={}",
                        q,
                        original_sketch.estimate_quantile(*q),
                        new_sketch.estimate_quantile(*q)
                    );
                }
            } else {
                panic!("Expected binary scalar value");
            }
        } else {
            panic!("Expected binary scalar value");
        }
    }

    #[test]
    fn test_uddsketch_state_batch_update() {
        let mut state = UddSketchState::new(10, 0.01);
        let values = vec![1.0f64, 2.0, 3.0];
        let array = Arc::new(Float64Array::from(values)) as ArrayRef;

        state
            .update_batch(&[array.clone(), array.clone(), array])
            .unwrap();

        let result = state.evaluate().unwrap();
        if let ScalarValue::Binary(Some(bytes)) = result {
            let deserialized: UDDSketch = bincode::deserialize(&bytes).unwrap();
            assert_eq!(deserialized.count(), 3);
        } else {
            panic!("Expected binary scalar value");
        }
    }

    #[test]
    fn test_uddsketch_state_merge_batch() {
        let mut state1 = UddSketchState::new(10, 0.01);
        state1.update(1.0);
        let state1_binary = state1.evaluate().unwrap();

        let mut state2 = UddSketchState::new(10, 0.01);
        state2.update(2.0);
        let state2_binary = state2.evaluate().unwrap();

        let mut merged_state = UddSketchState::new(10, 0.01);
        if let (ScalarValue::Binary(Some(bytes1)), ScalarValue::Binary(Some(bytes2))) =
            (&state1_binary, &state2_binary)
        {
            let binary_array = Arc::new(BinaryArray::from(vec![
                bytes1.as_slice(),
                bytes2.as_slice(),
            ])) as ArrayRef;
            merged_state.merge_batch(&[binary_array]).unwrap();

            let result = merged_state.evaluate().unwrap();
            if let ScalarValue::Binary(Some(bytes)) = result {
                let deserialized: UDDSketch = bincode::deserialize(&bytes).unwrap();
                assert_eq!(deserialized.count(), 2);
            } else {
                panic!("Expected binary scalar value");
            }
        } else {
            panic!("Expected binary scalar values");
        }
    }

    #[test]
    fn test_uddsketch_state_size() {
        let mut state = UddSketchState::new(10, 0.01);
        let initial_size = state.size();

        // Add some values to create buckets
        state.update(1.0);
        state.update(2.0);
        state.update(3.0);

        let size_with_values = state.size();
        assert!(
            size_with_values > initial_size,
            "Size should increase after adding values: initial={}, with_values={}",
            initial_size,
            size_with_values
        );

        // Verify size increases with more buckets
        state.update(10.0); // This should create a new bucket
        assert!(
            state.size() > size_with_values,
            "Size should increase after adding new bucket: prev={}, new={}",
            size_with_values,
            state.size()
        );
    }
}
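For context, a hedged sketch of the state this accumulator carries per group; the function name is made up, and the `128` / `0.01` parameters are only example values standing in for the literals the SQL call would pass.

// Sketch only: build the same kind of sketch the accumulator holds and
// produce the bytes it would ship between nodes.
fn example_state(values: &[f64]) -> Vec<u8> {
    let mut sketch = uddsketch::UDDSketch::new(128, 0.01);
    for v in values {
        sketch.add_value(*v);
    }
    bincode::serialize(&sketch).expect("UDDSketch state is serializable")
}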
@@ -22,10 +22,12 @@ use crate::function::{AsyncFunctionRef, FunctionRef};
use crate::scalars::aggregate::{AggregateFunctionMetaRef, AggregateFunctions};
use crate::scalars::date::DateFunction;
use crate::scalars::expression::ExpressionFunction;
use crate::scalars::hll_count::HllCalcFunction;
use crate::scalars::json::JsonFunction;
use crate::scalars::matches::MatchesFunction;
use crate::scalars::math::MathFunction;
use crate::scalars::timestamp::TimestampFunction;
use crate::scalars::uddsketch_calc::UddSketchCalcFunction;
use crate::scalars::vector::VectorFunction;
use crate::system::SystemFunction;
use crate::table::TableFunction;
@@ -105,6 +107,8 @@ pub static FUNCTION_REGISTRY: Lazy<Arc<FunctionRegistry>> = Lazy::new(|| {
    TimestampFunction::register(&function_registry);
    DateFunction::register(&function_registry);
    ExpressionFunction::register(&function_registry);
    UddSketchCalcFunction::register(&function_registry);
    HllCalcFunction::register(&function_registry);

    // Aggregate functions
    AggregateFunctions::register(&function_registry);
@@ -21,6 +21,7 @@ pub mod scalars;
mod system;
mod table;

pub mod aggr;
pub mod function;
pub mod function_registry;
pub mod handlers;
@@ -22,7 +22,9 @@ pub mod matches;
pub mod math;
pub mod vector;

pub(crate) mod hll_count;
#[cfg(test)]
pub(crate) mod test;
pub(crate) mod timestamp;
pub(crate) mod uddsketch_calc;
pub mod udf;
src/common/function/src/scalars/hll_count.rs (new file, 175 lines)
@@ -0,0 +1,175 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Implementation of the scalar function `hll_count`.

use std::fmt;
use std::fmt::Display;
use std::sync::Arc;

use common_query::error::{DowncastVectorSnafu, InvalidFuncArgsSnafu, Result};
use common_query::prelude::{Signature, Volatility};
use datatypes::data_type::ConcreteDataType;
use datatypes::prelude::Vector;
use datatypes::scalars::{ScalarVector, ScalarVectorBuilder};
use datatypes::vectors::{BinaryVector, MutableVector, UInt64VectorBuilder, VectorRef};
use hyperloglogplus::HyperLogLog;
use snafu::OptionExt;

use crate::aggr::HllStateType;
use crate::function::{Function, FunctionContext};
use crate::function_registry::FunctionRegistry;

const NAME: &str = "hll_count";

/// HllCalcFunction implements the scalar function `hll_count`.
///
/// It accepts one argument:
/// 1. The serialized HyperLogLogPlus state, as produced by the aggregator (binary).
///
/// For each row, it deserializes the sketch and returns the estimated cardinality.
#[derive(Debug, Default)]
pub struct HllCalcFunction;

impl HllCalcFunction {
    pub fn register(registry: &FunctionRegistry) {
        registry.register(Arc::new(HllCalcFunction));
    }
}

impl Display for HllCalcFunction {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", NAME.to_ascii_uppercase())
    }
}

impl Function for HllCalcFunction {
    fn name(&self) -> &str {
        NAME
    }

    fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
        Ok(ConcreteDataType::uint64_datatype())
    }

    fn signature(&self) -> Signature {
        // Only argument: HyperLogLogPlus state (binary)
        Signature::exact(
            vec![ConcreteDataType::binary_datatype()],
            Volatility::Immutable,
        )
    }

    fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
        if columns.len() != 1 {
            return InvalidFuncArgsSnafu {
                err_msg: format!("hll_count expects 1 argument, got {}", columns.len()),
            }
            .fail();
        }

        let hll_vec = columns[0]
            .as_any()
            .downcast_ref::<BinaryVector>()
            .with_context(|| DowncastVectorSnafu {
                err_msg: format!("expect BinaryVector, got {}", columns[0].vector_type_name()),
            })?;
        let len = hll_vec.len();
        let mut builder = UInt64VectorBuilder::with_capacity(len);

        for i in 0..len {
            let hll_opt = hll_vec.get_data(i);

            if hll_opt.is_none() {
                builder.push_null();
                continue;
            }

            let hll_bytes = hll_opt.unwrap();

            // Deserialize the HyperLogLogPlus from its bincode representation
            let mut hll: HllStateType = match bincode::deserialize(hll_bytes) {
                Ok(h) => h,
                Err(e) => {
                    common_telemetry::trace!("Failed to deserialize HyperLogLogPlus: {}", e);
                    builder.push_null();
                    continue;
                }
            };

            builder.push(Some(hll.count().round() as u64));
        }

        Ok(builder.to_vector())
    }
}

#[cfg(test)]
mod tests {
    use datatypes::vectors::BinaryVector;

    use super::*;
    use crate::utils::FixedRandomState;

    #[test]
    fn test_hll_count_function() {
        let function = HllCalcFunction;
        assert_eq!("hll_count", function.name());
        assert_eq!(
            ConcreteDataType::uint64_datatype(),
            function
                .return_type(&[ConcreteDataType::uint64_datatype()])
                .unwrap()
        );

        // Create a test HLL
        let mut hll = HllStateType::new(14, FixedRandomState::new()).unwrap();
        for i in 1..=10 {
            hll.insert(&i.to_string());
        }

        let serialized_bytes = bincode::serialize(&hll).unwrap();
        let args: Vec<VectorRef> = vec![Arc::new(BinaryVector::from(vec![Some(serialized_bytes)]))];

        let result = function.eval(FunctionContext::default(), &args).unwrap();
        assert_eq!(result.len(), 1);

        // Test cardinality estimate
        if let datatypes::value::Value::UInt64(v) = result.get(0) {
            assert_eq!(v, 10);
        } else {
            panic!("Expected uint64 value");
        }
    }

    #[test]
    fn test_hll_count_function_errors() {
        let function = HllCalcFunction;

        // Test with invalid number of arguments
        let args: Vec<VectorRef> = vec![];
        let result = function.eval(FunctionContext::default(), &args);
        assert!(result.is_err());
        assert!(result
            .unwrap_err()
            .to_string()
            .contains("hll_count expects 1 argument"));

        // Test with invalid binary data
        let args: Vec<VectorRef> = vec![Arc::new(BinaryVector::from(vec![Some(vec![1, 2, 3])]))]; // Invalid binary data
        let result = function.eval(FunctionContext::default(), &args).unwrap();
        assert_eq!(result.len(), 1);
        assert!(matches!(result.get(0), datatypes::value::Value::Null));
    }
}
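A small sketch (not in the diff) of what the eval loop above does per row, stripped of the vector machinery; the function name is hypothetical.

// Sketch only: decode one serialized HLL state and return its estimate,
// mirroring the deserialize-then-count step in `eval`.
fn cardinality_from_state(raw: &[u8]) -> Option<u64> {
    use hyperloglogplus::HyperLogLog;
    let mut hll: HllStateType = bincode::deserialize(raw).ok()?;
    Some(hll.count().round() as u64)
}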
src/common/function/src/scalars/uddsketch_calc.rs (new file, 211 lines)
@@ -0,0 +1,211 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Implementation of the scalar function `uddsketch_calc`.

use std::fmt;
use std::fmt::Display;
use std::sync::Arc;

use common_query::error::{DowncastVectorSnafu, InvalidFuncArgsSnafu, Result};
use common_query::prelude::{Signature, Volatility};
use datatypes::data_type::ConcreteDataType;
use datatypes::prelude::Vector;
use datatypes::scalars::{ScalarVector, ScalarVectorBuilder};
use datatypes::vectors::{BinaryVector, Float64VectorBuilder, MutableVector, VectorRef};
use snafu::OptionExt;
use uddsketch::UDDSketch;

use crate::function::{Function, FunctionContext};
use crate::function_registry::FunctionRegistry;

const NAME: &str = "uddsketch_calc";

/// UddSketchCalcFunction implements the scalar function `uddsketch_calc`.
///
/// It accepts two arguments:
/// 1. A percentile (as f64) for which to compute the estimated quantile (e.g. 0.95 for p95).
/// 2. The serialized UDDSketch state, as produced by the aggregator (binary).
///
/// For each row, it deserializes the sketch and returns the computed quantile value.
#[derive(Debug, Default)]
pub struct UddSketchCalcFunction;

impl UddSketchCalcFunction {
    pub fn register(registry: &FunctionRegistry) {
        registry.register(Arc::new(UddSketchCalcFunction));
    }
}

impl Display for UddSketchCalcFunction {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", NAME.to_ascii_uppercase())
    }
}

impl Function for UddSketchCalcFunction {
    fn name(&self) -> &str {
        NAME
    }

    fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
        Ok(ConcreteDataType::float64_datatype())
    }

    fn signature(&self) -> Signature {
        // First argument: percentile (float64)
        // Second argument: UDDSketch state (binary)
        Signature::exact(
            vec![
                ConcreteDataType::float64_datatype(),
                ConcreteDataType::binary_datatype(),
            ],
            Volatility::Immutable,
        )
    }

    fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
        if columns.len() != 2 {
            return InvalidFuncArgsSnafu {
                err_msg: format!("uddsketch_calc expects 2 arguments, got {}", columns.len()),
            }
            .fail();
        }

        let perc_vec = &columns[0];
        let sketch_vec = columns[1]
            .as_any()
            .downcast_ref::<BinaryVector>()
            .with_context(|| DowncastVectorSnafu {
                err_msg: format!("expect BinaryVector, got {}", columns[1].vector_type_name()),
            })?;
        let len = sketch_vec.len();
        let mut builder = Float64VectorBuilder::with_capacity(len);

        for i in 0..len {
            let perc_opt = perc_vec.get(i).as_f64_lossy();
            let sketch_opt = sketch_vec.get_data(i);

            if sketch_opt.is_none() || perc_opt.is_none() {
                builder.push_null();
                continue;
            }

            let sketch_bytes = sketch_opt.unwrap();
            let perc = perc_opt.unwrap();

            // Deserialize the UDDSketch from its bincode representation
            let sketch: UDDSketch = match bincode::deserialize(sketch_bytes) {
                Ok(s) => s,
                Err(e) => {
                    common_telemetry::trace!("Failed to deserialize UDDSketch: {}", e);
                    builder.push_null();
                    continue;
                }
            };

            // Compute the estimated quantile from the sketch
            let result = sketch.estimate_quantile(perc);
            builder.push(Some(result));
        }

        Ok(builder.to_vector())
    }
}

#[cfg(test)]
mod tests {
    use std::sync::Arc;

    use datatypes::vectors::{BinaryVector, Float64Vector};

    use super::*;

    #[test]
    fn test_uddsketch_calc_function() {
        let function = UddSketchCalcFunction;
        assert_eq!("uddsketch_calc", function.name());
        assert_eq!(
            ConcreteDataType::float64_datatype(),
            function
                .return_type(&[ConcreteDataType::float64_datatype()])
                .unwrap()
        );

        // Create a test sketch
        let mut sketch = UDDSketch::new(128, 0.01);
        sketch.add_value(10.0);
        sketch.add_value(20.0);
        sketch.add_value(30.0);
        sketch.add_value(40.0);
        sketch.add_value(50.0);
        sketch.add_value(60.0);
        sketch.add_value(70.0);
        sketch.add_value(80.0);
        sketch.add_value(90.0);
        sketch.add_value(100.0);

        // Get expected values directly from the sketch
        let expected_p50 = sketch.estimate_quantile(0.5);
        let expected_p90 = sketch.estimate_quantile(0.9);
        let expected_p95 = sketch.estimate_quantile(0.95);

        let serialized = bincode::serialize(&sketch).unwrap();
        let percentiles = vec![0.5, 0.9, 0.95];

        let args: Vec<VectorRef> = vec![
            Arc::new(Float64Vector::from_vec(percentiles.clone())),
            Arc::new(BinaryVector::from(vec![Some(serialized.clone()); 3])),
        ];

        let result = function.eval(FunctionContext::default(), &args).unwrap();
        assert_eq!(result.len(), 3);

        // Test median (p50)
        assert!(
            matches!(result.get(0), datatypes::value::Value::Float64(v) if (v - expected_p50).abs() < 1e-10)
        );
        // Test p90
        assert!(
            matches!(result.get(1), datatypes::value::Value::Float64(v) if (v - expected_p90).abs() < 1e-10)
        );
        // Test p95
        assert!(
            matches!(result.get(2), datatypes::value::Value::Float64(v) if (v - expected_p95).abs() < 1e-10)
        );
    }

    #[test]
    fn test_uddsketch_calc_function_errors() {
        let function = UddSketchCalcFunction;

        // Test with invalid number of arguments
        let args: Vec<VectorRef> = vec![Arc::new(Float64Vector::from_vec(vec![0.95]))];
        let result = function.eval(FunctionContext::default(), &args);
        assert!(result.is_err());
        assert!(result
            .unwrap_err()
            .to_string()
            .contains("uddsketch_calc expects 2 arguments"));

        // Test with invalid binary data
        let args: Vec<VectorRef> = vec![
            Arc::new(Float64Vector::from_vec(vec![0.95])),
            Arc::new(BinaryVector::from(vec![Some(vec![1, 2, 3])])), // Invalid binary data
        ];
        let result = function.eval(FunctionContext::default(), &args).unwrap();
        assert_eq!(result.len(), 1);
        assert!(matches!(result.get(0), datatypes::value::Value::Null));
    }
}
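Similarly, a hedged per-row sketch of what `uddsketch_calc(0.95, state)` computes; the helper name is made up, and note the argument order is percentile first, then the binary state.

// Sketch only: decode one serialized UDDSketch state and estimate p95.
fn p95_from_state(raw: &[u8]) -> Option<f64> {
    let sketch: uddsketch::UDDSketch = bincode::deserialize(raw).ok()?;
    Some(sketch.estimate_quantile(0.95))
}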
@@ -12,6 +12,11 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::hash::BuildHasher;

use ahash::RandomState;
use serde::{Deserialize, Serialize};

/// Escapes special characters in the provided pattern string for `LIKE`.
///
/// Specifically, it prefixes the backslash (`\`), percent (`%`), and underscore (`_`)
@@ -32,6 +37,71 @@ pub fn escape_like_pattern(pattern: &str) -> String {
        })
        .collect::<String>()
}

/// A random state with fixed seeds.
///
/// This is used to ensure that the hash values are consistent across
/// different processes, and easy to serialize and deserialize.
#[derive(Debug)]
pub struct FixedRandomState {
    state: RandomState,
}

impl FixedRandomState {
    // some random seeds
    const RANDOM_SEED_0: u64 = 0x517cc1b727220a95;
    const RANDOM_SEED_1: u64 = 0x428a2f98d728ae22;
    const RANDOM_SEED_2: u64 = 0x7137449123ef65cd;
    const RANDOM_SEED_3: u64 = 0xb5c0fbcfec4d3b2f;

    pub fn new() -> Self {
        Self {
            state: ahash::RandomState::with_seeds(
                Self::RANDOM_SEED_0,
                Self::RANDOM_SEED_1,
                Self::RANDOM_SEED_2,
                Self::RANDOM_SEED_3,
            ),
        }
    }
}

impl Default for FixedRandomState {
    fn default() -> Self {
        Self::new()
    }
}

impl BuildHasher for FixedRandomState {
    type Hasher = ahash::AHasher;

    fn build_hasher(&self) -> Self::Hasher {
        self.state.build_hasher()
    }

    fn hash_one<T: std::hash::Hash>(&self, x: T) -> u64 {
        self.state.hash_one(x)
    }
}

impl Serialize for FixedRandomState {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        serializer.serialize_unit()
    }
}

impl<'de> Deserialize<'de> for FixedRandomState {
    fn deserialize<D>(_deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        Ok(Self::new())
    }
}

#[cfg(test)]
mod tests {
    use super::*;
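A short sketch (not in the diff) of why the fixed seeds matter: the same key hashes to the same value in every process, so serialized HLL registers stay comparable across restarts and nodes. The helper name is hypothetical.

// Sketch only: hashes are stable because the seeds never change.
fn stable_hash(key: &str) -> u64 {
    use std::hash::BuildHasher;
    FixedRandomState::new().hash_one(key)
}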
@@ -16,7 +16,6 @@ use std::collections::HashMap;
use std::sync::Arc;

use futures::future::BoxFuture;
use futures::TryStreamExt;
use moka::future::Cache;
use moka::ops::compute::Op;
use table::metadata::TableId;
@@ -54,9 +53,13 @@ fn init_factory(table_flow_manager: TableFlowManagerRef) -> Initializer<TableId,
        Box::pin(async move {
            table_flow_manager
                .flows(table_id)
                .map_ok(|(key, value)| (key.flownode_id(), value.peer))
                .try_collect::<HashMap<_, _>>()
                .await
                .map(|flows| {
                    flows
                        .into_iter()
                        .map(|(key, value)| (key.flownode_id(), value.peer))
                        .collect::<HashMap<_, _>>()
                })
            // We must cache the `HashSet` even if it's empty,
            // to avoid future requests to the remote storage next time;
            // If the value is added to the remote storage,
@@ -57,12 +57,10 @@ pub trait ClusterInfo {
}

/// The key of [NodeInfo] in the storage. The format is `__meta_cluster_node_info-{cluster_id}-{role}-{node_id}`.
///
/// This key cannot be used to describe the `Metasrv` because the `Metasrv` does not have
/// a `cluster_id`, it serves multiple clusters.
#[derive(Debug, Clone, Copy, Eq, Hash, PartialEq, Serialize, Deserialize)]
pub struct NodeInfoKey {
    /// The cluster id.
    // todo(hl): remove cluster_id as it is not assigned anywhere.
    pub cluster_id: ClusterId,
    /// The role of the node. It can be `[Role::Datanode]` or `[Role::Frontend]`.
    pub role: Role,
@@ -232,8 +230,8 @@ impl TryFrom<Vec<u8>> for NodeInfoKey {
    }
}

impl From<NodeInfoKey> for Vec<u8> {
impl From<&NodeInfoKey> for Vec<u8> {
    fn from(key: NodeInfoKey) -> Self {
    fn from(key: &NodeInfoKey) -> Self {
        format!(
            "{}-{}-{}-{}",
            CLUSTER_NODE_INFO_PREFIX,
@@ -315,7 +313,7 @@ mod tests {
            node_id: 2,
        };

        let key_bytes: Vec<u8> = key.into();
        let key_bytes: Vec<u8> = (&key).into();
        let new_key: NodeInfoKey = key_bytes.try_into().unwrap();

        assert_eq!(1, new_key.cluster_id);
@@ -15,6 +15,7 @@
|
|||||||
mod metadata;
|
mod metadata;
|
||||||
|
|
||||||
use std::collections::BTreeMap;
|
use std::collections::BTreeMap;
|
||||||
|
use std::fmt;
|
||||||
|
|
||||||
use api::v1::flow::flow_request::Body as PbFlowRequest;
|
use api::v1::flow::flow_request::Body as PbFlowRequest;
|
||||||
use api::v1::flow::{CreateRequest, FlowRequest, FlowRequestHeader};
|
use api::v1::flow::{CreateRequest, FlowRequest, FlowRequestHeader};
|
||||||
@@ -28,7 +29,6 @@ use common_procedure::{
|
|||||||
use common_telemetry::info;
|
use common_telemetry::info;
|
||||||
use common_telemetry::tracing_context::TracingContext;
|
use common_telemetry::tracing_context::TracingContext;
|
||||||
use futures::future::join_all;
|
use futures::future::join_all;
|
||||||
use futures::TryStreamExt;
|
|
||||||
use itertools::Itertools;
|
use itertools::Itertools;
|
||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
use snafu::{ensure, ResultExt};
|
use snafu::{ensure, ResultExt};
|
||||||
@@ -77,6 +77,7 @@ impl CreateFlowProcedure {
|
|||||||
query_context,
|
query_context,
|
||||||
state: CreateFlowState::Prepare,
|
state: CreateFlowState::Prepare,
|
||||||
prev_flow_info_value: None,
|
prev_flow_info_value: None,
|
||||||
|
flow_type: None,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -104,7 +105,7 @@ impl CreateFlowProcedure {
|
|||||||
if create_if_not_exists && or_replace {
|
if create_if_not_exists && or_replace {
|
||||||
// this is forbidden because not clear what does that mean exactly
|
// this is forbidden because not clear what does that mean exactly
|
||||||
return error::UnsupportedSnafu {
|
return error::UnsupportedSnafu {
|
||||||
operation: "Create flow with both `IF NOT EXISTS` and `OR REPLACE`".to_string(),
|
operation: "Create flow with both `IF NOT EXISTS` and `OR REPLACE`",
|
||||||
}
|
}
|
||||||
.fail();
|
.fail();
|
||||||
}
|
}
|
||||||
@@ -129,9 +130,10 @@ impl CreateFlowProcedure {
|
|||||||
.flow_metadata_manager
|
.flow_metadata_manager
|
||||||
.flow_route_manager()
|
.flow_route_manager()
|
||||||
.routes(flow_id)
|
.routes(flow_id)
|
||||||
.map_ok(|(_, value)| value.peer)
|
.await?
|
||||||
.try_collect::<Vec<_>>()
|
.into_iter()
|
||||||
.await?;
|
.map(|(_, value)| value.peer)
|
||||||
|
.collect::<Vec<_>>();
|
||||||
self.data.flow_id = Some(flow_id);
|
self.data.flow_id = Some(flow_id);
|
||||||
self.data.peers = peers;
|
self.data.peers = peers;
|
||||||
info!("Replacing flow, flow_id: {}", flow_id);
|
info!("Replacing flow, flow_id: {}", flow_id);
|
||||||
@@ -175,6 +177,8 @@ impl CreateFlowProcedure {
|
|||||||
self.allocate_flow_id().await?;
|
self.allocate_flow_id().await?;
|
||||||
}
|
}
|
||||||
self.data.state = CreateFlowState::CreateFlows;
|
self.data.state = CreateFlowState::CreateFlows;
|
||||||
|
// determine flow type
|
||||||
|
self.data.flow_type = Some(determine_flow_type(&self.data.task));
|
||||||
|
|
||||||
Ok(Status::executing(true))
|
Ok(Status::executing(true))
|
||||||
}
|
}
|
||||||
@@ -309,6 +313,11 @@ impl Procedure for CreateFlowProcedure {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub fn determine_flow_type(_flow_task: &CreateFlowTask) -> FlowType {
|
||||||
|
// TODO(discord9): determine flow type
|
||||||
|
FlowType::RecordingRule
|
||||||
|
}
|
||||||
|
|
||||||
/// The state of [CreateFlowProcedure].
|
/// The state of [CreateFlowProcedure].
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize, AsRefStr, PartialEq)]
|
#[derive(Debug, Clone, Serialize, Deserialize, AsRefStr, PartialEq)]
|
||||||
pub enum CreateFlowState {
|
pub enum CreateFlowState {
|
||||||
@@ -322,6 +331,35 @@ pub enum CreateFlowState {
CreateMetadata,
}

/// The type of flow.
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub enum FlowType {
/// The flow is a recording rule task.
RecordingRule,
/// The flow is a streaming task.
Streaming,
}

impl FlowType {
pub const RECORDING_RULE: &str = "recording_rule";
pub const STREAMING: &str = "streaming";
}

impl Default for FlowType {
fn default() -> Self {
Self::RecordingRule
}
}

impl fmt::Display for FlowType {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
FlowType::RecordingRule => write!(f, "{}", FlowType::RECORDING_RULE),
FlowType::Streaming => write!(f, "{}", FlowType::STREAMING),
}
}
}

/// The serializable data.
#[derive(Debug, Serialize, Deserialize)]
pub struct CreateFlowData {
@@ -335,6 +373,7 @@ pub struct CreateFlowData {
/// For verify if prev value is consistent when need to update flow metadata.
/// only set when `or_replace` is true.
pub(crate) prev_flow_info_value: Option<DeserializedValueWithBytes<FlowInfoValue>>,
pub(crate) flow_type: Option<FlowType>,
}

impl From<&CreateFlowData> for CreateRequest {
@@ -342,7 +381,7 @@ impl From<&CreateFlowData> for CreateRequest {
let flow_id = value.flow_id.unwrap();
let source_table_ids = &value.source_table_ids;

CreateRequest {
let mut req = CreateRequest {
flow_id: Some(api::v1::FlowId { id: flow_id }),
source_table_ids: source_table_ids
.iter()
@@ -356,7 +395,11 @@ impl From<&CreateFlowData> for CreateRequest {
comment: value.task.comment.clone(),
sql: value.task.sql.clone(),
flow_options: value.task.flow_options.clone(),
}
};

let flow_type = value.flow_type.unwrap_or_default().to_string();
req.flow_options.insert("flow_type".to_string(), flow_type);
req
}
}
@@ -369,7 +412,7 @@ impl From<&CreateFlowData> for (FlowInfoValue, Vec<(FlowPartitionId, FlowRouteVa
expire_after,
comment,
sql,
flow_options: options,
flow_options: mut options,
..
} = value.task.clone();

@@ -386,19 +429,21 @@ impl From<&CreateFlowData> for (FlowInfoValue, Vec<(FlowPartitionId, FlowRouteVa
.map(|(idx, peer)| (idx as u32, FlowRouteValue { peer: peer.clone() }))
.collect::<Vec<_>>();

(
FlowInfoValue {
source_table_ids: value.source_table_ids.clone(),
sink_table_name,
flownode_ids,
catalog_name,
flow_name,
raw_sql: sql,
expire_after,
comment,
options,
},
flow_routes,
)
let flow_type = value.flow_type.unwrap_or_default().to_string();
options.insert("flow_type".to_string(), flow_type);

let flow_info = FlowInfoValue {
source_table_ids: value.source_table_ids.clone(),
sink_table_name,
flownode_ids,
catalog_name,
flow_name,
raw_sql: sql,
expire_after,
comment,
options,
};

(flow_info, flow_routes)
}
}
@@ -128,7 +128,7 @@ impl State for DropDatabaseExecutor
.await?;
executor.invalidate_table_cache(ddl_ctx).await?;
executor
.on_drop_regions(ddl_ctx, &self.physical_region_routes)
.on_drop_regions(ddl_ctx, &self.physical_region_routes, true)
.await?;
info!("Table: {}({}) is dropped", self.table_name, self.table_id);

@@ -13,7 +13,6 @@
// limitations under the License.

use common_catalog::format_full_flow_name;
use futures::TryStreamExt;
use snafu::{ensure, OptionExt};

use crate::ddl::drop_flow::DropFlowProcedure;
@@ -39,9 +38,10 @@ impl DropFlowProcedure {
.flow_metadata_manager
.flow_route_manager()
.routes(self.data.task.flow_id)
.map_ok(|(_, value)| value)
.try_collect::<Vec<_>>()
.await?;
.await?
.into_iter()
.map(|(_, value)| value)
.collect::<Vec<_>>();
ensure!(
!flow_route_values.is_empty(),
error::FlowRouteNotFoundSnafu {
@@ -156,7 +156,7 @@ impl DropTableProcedure {

pub async fn on_datanode_drop_regions(&mut self) -> Result<Status> {
self.executor
.on_drop_regions(&self.context, &self.data.physical_region_routes)
.on_drop_regions(&self.context, &self.data.physical_region_routes, false)
.await?;
self.data.state = DropTableState::DeleteTombstone;
Ok(Status::executing(true))
@@ -214,6 +214,7 @@ impl DropTableExecutor {
&self,
ctx: &DdlContext,
region_routes: &[RegionRoute],
fast_path: bool,
) -> Result<()> {
let leaders = find_leaders(region_routes);
let mut drop_region_tasks = Vec::with_capacity(leaders.len());
@@ -236,6 +237,7 @@ impl DropTableExecutor {
}),
body: Some(region_request::Body::Drop(PbDropRegionRequest {
region_id: region_id.as_u64(),
fast_path,
})),
};
let datanode = datanode.clone();
@@ -16,9 +16,9 @@ pub mod flow_info;
pub(crate) mod flow_name;
pub(crate) mod flow_route;
pub mod flow_state;
mod flownode_addr_helper;
pub(crate) mod flownode_flow;
pub(crate) mod table_flow;

use std::ops::Deref;
use std::sync::Arc;

@@ -506,7 +506,6 @@ mod tests {
let routes = flow_metadata_manager
.flow_route_manager()
.routes(flow_id)
.try_collect::<Vec<_>>()
.await
.unwrap();
assert_eq!(
@@ -538,7 +537,6 @@ mod tests {
let nodes = flow_metadata_manager
.table_flow_manager()
.flows(table_id)
.try_collect::<Vec<_>>()
.await
.unwrap();
assert_eq!(
@@ -727,7 +725,6 @@ mod tests {
let routes = flow_metadata_manager
.flow_route_manager()
.routes(flow_id)
.try_collect::<Vec<_>>()
.await
.unwrap();
assert_eq!(
@@ -759,7 +756,6 @@ mod tests {
let nodes = flow_metadata_manager
.table_flow_manager()
.flows(table_id)
.try_collect::<Vec<_>>()
.await
.unwrap();
assert_eq!(
@@ -12,14 +12,15 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use futures::stream::BoxStream;
use futures::TryStreamExt;
use lazy_static::lazy_static;
use regex::Regex;
use serde::{Deserialize, Serialize};
use snafu::OptionExt;

use crate::error::{self, Result};
use crate::key::flow::FlowScoped;
use crate::key::flow::{flownode_addr_helper, FlowScoped};
use crate::key::node_address::NodeAddressKey;
use crate::key::{BytesAdapter, FlowId, FlowPartitionId, MetadataKey, MetadataValue};
use crate::kv_backend::txn::{Txn, TxnOp};
use crate::kv_backend::KvBackendRef;
@@ -167,10 +168,7 @@ impl FlowRouteManager {
}

/// Retrieves all [FlowRouteValue]s of the specified `flow_id`.
pub fn routes(
&self,
flow_id: FlowId,
) -> BoxStream<'static, Result<(FlowRouteKey, FlowRouteValue)>> {
pub async fn routes(&self, flow_id: FlowId) -> Result<Vec<(FlowRouteKey, FlowRouteValue)>> {
let start_key = FlowRouteKey::range_start_key(flow_id);
let req = RangeRequest::new().with_prefix(start_key);
let stream = PaginationStream::new(
@@ -181,7 +179,9 @@ impl FlowRouteManager {
)
.into_stream();

Box::pin(stream)
let mut res = stream.try_collect::<Vec<_>>().await?;
self.remap_flow_route_addresses(&mut res).await?;
Ok(res)
}

/// Builds a create flow routes transaction.
@@ -203,6 +203,28 @@ impl FlowRouteManager {

Ok(Txn::new().and_then(txns))
}

async fn remap_flow_route_addresses(
&self,
flow_routes: &mut [(FlowRouteKey, FlowRouteValue)],
) -> Result<()> {
let keys = flow_routes
.iter()
.map(|(_, value)| NodeAddressKey::with_flownode(value.peer.id))
.collect();
let flow_node_addrs =
flownode_addr_helper::get_flownode_addresses(&self.kv_backend, keys).await?;
for (_, flow_route_value) in flow_routes.iter_mut() {
let flownode_id = flow_route_value.peer.id;
// If an id lacks a corresponding address in the `flow_node_addrs`,
// it means the old address in `table_flow_value` is still valid,
// which is expected.
if let Some(node_addr) = flow_node_addrs.get(&flownode_id) {
flow_route_value.peer.addr = node_addr.peer.addr.clone();
}
}
Ok(())
}
}

#[cfg(test)]
47
src/common/meta/src/key/flow/flownode_addr_helper.rs
Normal file
@@ -0,0 +1,47 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::collections::HashMap;

use crate::error::Result;
use crate::key::node_address::{NodeAddressKey, NodeAddressValue};
use crate::key::{MetadataKey, MetadataValue};
use crate::kv_backend::KvBackendRef;
use crate::rpc::store::BatchGetRequest;

/// Get the addresses of the flownodes.
/// The result is a map: node_id -> NodeAddressValue
pub(crate) async fn get_flownode_addresses(
kv_backend: &KvBackendRef,
keys: Vec<NodeAddressKey>,
) -> Result<HashMap<u64, NodeAddressValue>> {
if keys.is_empty() {
return Ok(HashMap::default());
}

let req = BatchGetRequest {
keys: keys.into_iter().map(|k| k.to_bytes()).collect(),
};
kv_backend
.batch_get(req)
.await?
.kvs
.into_iter()
.map(|kv| {
let key = NodeAddressKey::from_bytes(&kv.key)?;
let value = NodeAddressValue::try_from_raw_value(&kv.value)?;
Ok((key.node_id, value))
})
.collect()
}
@@ -14,7 +14,7 @@

use std::sync::Arc;

use futures::stream::BoxStream;
use futures::TryStreamExt;
use lazy_static::lazy_static;
use regex::Regex;
use serde::{Deserialize, Serialize};
@@ -22,7 +22,8 @@ use snafu::OptionExt;
use table::metadata::TableId;

use crate::error::{self, Result};
use crate::key::flow::FlowScoped;
use crate::key::flow::{flownode_addr_helper, FlowScoped};
use crate::key::node_address::NodeAddressKey;
use crate::key::{BytesAdapter, FlowId, FlowPartitionId, MetadataKey, MetadataValue};
use crate::kv_backend::txn::{Txn, TxnOp};
use crate::kv_backend::KvBackendRef;
@@ -196,10 +197,7 @@ impl TableFlowManager {
/// Retrieves all [TableFlowKey]s of the specified `table_id`.
///
/// TODO(discord9): add cache for it since range request does not support cache.
pub fn flows(
&self,
table_id: TableId,
) -> BoxStream<'static, Result<(TableFlowKey, TableFlowValue)>> {
pub async fn flows(&self, table_id: TableId) -> Result<Vec<(TableFlowKey, TableFlowValue)>> {
let start_key = TableFlowKey::range_start_key(table_id);
let req = RangeRequest::new().with_prefix(start_key);
let stream = PaginationStream::new(
@@ -210,7 +208,9 @@ impl TableFlowManager {
)
.into_stream();

Box::pin(stream)
let mut res = stream.try_collect::<Vec<_>>().await?;
self.remap_table_flow_addresses(&mut res).await?;
Ok(res)
}

/// Builds a create table flow transaction.
@@ -238,6 +238,28 @@ impl TableFlowManager {

Ok(Txn::new().and_then(txns))
}

async fn remap_table_flow_addresses(
&self,
table_flows: &mut [(TableFlowKey, TableFlowValue)],
) -> Result<()> {
let keys = table_flows
.iter()
.map(|(_, value)| NodeAddressKey::with_flownode(value.peer.id))
.collect::<Vec<_>>();
let flownode_addrs =
flownode_addr_helper::get_flownode_addresses(&self.kv_backend, keys).await?;
for (_, table_flow_value) in table_flows.iter_mut() {
let flownode_id = table_flow_value.peer.id;
// If an id lacks a corresponding address in the `flow_node_addrs`,
// it means the old address in `table_flow_value` is still valid,
// which is expected.
if let Some(flownode_addr) = flownode_addrs.get(&flownode_id) {
table_flow_value.peer.addr = flownode_addr.peer.addr.clone();
}
}
Ok(())
}
}

#[cfg(test)]
@@ -39,6 +39,10 @@ impl NodeAddressKey {
pub fn with_datanode(node_id: u64) -> Self {
Self::new(Role::Datanode, node_id)
}

pub fn with_flownode(node_id: u64) -> Self {
Self::new(Role::Flownode, node_id)
}
}

#[derive(Debug, PartialEq, Serialize, Deserialize, Clone)]
@@ -34,6 +34,7 @@ pub mod kv_backend;
pub mod leadership_notifier;
pub mod lock_key;
pub mod metrics;
pub mod node_expiry_listener;
pub mod node_manager;
pub mod peer;
pub mod range_stream;
152
src/common/meta/src/node_expiry_listener.rs
Normal file
@@ -0,0 +1,152 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Mutex;
use std::time::Duration;

use common_telemetry::{debug, error, info, warn};
use tokio::task::JoinHandle;
use tokio::time::{interval, MissedTickBehavior};

use crate::cluster::{NodeInfo, NodeInfoKey};
use crate::error;
use crate::kv_backend::ResettableKvBackendRef;
use crate::leadership_notifier::LeadershipChangeListener;
use crate::rpc::store::RangeRequest;
use crate::rpc::KeyValue;

/// [NodeExpiryListener] periodically checks all node info in memory and removes
/// expired node info to prevent memory leak.
pub struct NodeExpiryListener {
handle: Mutex<Option<JoinHandle<()>>>,
max_idle_time: Duration,
in_memory: ResettableKvBackendRef,
}

impl Drop for NodeExpiryListener {
fn drop(&mut self) {
self.stop();
}
}

impl NodeExpiryListener {
pub fn new(max_idle_time: Duration, in_memory: ResettableKvBackendRef) -> Self {
Self {
handle: Mutex::new(None),
max_idle_time,
in_memory,
}
}

async fn start(&self) {
let mut handle = self.handle.lock().unwrap();
if handle.is_none() {
let in_memory = self.in_memory.clone();

let max_idle_time = self.max_idle_time;
let ticker_loop = tokio::spawn(async move {
// Run clean task every minute.
let mut interval = interval(Duration::from_secs(60));
interval.set_missed_tick_behavior(MissedTickBehavior::Skip);
loop {
interval.tick().await;
if let Err(e) = Self::clean_expired_nodes(&in_memory, max_idle_time).await {
error!(e; "Failed to clean expired node");
}
}
});
*handle = Some(ticker_loop);
}
}

fn stop(&self) {
if let Some(handle) = self.handle.lock().unwrap().take() {
handle.abort();
info!("Node expiry listener stopped")
}
}

/// Cleans expired nodes from memory.
async fn clean_expired_nodes(
in_memory: &ResettableKvBackendRef,
max_idle_time: Duration,
) -> error::Result<()> {
let node_keys = Self::list_expired_nodes(in_memory, max_idle_time).await?;
for key in node_keys {
let key_bytes: Vec<u8> = (&key).into();
if let Err(e) = in_memory.delete(&key_bytes, false).await {
warn!(e; "Failed to delete expired node: {:?}", key_bytes);
} else {
debug!("Deleted expired node key: {:?}", key);
}
}
Ok(())
}

/// Lists expired nodes that have been inactive more than `max_idle_time`.
async fn list_expired_nodes(
in_memory: &ResettableKvBackendRef,
max_idle_time: Duration,
) -> error::Result<impl Iterator<Item = NodeInfoKey>> {
let prefix = NodeInfoKey::key_prefix_with_cluster_id(0);
let req = RangeRequest::new().with_prefix(prefix);
let current_time_millis = common_time::util::current_time_millis();
let resp = in_memory.range(req).await?;
Ok(resp
.kvs
.into_iter()
.filter_map(move |KeyValue { key, value }| {
let Ok(info) = NodeInfo::try_from(value).inspect_err(|e| {
warn!(e; "Unrecognized node info value");
}) else {
return None;
};
if (current_time_millis - info.last_activity_ts) > max_idle_time.as_millis() as i64
{
NodeInfoKey::try_from(key)
.inspect_err(|e| {
warn!(e; "Unrecognized node info key: {:?}", info.peer);
})
.ok()
.inspect(|node_key| {
debug!("Found expired node: {:?}", node_key);
})
} else {
None
}
}))
}
}

#[async_trait::async_trait]
impl LeadershipChangeListener for NodeExpiryListener {
fn name(&self) -> &str {
"NodeExpiryListener"
}

async fn on_leader_start(&self) -> error::Result<()> {
self.start().await;
info!(
"On leader start, node expiry listener started with max idle time: {:?}",
self.max_idle_time
);
Ok(())
}

async fn on_leader_stop(&self) -> error::Result<()> {
self.stop();
info!("On leader stop, node expiry listener stopped");
Ok(())
}
}
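A rough usage sketch for the new listener, assuming the crate paths below resolve as in this tree (the wiring point is not shown in this excerpt); note the cleanup loop ticks once per minute, so an entry can outlive max_idle_time by up to that interval:

use std::time::Duration;

// Assumed paths, for illustration only.
use common_meta::kv_backend::ResettableKvBackendRef;
use common_meta::node_expiry_listener::NodeExpiryListener;

// Hypothetical window: evict nodes silent for more than 30 minutes on the next 60 s tick.
fn build_node_expiry_listener(in_memory: ResettableKvBackendRef) -> NodeExpiryListener {
    NodeExpiryListener::new(Duration::from_secs(30 * 60), in_memory)
}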
@@ -171,6 +171,10 @@ pub struct S3Config {
pub secret_access_key: SecretString,
pub endpoint: Option<String>,
pub region: Option<String>,
/// Enable virtual host style so that opendal will send API requests in virtual host style instead of path style.
/// By default, opendal will send API to https://s3.us-east-1.amazonaws.com/bucket_name
/// Enabled, opendal will send API to https://bucket_name.s3.us-east-1.amazonaws.com
pub enable_virtual_host_style: bool,
#[serde(flatten)]
pub cache: ObjectStorageCacheConfig,
pub http_client: HttpClientConfig,
@@ -185,6 +189,7 @@ impl PartialEq for S3Config {
&& self.secret_access_key.expose_secret() == other.secret_access_key.expose_secret()
&& self.endpoint == other.endpoint
&& self.region == other.region
&& self.enable_virtual_host_style == other.enable_virtual_host_style
&& self.cache == other.cache
&& self.http_client == other.http_client
}
@@ -289,6 +294,7 @@ impl Default for S3Config {
root: String::default(),
access_key_id: SecretString::from(String::default()),
secret_access_key: SecretString::from(String::default()),
enable_virtual_host_style: false,
endpoint: Option::default(),
region: Option::default(),
cache: ObjectStorageCacheConfig::default(),
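For reference, a hedged sketch of opting into the new flag from code; the field name and URL forms come from the struct's doc comment above, while the import path is an assumption:

// Assumed import path for illustration only.
// use datanode::config::S3Config;

// With the flag on, opendal addresses https://bucket_name.s3.us-east-1.amazonaws.com
// instead of https://s3.us-east-1.amazonaws.com/bucket_name.
let s3 = S3Config {
    enable_virtual_host_style: true,
    ..Default::default()
};
assert!(s3.enable_virtual_host_style);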
@@ -1218,7 +1218,10 @@ mod tests {
);

let response = mock_region_server
.handle_request(region_id, RegionRequest::Drop(RegionDropRequest {}))
.handle_request(
region_id,
RegionRequest::Drop(RegionDropRequest { fast_path: false }),
)
.await
.unwrap();
assert_eq!(response.affected_rows, 0);
@@ -1310,7 +1313,10 @@ mod tests {
.insert(region_id, RegionEngineWithStatus::Ready(engine.clone()));

mock_region_server
.handle_request(region_id, RegionRequest::Drop(RegionDropRequest {}))
.handle_request(
region_id,
RegionRequest::Drop(RegionDropRequest { fast_path: false }),
)
.await
.unwrap_err();

@@ -41,10 +41,13 @@ pub(crate) async fn new_s3_object_store(s3_config: &S3Config) -> Result<ObjectSt

if s3_config.endpoint.is_some() {
builder = builder.endpoint(s3_config.endpoint.as_ref().unwrap());
};
}
if s3_config.region.is_some() {
builder = builder.region(s3_config.region.as_ref().unwrap());
};
}
if s3_config.enable_virtual_host_style {
builder = builder.enable_virtual_host_style();
}

Ok(ObjectStore::new(builder)
.context(error::InitBackendSnafu)?
@@ -32,5 +32,5 @@ pub mod types;
pub mod value;
pub mod vectors;

pub use arrow;
pub use arrow::{self, compute};
pub use error::{Error, Result};
@@ -133,7 +133,7 @@ impl Flownode for FlowWorkerManager {
.map_err(to_meta_err(snafu::location!()))?;

debug!(
"Done to flush flow_id={:?} with {} input rows flushed, {} rows sended and {} output rows flushed",
"Done to flush flow_id={:?} with {} input rows flushed, {} rows sent and {} output rows flushed",
flow_id, flushed_input_rows, rows_send, row
);
Ok(FlowResponse {
@@ -60,12 +60,12 @@ async fn query_flow_state(
#[derive(Clone)]
pub struct HeartbeatTask {
node_id: u64,
node_epoch: u64,
peer_addr: String,
meta_client: Arc<MetaClient>,
report_interval: Duration,
retry_interval: Duration,
resp_handler_executor: HeartbeatResponseHandlerExecutorRef,
start_time_ms: u64,
running: Arc<AtomicBool>,
query_stat_size: Option<SizeReportSender>,
}
@@ -83,12 +83,12 @@ impl HeartbeatTask {
) -> Self {
Self {
node_id: opts.node_id.unwrap_or(0),
node_epoch: common_time::util::current_time_millis() as u64,
peer_addr: addrs::resolve_addr(&opts.grpc.bind_addr, Some(&opts.grpc.server_addr)),
meta_client,
report_interval: heartbeat_opts.interval,
retry_interval: heartbeat_opts.retry_interval,
resp_handler_executor,
start_time_ms: common_time::util::current_time_millis() as u64,
running: Arc::new(AtomicBool::new(false)),
query_stat_size: None,
}
@@ -103,6 +103,11 @@ impl HeartbeatTask {
warn!("Heartbeat task started multiple times");
return Ok(());
}

self.create_streams().await
}

async fn create_streams(&self) -> Result<(), Error> {
info!("Start to establish the heartbeat connection to metasrv.");
let (req_sender, resp_stream) = self
.meta_client
@@ -181,7 +186,7 @@ impl HeartbeatTask {
mut outgoing_rx: mpsc::Receiver<OutgoingMessage>,
) {
let report_interval = self.report_interval;
let start_time_ms = self.start_time_ms;
let node_epoch = self.node_epoch;
let self_peer = Some(Peer {
id: self.node_id,
addr: self.peer_addr.clone(),
@@ -198,7 +203,8 @@ impl HeartbeatTask {

let heartbeat_request = HeartbeatRequest {
peer: self_peer,
info: Self::build_node_info(start_time_ms),
node_epoch,
info: Self::build_node_info(node_epoch),
..Default::default()
};

@@ -208,6 +214,7 @@ impl HeartbeatTask {
if let Some(message) = message {
Self::new_heartbeat_request(&heartbeat_request, Some(message), &latest_report)
} else {
warn!("Sender has been dropped, exiting the heartbeat loop");
// Receives None that means Sender was dropped, we need to break the current loop
break
}
@@ -230,6 +237,8 @@ impl HeartbeatTask {
// set the timeout to half of the report interval so that it wouldn't delay heartbeat if something went horribly wrong
latest_report = query_flow_state(&query_stat_size, report_interval / 2).await;
}

info!("flownode heartbeat task stopped.");
});
}

@@ -247,7 +256,11 @@ impl HeartbeatTask {
error!(e; "Error while handling heartbeat response");
}
}
Ok(None) => break,
Ok(None) => {
warn!("Heartbeat response stream closed");
capture_self.start_with_retry(retry_interval).await;
break;
}
Err(e) => {
error!(e; "Occur error while reading heartbeat response");
capture_self.start_with_retry(retry_interval).await;
@@ -273,7 +286,7 @@ impl HeartbeatTask {

info!("Try to re-establish the heartbeat connection to metasrv.");

if self.start().await.is_ok() {
if self.create_streams().await.is_ok() {
break;
}
}
@@ -23,7 +23,7 @@ use common_meta::heartbeat::handler::{
};
use common_meta::heartbeat::mailbox::{HeartbeatMailbox, MailboxRef, OutgoingMessage};
use common_meta::heartbeat::utils::outgoing_message_to_mailbox_message;
use common_telemetry::{debug, error, info};
use common_telemetry::{debug, error, info, warn};
use meta_client::client::{HeartbeatSender, HeartbeatStream, MetaClient};
use servers::addrs;
use servers::heartbeat_options::HeartbeatOptions;
@@ -42,8 +42,8 @@ use crate::metrics::{HEARTBEAT_RECV_COUNT, HEARTBEAT_SENT_COUNT};
pub struct HeartbeatTask {
peer_addr: String,
meta_client: Arc<MetaClient>,
report_interval: u64,
report_interval: Duration,
retry_interval: u64,
retry_interval: Duration,
resp_handler_executor: HeartbeatResponseHandlerExecutorRef,
start_time_ms: u64,
}
@@ -58,8 +58,8 @@ impl HeartbeatTask {
HeartbeatTask {
peer_addr: addrs::resolve_addr(&opts.grpc.bind_addr, Some(&opts.grpc.server_addr)),
meta_client,
report_interval: heartbeat_opts.interval.as_millis() as u64,
report_interval: heartbeat_opts.interval,
retry_interval: heartbeat_opts.retry_interval.as_millis() as u64,
retry_interval: heartbeat_opts.retry_interval,
resp_handler_executor,
start_time_ms: common_time::util::current_time_millis() as u64,
}
@@ -103,13 +103,15 @@ impl HeartbeatTask {
HEARTBEAT_RECV_COUNT.with_label_values(&["success"]).inc();
}
}
Ok(None) => break,
Ok(None) => {
warn!("Heartbeat response stream closed");
capture_self.start_with_retry(retry_interval).await;
break;
}
Err(e) => {
HEARTBEAT_RECV_COUNT.with_label_values(&["error"]).inc();
error!(e; "Occur error while reading heartbeat response");
capture_self
.start_with_retry(Duration::from_millis(retry_interval))
.await;
capture_self.start_with_retry(retry_interval).await;

break;
}
@@ -177,12 +179,13 @@ impl HeartbeatTask {
if let Some(message) = message {
Self::new_heartbeat_request(&heartbeat_request, Some(message))
} else {
warn!("Sender has been dropped, exiting the heartbeat loop");
// Receives None that means Sender was dropped, we need to break the current loop
break
}
}
_ = &mut sleep => {
sleep.as_mut().reset(Instant::now() + Duration::from_millis(report_interval));
sleep.as_mut().reset(Instant::now() + report_interval);
Self::new_heartbeat_request(&heartbeat_request, None)
}
};
@@ -237,6 +237,13 @@ impl Instance {

let output = match stmt {
Statement::Query(_) | Statement::Explain(_) | Statement::Delete(_) => {
// TODO: remove this when format is supported in datafusion
if let Statement::Explain(explain) = &stmt {
if let Some(format) = explain.format() {
query_ctx.set_explain_format(format.to_string());
}
}

let stmt = QueryStatement::Sql(stmt);
let plan = self
.statement_executor
@@ -42,7 +42,16 @@ impl BloomFilterApplier {
) -> Result<Vec<Range<usize>>> {
let rows_per_segment = self.meta.rows_per_segment as usize;
let start_seg = search_range.start / rows_per_segment;
let end_seg = search_range.end.div_ceil(rows_per_segment);
let mut end_seg = search_range.end.div_ceil(rows_per_segment);

if end_seg == self.meta.segment_loc_indices.len() + 1 {
// In a previous version, there was a bug where if the last segment was all null,
// this segment would not be written into the index. This caused the slice
// `self.meta.segment_loc_indices[start_seg..end_seg]` to go out of bounds due to
// the missing segment. Since the `search` function does not search for nulls,
// we can simply ignore the last segment in this buggy scenario.
end_seg -= 1;
}

let locs = &self.meta.segment_loc_indices[start_seg..end_seg];

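A worked example of the clamp with hypothetical numbers: rows_per_segment = 2 and five rows whose trailing all-null segment was skipped by the old writer, so the index stores only two segment entries while a search over rows 0..5 computes an end segment of three:

fn main() {
    // Illustrative arithmetic only.
    let rows_per_segment = 2usize;
    let segment_loc_indices_len = 2usize; // the all-null trailing segment was never written
    let search_end = 5usize;

    let mut end_seg = search_end.div_ceil(rows_per_segment); // 3 == len + 1
    if end_seg == segment_loc_indices_len + 1 {
        end_seg -= 1; // ignore the missing segment instead of slicing out of bounds
    }
    assert_eq!(end_seg, 2);
}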
@@ -64,6 +64,9 @@ pub struct BloomFilterCreator {
/// Storage for finalized Bloom filters.
finalized_bloom_filters: FinalizedBloomFilterStorage,

/// Row count that finalized so far.
finalized_row_count: usize,

/// Global memory usage of the bloom filter creator.
global_memory_usage: Arc<AtomicUsize>,
}
@@ -96,6 +99,7 @@ impl BloomFilterCreator {
global_memory_usage,
global_memory_usage_threshold,
),
finalized_row_count: 0,
}
}

@@ -136,6 +140,7 @@

if self.accumulated_row_count % self.rows_per_segment == 0 {
self.finalize_segment().await?;
self.finalized_row_count = self.accumulated_row_count;
}
}

@@ -161,6 +166,7 @@

if self.accumulated_row_count % self.rows_per_segment == 0 {
self.finalize_segment().await?;
self.finalized_row_count = self.accumulated_row_count;
}

Ok(())
@@ -168,7 +174,7 @@

/// Finalizes any remaining segments and writes the bloom filters and metadata to the provided writer.
pub async fn finish(&mut self, mut writer: impl AsyncWrite + Unpin) -> Result<()> {
if !self.cur_seg_distinct_elems.is_empty() {
if self.accumulated_row_count > self.finalized_row_count {
self.finalize_segment().await?;
}

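The same scenario explains the new guard above: a trailing segment made up only of null rows adds no distinct elements, so the old emptiness check skipped finalizing it, while comparing the row counters still catches it. A tiny sketch with hypothetical counter values:

fn main() {
    let accumulated_row_count = 5usize; // rows pushed in total
    let finalized_row_count = 4usize; // rows covered by already-finalized segments
    let cur_seg_distinct_elems_is_empty = true; // the 5th row was all null

    // The old condition (`!is_empty`) would skip the final segment; the new one finalizes it.
    assert!(cur_seg_distinct_elems_is_empty);
    assert!(accumulated_row_count > finalized_row_count);
}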
@@ -406,4 +412,35 @@ mod tests {
assert!(bf.contains(&b"f"));
}
}

#[tokio::test]
async fn test_final_seg_all_null() {
let mut writer = Cursor::new(Vec::new());
let mut creator = BloomFilterCreator::new(
2,
Arc::new(MockExternalTempFileProvider::new()),
Arc::new(AtomicUsize::new(0)),
None,
);

creator
.push_n_row_elems(4, vec![b"a".to_vec(), b"b".to_vec()])
.await
.unwrap();
creator.push_row_elems(Vec::new()).await.unwrap();

creator.finish(&mut writer).await.unwrap();

let bytes = writer.into_inner();
let total_size = bytes.len();
let meta_size_offset = total_size - 4;
let meta_size = u32::from_le_bytes((&bytes[meta_size_offset..]).try_into().unwrap());

let meta_bytes = &bytes[total_size - meta_size as usize - 4..total_size - 4];
let meta = BloomFilterMeta::decode(meta_bytes).unwrap();

assert_eq!(meta.rows_per_segment, 2);
assert_eq!(meta.segment_count, 3);
assert_eq!(meta.row_count, 5);
}
}
@@ -25,7 +25,7 @@ use crate::fulltext_index::create::{FulltextIndexCreator, TantivyFulltextIndexCr
use crate::fulltext_index::search::{FulltextIndexSearcher, RowId, TantivyFulltextIndexSearcher};
use crate::fulltext_index::{Analyzer, Config};

async fn new_bounded_stager(prefix: &str) -> (TempDir, Arc<BoundedStager>) {
async fn new_bounded_stager(prefix: &str) -> (TempDir, Arc<BoundedStager<String>>) {
let staging_dir = create_temp_dir(prefix);
let path = staging_dir.path().to_path_buf();
(
@@ -68,13 +68,13 @@ async fn test_search(
let file_accessor = Arc::new(MockFileAccessor::new(prefix));
let puffin_manager = FsPuffinManager::new(stager, file_accessor);

let file_name = "fulltext_index";
let blob_key = "fulltext_index";
let mut writer = puffin_manager.writer(file_name).await.unwrap();
create_index(prefix, &mut writer, blob_key, texts, config).await;
let file_name = "fulltext_index".to_string();
let blob_key = "fulltext_index".to_string();
let mut writer = puffin_manager.writer(&file_name).await.unwrap();
create_index(prefix, &mut writer, &blob_key, texts, config).await;

let reader = puffin_manager.reader(file_name).await.unwrap();
let index_dir = reader.dir(blob_key).await.unwrap();
let reader = puffin_manager.reader(&file_name).await.unwrap();
let index_dir = reader.dir(&blob_key).await.unwrap();
let searcher = TantivyFulltextIndexSearcher::new(index_dir.path()).unwrap();
let results = searcher.search(query).await.unwrap();

@@ -198,13 +198,13 @@ impl Inner {
}
);

let leader = self
let leader_addr = self
.ask_leader
.as_ref()
.unwrap()
.get_leader()
.context(error::NoLeaderSnafu)?;
let mut leader = self.make_client(leader)?;
let mut leader = self.make_client(&leader_addr)?;

let (sender, receiver) = mpsc::channel::<HeartbeatRequest>(128);

@@ -236,7 +236,11 @@ impl Inner {
.await
.map_err(error::Error::from)?
.context(error::CreateHeartbeatStreamSnafu)?;
info!("Success to create heartbeat stream to server: {:#?}", res);
info!(
"Success to create heartbeat stream to server: {}, response: {:#?}",
leader_addr, res
);

Ok((
HeartbeatSender::new(self.id, self.role, sender),
@@ -44,6 +44,7 @@ use mailbox_handler::MailboxHandler;
use on_leader_start_handler::OnLeaderStartHandler;
use publish_heartbeat_handler::PublishHeartbeatHandler;
use region_lease_handler::RegionLeaseHandler;
use remap_flow_peer_handler::RemapFlowPeerHandler;
use response_header_handler::ResponseHeaderHandler;
use snafu::{OptionExt, ResultExt};
use store_api::storage::RegionId;
@@ -71,6 +72,7 @@ pub mod mailbox_handler;
pub mod on_leader_start_handler;
pub mod publish_heartbeat_handler;
pub mod region_lease_handler;
pub mod remap_flow_peer_handler;
pub mod response_header_handler;

#[async_trait::async_trait]
@@ -573,6 +575,7 @@ impl HeartbeatHandlerGroupBuilder {
self.add_handler_last(publish_heartbeat_handler);
}
self.add_handler_last(CollectStatsHandler::new(self.flush_stats_factor));
self.add_handler_last(RemapFlowPeerHandler::default());

if let Some(flow_state_handler) = self.flow_state_handler.take() {
self.add_handler_last(flow_state_handler);
@@ -853,7 +856,7 @@ mod tests {
.unwrap();

let handlers = group.handlers;
assert_eq!(12, handlers.len());
assert_eq!(13, handlers.len());

let names = [
"ResponseHeaderHandler",
@@ -868,6 +871,7 @@ mod tests {
"MailboxHandler",
"FilterInactiveRegionStatsHandler",
"CollectStatsHandler",
"RemapFlowPeerHandler",
];

for (handler, name) in handlers.iter().zip(names.into_iter()) {
@@ -888,7 +892,7 @@ mod tests {

let group = builder.build().unwrap();
let handlers = group.handlers;
assert_eq!(13, handlers.len());
assert_eq!(14, handlers.len());

let names = [
"ResponseHeaderHandler",
@@ -904,6 +908,7 @@ mod tests {
"CollectStatsHandler",
"FilterInactiveRegionStatsHandler",
"CollectStatsHandler",
"RemapFlowPeerHandler",
];

for (handler, name) in handlers.iter().zip(names.into_iter()) {
@@ -921,7 +926,7 @@ mod tests {

let group = builder.build().unwrap();
let handlers = group.handlers;
assert_eq!(13, handlers.len());
assert_eq!(14, handlers.len());

let names = [
"CollectStatsHandler",
@@ -937,6 +942,7 @@ mod tests {
"MailboxHandler",
"FilterInactiveRegionStatsHandler",
"CollectStatsHandler",
"RemapFlowPeerHandler",
];

for (handler, name) in handlers.iter().zip(names.into_iter()) {
@@ -954,7 +960,7 @@ mod tests {

let group = builder.build().unwrap();
let handlers = group.handlers;
assert_eq!(13, handlers.len());
assert_eq!(14, handlers.len());

let names = [
"ResponseHeaderHandler",
@@ -970,6 +976,7 @@ mod tests {
"CollectStatsHandler",
"FilterInactiveRegionStatsHandler",
"CollectStatsHandler",
"RemapFlowPeerHandler",
];

for (handler, name) in handlers.iter().zip(names.into_iter()) {
@@ -987,7 +994,7 @@ mod tests {

let group = builder.build().unwrap();
let handlers = group.handlers;
assert_eq!(13, handlers.len());
assert_eq!(14, handlers.len());

let names = [
"ResponseHeaderHandler",
@@ -1003,6 +1010,7 @@ mod tests {
"FilterInactiveRegionStatsHandler",
"CollectStatsHandler",
"ResponseHeaderHandler",
"RemapFlowPeerHandler",
];

for (handler, name) in handlers.iter().zip(names.into_iter()) {
@@ -1020,7 +1028,7 @@ mod tests {

let group = builder.build().unwrap();
let handlers = group.handlers;
assert_eq!(12, handlers.len());
assert_eq!(13, handlers.len());

let names = [
"ResponseHeaderHandler",
@@ -1035,6 +1043,7 @@ mod tests {
"CollectStatsHandler",
"FilterInactiveRegionStatsHandler",
"CollectStatsHandler",
"RemapFlowPeerHandler",
];

for (handler, name) in handlers.iter().zip(names.into_iter()) {
@@ -1052,7 +1061,7 @@ mod tests {

let group = builder.build().unwrap();
let handlers = group.handlers;
assert_eq!(12, handlers.len());
assert_eq!(13, handlers.len());

let names = [
"ResponseHeaderHandler",
@@ -1067,6 +1076,7 @@ mod tests {
"MailboxHandler",
"FilterInactiveRegionStatsHandler",
"ResponseHeaderHandler",
"RemapFlowPeerHandler",
];

for (handler, name) in handlers.iter().zip(names.into_iter()) {
@@ -1084,7 +1094,7 @@ mod tests {

let group = builder.build().unwrap();
let handlers = group.handlers;
assert_eq!(12, handlers.len());
assert_eq!(13, handlers.len());

let names = [
"CollectStatsHandler",
@@ -1099,6 +1109,7 @@ mod tests {
|
|||||||
"MailboxHandler",
|
"MailboxHandler",
|
||||||
"FilterInactiveRegionStatsHandler",
|
"FilterInactiveRegionStatsHandler",
|
||||||
"CollectStatsHandler",
|
"CollectStatsHandler",
|
||||||
|
"RemapFlowPeerHandler",
|
||||||
];
|
];
|
||||||
|
|
||||||
for (handler, name) in handlers.iter().zip(names.into_iter()) {
|
for (handler, name) in handlers.iter().zip(names.into_iter()) {
|
||||||
|
|||||||
@@ -23,8 +23,8 @@ pub struct CheckLeaderHandler;

 #[async_trait::async_trait]
 impl HeartbeatHandler for CheckLeaderHandler {
-    fn is_acceptable(&self, role: Role) -> bool {
-        role == Role::Datanode
+    fn is_acceptable(&self, _role: Role) -> bool {
+        true
     }

     async fn handle(

@@ -157,7 +157,7 @@ fn extract_base_info(request: &HeartbeatRequest) -> Option<(NodeInfoKey, Peer, P
 }

 async fn put_into_memory_store(ctx: &mut Context, key: NodeInfoKey, value: NodeInfo) -> Result<()> {
-    let key = key.into();
+    let key = (&key).into();
     let value = value.try_into().context(InvalidClusterInfoFormatSnafu)?;
     let put_req = PutRequest {
         key,

@@ -21,7 +21,7 @@ use common_meta::key::node_address::{NodeAddressKey, NodeAddressValue};
 use common_meta::key::{MetadataKey, MetadataValue};
 use common_meta::peer::Peer;
 use common_meta::rpc::store::PutRequest;
-use common_telemetry::{error, warn};
+use common_telemetry::{error, info, warn};
 use dashmap::DashMap;
 use snafu::ResultExt;

@@ -185,6 +185,10 @@ async fn rewrite_node_address(ctx: &mut Context, stat: &Stat) {

         match ctx.leader_cached_kv_backend.put(put).await {
             Ok(_) => {
+                info!(
+                    "Successfully updated datanode `NodeAddressValue`: {:?}",
+                    peer
+                );
                 // broadcast invalidating cache
                 let cache_idents = stat
                     .table_ids()
@@ -200,11 +204,14 @@ async fn rewrite_node_address(ctx: &mut Context, stat: &Stat) {
                 }
             }
             Err(e) => {
-                error!(e; "Failed to update NodeAddressValue: {:?}", peer);
+                error!(e; "Failed to update datanode `NodeAddressValue`: {:?}", peer);
             }
         }
     } else {
-        warn!("Failed to serialize NodeAddressValue: {:?}", peer);
+        warn!(
+            "Failed to serialize datanode `NodeAddressValue`: {:?}",
+            peer
+        );
     }
 }

src/meta-srv/src/handler/remap_flow_peer_handler.rs (new file, 92 lines)
@@ -0,0 +1,92 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use api::v1::meta::{HeartbeatRequest, Peer, Role};
+use common_meta::key::node_address::{NodeAddressKey, NodeAddressValue};
+use common_meta::key::{MetadataKey, MetadataValue};
+use common_meta::rpc::store::PutRequest;
+use common_telemetry::{error, info, warn};
+use dashmap::DashMap;
+
+use crate::handler::{HandleControl, HeartbeatAccumulator, HeartbeatHandler};
+use crate::metasrv::Context;
+use crate::Result;
+
+#[derive(Debug, Default)]
+pub struct RemapFlowPeerHandler {
+    /// flow_node_id -> epoch
+    epoch_cache: DashMap<u64, u64>,
+}
+
+#[async_trait::async_trait]
+impl HeartbeatHandler for RemapFlowPeerHandler {
+    fn is_acceptable(&self, role: Role) -> bool {
+        role == Role::Flownode
+    }
+
+    async fn handle(
+        &self,
+        req: &HeartbeatRequest,
+        ctx: &mut Context,
+        _acc: &mut HeartbeatAccumulator,
+    ) -> Result<HandleControl> {
+        let Some(peer) = req.peer.as_ref() else {
+            return Ok(HandleControl::Continue);
+        };
+
+        let current_epoch = req.node_epoch;
+        let flow_node_id = peer.id;
+
+        let refresh = if let Some(mut epoch) = self.epoch_cache.get_mut(&flow_node_id) {
+            if current_epoch > *epoch.value() {
+                *epoch.value_mut() = current_epoch;
+                true
+            } else {
+                false
+            }
+        } else {
+            self.epoch_cache.insert(flow_node_id, current_epoch);
+            true
+        };
+
+        if refresh {
+            rewrite_node_address(ctx, peer).await;
+        }
+
+        Ok(HandleControl::Continue)
+    }
+}
+
+async fn rewrite_node_address(ctx: &mut Context, peer: &Peer) {
+    let key = NodeAddressKey::with_flownode(peer.id).to_bytes();
+    if let Ok(value) = NodeAddressValue::new(peer.clone().into()).try_as_raw_value() {
+        let put = PutRequest {
+            key,
+            value,
+            prev_kv: false,
+        };
+
+        match ctx.leader_cached_kv_backend.put(put).await {
+            Ok(_) => {
+                info!("Successfully updated flow `NodeAddressValue`: {:?}", peer);
+                // TODO(discord): broadcast invalidating cache to all frontends
+            }
+            Err(e) => {
+                error!(e; "Failed to update flow `NodeAddressValue`: {:?}", peer);
+            }
+        }
+    } else {
+        warn!("Failed to serialize flow `NodeAddressValue`: {:?}", peer);
+    }
+}
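
The core of RemapFlowPeerHandler is the epoch comparison: the flow node's address is only rewritten when a flownode reports a strictly newer epoch than the cached one, or when it has never been seen before. A rough standalone sketch of that decision, assuming only the dashmap crate (the should_refresh helper is illustrative, not part of the patch):

use dashmap::DashMap;

/// Returns true when `current_epoch` is strictly newer than the cached epoch
/// for `node_id`, or when the node has never been seen; updates the cache.
fn should_refresh(cache: &DashMap<u64, u64>, node_id: u64, current_epoch: u64) -> bool {
    if let Some(mut epoch) = cache.get_mut(&node_id) {
        if current_epoch > *epoch.value() {
            *epoch.value_mut() = current_epoch;
            true
        } else {
            false
        }
    } else {
        cache.insert(node_id, current_epoch);
        true
    }
}

fn main() {
    let cache = DashMap::new();
    assert!(should_refresh(&cache, 1, 10)); // first heartbeat: refresh the address
    assert!(!should_refresh(&cache, 1, 10)); // same epoch again: nothing to do
    assert!(should_refresh(&cache, 1, 11)); // node restarted with a newer epoch: refresh
}
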
@@ -32,6 +32,7 @@ use common_meta::kv_backend::{KvBackendRef, ResettableKvBackend, ResettableKvBac
 use common_meta::leadership_notifier::{
     LeadershipChangeNotifier, LeadershipChangeNotifierCustomizerRef,
 };
+use common_meta::node_expiry_listener::NodeExpiryListener;
 use common_meta::peer::Peer;
 use common_meta::region_keeper::MemoryRegionKeeperRef;
 use common_meta::wal_options_allocator::WalOptionsAllocatorRef;
@@ -151,6 +152,8 @@ pub struct MetasrvOptions {
     #[cfg(feature = "pg_kvbackend")]
     /// Lock id for meta kv election. Only effect when using pg_kvbackend.
     pub meta_election_lock_id: u64,
+    #[serde(with = "humantime_serde")]
+    pub node_max_idle_time: Duration,
 }

 const DEFAULT_METASRV_ADDR_PORT: &str = "3002";
@@ -192,6 +195,7 @@ impl Default for MetasrvOptions {
             meta_table_name: DEFAULT_META_TABLE_NAME.to_string(),
             #[cfg(feature = "pg_kvbackend")]
             meta_election_lock_id: DEFAULT_META_ELECTION_LOCK_ID,
+            node_max_idle_time: Duration::from_secs(24 * 60 * 60),
         }
     }
 }
@@ -442,6 +446,10 @@ impl Metasrv {
         leadership_change_notifier.add_listener(self.wal_options_allocator.clone());
         leadership_change_notifier
             .add_listener(Arc::new(ProcedureManagerListenerAdapter(procedure_manager)));
+        leadership_change_notifier.add_listener(Arc::new(NodeExpiryListener::new(
+            self.options.node_max_idle_time,
+            self.in_memory.clone(),
+        )));
         if let Some(region_supervisor_ticker) = &self.region_supervisor_ticker {
             leadership_change_notifier.add_listener(region_supervisor_ticker.clone() as _);
         }
@@ -27,10 +27,9 @@ use snafu::OptionExt;
 use tokio::sync::mpsc;
 use tokio::sync::mpsc::Sender;
 use tokio_stream::wrappers::ReceiverStream;
-use tonic::{Request, Response, Streaming};
+use tonic::{Request, Response, Status, Streaming};

-use crate::error;
-use crate::error::Result;
+use crate::error::{self, Result};
 use crate::handler::{HeartbeatHandlerGroup, Pusher, PusherId};
 use crate::metasrv::{Context, Metasrv};
 use crate::metrics::METRIC_META_HEARTBEAT_RECV;
@@ -68,13 +67,15 @@ impl heartbeat_server::Heartbeat for Metasrv {
                 };

                 if pusher_id.is_none() {
-                    pusher_id = register_pusher(&handler_group, header, tx.clone()).await;
+                    pusher_id =
+                        Some(register_pusher(&handler_group, header, tx.clone()).await);
                 }
                 if let Some(k) = &pusher_id {
                     METRIC_META_HEARTBEAT_RECV.with_label_values(&[&k.to_string()]);
                 } else {
                     METRIC_META_HEARTBEAT_RECV.with_label_values(&["none"]);
                 }

                 let res = handler_group
                     .handle(req, ctx.clone())
                     .await
@@ -107,6 +108,12 @@ impl heartbeat_server::Heartbeat for Metasrv {

                 if is_not_leader {
                     warn!("Quit because it is no longer the leader");
+                    let _ = tx
+                        .send(Err(Status::aborted(format!(
+                            "The requested metasrv node is not leader, node addr: {}",
+                            ctx.server_addr
+                        ))))
+                        .await;
                     break;
                 }
             }
@@ -173,13 +180,13 @@ async fn register_pusher(
     handler_group: &HeartbeatHandlerGroup,
     header: &RequestHeader,
     sender: Sender<std::result::Result<HeartbeatResponse, tonic::Status>>,
-) -> Option<PusherId> {
+) -> PusherId {
     let role = header.role();
     let id = get_node_id(header);
     let pusher_id = PusherId::new(role, id);
     let pusher = Pusher::new(sender, header);
     handler_group.register_pusher(pusher_id, pusher).await;
-    Some(pusher_id)
+    pusher_id
 }

 #[cfg(test)]
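
The heartbeat change above now sends an explicit Status::aborted error down the response stream before breaking out of the receive loop, so the client learns why the stream ended. A minimal standalone sketch of that pattern, assuming the tokio (with the macros and sync features) and tonic crates; the payload type and address are placeholders rather than the real HeartbeatResponse:

use tokio::sync::mpsc;
use tonic::Status;

#[tokio::main]
async fn main() {
    // A placeholder payload standing in for the real HeartbeatResponse.
    type Resp = Result<&'static str, Status>;

    let (tx, mut rx) = mpsc::channel::<Resp>(4);

    // On loss of leadership the server now tells the client why the stream
    // ends, instead of silently breaking out of the loop.
    let server_addr = "127.0.0.1:3002";
    let _ = tx
        .send(Err(Status::aborted(format!(
            "The requested metasrv node is not leader, node addr: {server_addr}"
        ))))
        .await;
    drop(tx);

    while let Some(msg) = rx.recv().await {
        match msg {
            Ok(resp) => println!("response: {resp}"),
            Err(status) => println!("stream aborted: {}", status.message()),
        }
    }
}
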
@@ -17,13 +17,15 @@ use std::time::Duration;

 use api::v1::meta::{
     procedure_service_server, DdlTaskRequest as PbDdlTaskRequest,
-    DdlTaskResponse as PbDdlTaskResponse, MigrateRegionRequest, MigrateRegionResponse,
+    DdlTaskResponse as PbDdlTaskResponse, Error, MigrateRegionRequest, MigrateRegionResponse,
     ProcedureDetailRequest, ProcedureDetailResponse, ProcedureStateResponse, QueryProcedureRequest,
+    ResponseHeader,
 };
 use common_meta::ddl::ExecutorContext;
 use common_meta::rpc::ddl::{DdlTask, SubmitDdlTaskRequest};
 use common_meta::rpc::procedure;
-use snafu::{ensure, OptionExt, ResultExt};
+use common_telemetry::warn;
+use snafu::{OptionExt, ResultExt};
 use tonic::{Request, Response};

 use super::GrpcResult;
@@ -37,6 +39,16 @@ impl procedure_service_server::ProcedureService for Metasrv {
         &self,
         request: Request<QueryProcedureRequest>,
     ) -> GrpcResult<ProcedureStateResponse> {
+        if !self.is_leader() {
+            let resp = ProcedureStateResponse {
+                header: Some(ResponseHeader::failed(0, Error::is_not_leader())),
+                ..Default::default()
+            };
+
+            warn!("The current meta is not leader, but a `query procedure state` request have reached the meta. Detail: {:?}.", request);
+            return Ok(Response::new(resp));
+        }
+
         let QueryProcedureRequest { header, pid, .. } = request.into_inner();
         let _header = header.context(error::MissingRequestHeaderSnafu)?;
         let pid = pid.context(error::MissingRequiredParameterSnafu { param: "pid" })?;
@@ -57,6 +69,16 @@ impl procedure_service_server::ProcedureService for Metasrv {
     }

     async fn ddl(&self, request: Request<PbDdlTaskRequest>) -> GrpcResult<PbDdlTaskResponse> {
+        if !self.is_leader() {
+            let resp = PbDdlTaskResponse {
+                header: Some(ResponseHeader::failed(0, Error::is_not_leader())),
+                ..Default::default()
+            };
+
+            warn!("The current meta is not leader, but a `ddl` request have reached the meta. Detail: {:?}.", request);
+            return Ok(Response::new(resp));
+        }
+
         let PbDdlTaskRequest {
             header,
             query_context,
@@ -99,12 +121,15 @@ impl procedure_service_server::ProcedureService for Metasrv {
         &self,
         request: Request<MigrateRegionRequest>,
     ) -> GrpcResult<MigrateRegionResponse> {
-        ensure!(
-            self.meta_peer_client().is_leader(),
-            error::UnexpectedSnafu {
-                violated: "Trying to submit a region migration procedure to non-leader meta server"
-            }
-        );
+        if !self.is_leader() {
+            let resp = MigrateRegionResponse {
+                header: Some(ResponseHeader::failed(0, Error::is_not_leader())),
+                ..Default::default()
+            };
+
+            warn!("The current meta is not leader, but a `migrate` request have reached the meta. Detail: {:?}.", request);
+            return Ok(Response::new(resp));
+        }

         let MigrateRegionRequest {
             header,
@@ -150,6 +175,16 @@ impl procedure_service_server::ProcedureService for Metasrv {
         &self,
         request: Request<ProcedureDetailRequest>,
     ) -> GrpcResult<ProcedureDetailResponse> {
+        if !self.is_leader() {
+            let resp = ProcedureDetailResponse {
+                header: Some(ResponseHeader::failed(0, Error::is_not_leader())),
+                ..Default::default()
+            };
+
+            warn!("The current meta is not leader, but a `procedure details` request have reached the meta. Detail: {:?}.", request);
+            return Ok(Response::new(resp));
+        }
+
         let ProcedureDetailRequest { header } = request.into_inner();
         let _header = header.context(error::MissingRequestHeaderSnafu)?;
         let metas = self
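
Each RPC above now starts with the same guard: if this node is not the leader, reply immediately with a failed header instead of running the request. A simplified, self-contained sketch of that early-return shape; every type here is a stand-in for the generated protobuf structs, not GreptimeDB's real API:

// Hypothetical stand-ins for the generated protobuf types used in the diff.
#[derive(Debug, Default)]
struct ProcedureStateResponse {
    header: Option<String>, // stands in for ResponseHeader
}

struct Metasrv {
    leader: bool,
}

impl Metasrv {
    fn is_leader(&self) -> bool {
        self.leader
    }

    /// Mirrors the guard added in the diff: answer at once with a
    /// "not leader" header instead of executing the procedure logic.
    fn query_procedure_state(&self) -> ProcedureStateResponse {
        if !self.is_leader() {
            return ProcedureStateResponse {
                header: Some("is_not_leader".to_string()),
                ..Default::default()
            };
        }
        // The real lookup would run here on the leader.
        ProcedureStateResponse::default()
    }
}

fn main() {
    let follower = Metasrv { leader: false };
    assert!(follower.query_procedure_state().header.is_some());
}
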
@@ -142,6 +142,7 @@ impl DataRegion {
                 c.column_id = new_column_id_start + delta as u32;
                 c.column_schema.set_nullable();
                 match index_options {
+                    IndexOptions::None => {}
                     IndexOptions::Inverted => {
                         c.column_schema.set_inverted_index(true);
                     }
@@ -21,7 +21,7 @@ use api::v1::SemanticType;
 use common_telemetry::info;
 use common_time::{Timestamp, FOREVER};
 use datatypes::data_type::ConcreteDataType;
-use datatypes::schema::ColumnSchema;
+use datatypes::schema::{ColumnSchema, SkippingIndexOptions};
 use datatypes::value::Value;
 use mito2::engine::MITO_ENGINE_NAME;
 use object_store::util::join_dir;
@@ -55,6 +55,8 @@ use crate::error::{
 use crate::metrics::PHYSICAL_REGION_COUNT;
 use crate::utils::{self, to_data_region_id, to_metadata_region_id};

+const DEFAULT_TABLE_ID_SKIPPING_INDEX_GRANULARITY: u32 = 1024;
+
 impl MetricEngineInner {
     pub async fn create_regions(
         &self,
@@ -440,6 +442,7 @@ impl MetricEngineInner {
     ///
     /// Return `[table_id_col, tsid_col]`
     fn internal_column_metadata() -> [ColumnMetadata; 2] {
+        // Safety: BloomFilter is a valid skipping index type
         let metric_name_col = ColumnMetadata {
             column_id: ReservedColumnId::table_id(),
             semantic_type: SemanticType::Tag,
@@ -448,7 +451,11 @@ impl MetricEngineInner {
                 ConcreteDataType::uint32_datatype(),
                 false,
             )
-            .with_inverted_index(true),
+            .with_skipping_options(SkippingIndexOptions {
+                granularity: DEFAULT_TABLE_ID_SKIPPING_INDEX_GRANULARITY,
+                index_type: datatypes::schema::SkippingIndexType::BloomFilter,
+            })
+            .unwrap(),
         };
         let tsid_col = ColumnMetadata {
             column_id: ReservedColumnId::tsid(),
@@ -30,9 +30,10 @@ impl MetricEngineInner {
     pub async fn drop_region(
         &self,
         region_id: RegionId,
-        _req: RegionDropRequest,
+        req: RegionDropRequest,
     ) -> Result<AffectedRows> {
         let data_region_id = utils::to_data_region_id(region_id);
+        let fast_path = req.fast_path;

         // enclose the guard in a block to prevent the guard from polluting the async context
         let (is_physical_region, is_physical_region_busy) = {
@@ -52,7 +53,7 @@ impl MetricEngineInner {

         if is_physical_region {
             // check if there is no logical region relates to this physical region
-            if is_physical_region_busy {
+            if is_physical_region_busy && !fast_path {
                 // reject if there is any present logical region
                 return Err(PhysicalRegionBusySnafu {
                     region_id: data_region_id,
@@ -60,9 +61,21 @@ impl MetricEngineInner {
                 .build());
             }

-            self.drop_physical_region(data_region_id).await
+            return self.drop_physical_region(data_region_id).await;
+        }
+
+        if fast_path {
+            // for fast path, we don't delete the metadata in the metadata region.
+            // it only remove the logical region from the engine state.
+            //
+            // The drop database procedure will ensure the metadata region and data region are dropped eventually.
+            self.state
+                .write()
+                .unwrap()
+                .remove_logical_region(region_id)?;
+
+            Ok(0)
         } else {
-            // cannot merge these two `if` otherwise the stupid type checker will complain
             let metadata_region_id = self
                 .state
                 .read()
@@ -87,13 +100,16 @@ impl MetricEngineInner {
             // Since the physical regions are going to be dropped, we don't need to
             // update the contents in metadata region.
             self.mito
-                .handle_request(data_region_id, RegionRequest::Drop(RegionDropRequest {}))
+                .handle_request(
+                    data_region_id,
+                    RegionRequest::Drop(RegionDropRequest { fast_path: false }),
+                )
                 .await
                 .with_context(|_| CloseMitoRegionSnafu { region_id })?;
             self.mito
                 .handle_request(
                     metadata_region_id,
-                    RegionRequest::Drop(RegionDropRequest {}),
+                    RegionRequest::Drop(RegionDropRequest { fast_path: false }),
                 )
                 .await
                 .with_context(|_| CloseMitoRegionSnafu { region_id })?;
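
The drop_region change introduces a fast path that skips metadata cleanup for logical regions and lets a busy physical region be dropped anyway. The branching can be summarized as a small decision function; this is an illustrative reading of the diff, not code from the repository:

#[derive(Debug, PartialEq)]
enum DropPlan {
    Rejected,          // physical region still has logical regions, no fast path
    DropPhysical,      // drop the physical data and metadata regions
    DetachLogicalOnly, // fast path: only forget the logical region in engine state
    DropLogicalFully,  // slow path: also clean up metadata region entries
}

fn plan_drop(is_physical: bool, is_busy: bool, fast_path: bool) -> DropPlan {
    if is_physical {
        if is_busy && !fast_path {
            DropPlan::Rejected
        } else {
            DropPlan::DropPhysical
        }
    } else if fast_path {
        DropPlan::DetachLogicalOnly
    } else {
        DropPlan::DropLogicalFully
    }
}

fn main() {
    assert_eq!(plan_drop(true, true, false), DropPlan::Rejected);
    assert_eq!(plan_drop(true, true, true), DropPlan::DropPhysical);
    assert_eq!(plan_drop(false, false, true), DropPlan::DetachLogicalOnly);
}
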
@@ -40,6 +40,7 @@ pub struct PhysicalRegionOptions {
 #[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
 pub enum IndexOptions {
     #[default]
+    None,
     Inverted,
     Skipping {
         granularity: u32,
@@ -13,6 +13,7 @@
 // limitations under the License.

 use std::sync::Arc;
+use std::time::Duration;

 use object_store::services::Fs;
 use object_store::util::{join_dir, with_instrument_layers};
@@ -42,6 +43,29 @@ pub type AccessLayerRef = Arc<AccessLayer>;
 /// SST write results.
 pub type SstInfoArray = SmallVec<[SstInfo; 2]>;

+#[derive(Debug, Default)]
+pub struct Metrics {
+    pub read: Duration,
+    pub write: Duration,
+    pub convert: Duration,
+    pub index_update: Duration,
+    pub index_finish: Duration,
+    pub close: Duration,
+    pub num_series: usize,
+
+    // SST Opendal metrics.
+    pub opendal_create_cost: Duration,
+    pub opendal_num_writes: usize,
+    pub opendal_write_cost: Duration,
+    pub opendal_complete_cost: Duration,
+}
+
+impl Metrics {
+    pub fn sum(&self) -> Duration {
+        self.read + self.write + self.convert + self.index_update + self.index_finish + self.close
+    }
+}
+
 /// A layer to access SST files under the same directory.
 pub struct AccessLayer {
     region_dir: String,
@@ -121,10 +145,11 @@ impl AccessLayer {
     /// Writes a SST with specific `file_id` and `metadata` to the layer.
     ///
     /// Returns the info of the SST. If no data written, returns None.
-    pub(crate) async fn write_sst(
+    pub async fn write_sst(
         &self,
         request: SstWriteRequest,
         write_opts: &WriteOptions,
+        metrics: &mut Metrics,
     ) -> Result<SstInfoArray> {
         let region_id = request.metadata.region_id;
         let cache_manager = request.cache_manager.clone();
@@ -146,11 +171,14 @@ impl AccessLayer {
         } else {
             // Write cache is disabled.
             let store = self.object_store.clone();
+            let path_provider = RegionFilePathFactory::new(self.region_dir.clone());
             let indexer_builder = IndexerBuilderImpl {
                 op_type: request.op_type,
                 metadata: request.metadata.clone(),
                 row_group_size: write_opts.row_group_size,
-                puffin_manager: self.puffin_manager_factory.build(store),
+                puffin_manager: self
+                    .puffin_manager_factory
+                    .build(store, path_provider.clone()),
                 intermediate_manager: self.intermediate_manager.clone(),
                 index_options: request.index_options,
                 inverted_index_config: request.inverted_index_config,
@@ -161,14 +189,19 @@ impl AccessLayer {
                 self.object_store.clone(),
                 request.metadata,
                 indexer_builder,
-                RegionFilePathFactory {
-                    region_dir: self.region_dir.clone(),
-                },
+                path_provider,
             )
             .await;
-            writer
-                .write_all(request.source, request.max_sequence, write_opts)
-                .await?
+            let sst_info = writer
+                .write_all(request.source, request.max_sequence, write_opts, metrics)
+                .await?;
+            let opendal_metrics = writer.opendal_metrics_val();
+            metrics.opendal_create_cost += opendal_metrics.create_cost;
+            metrics.opendal_num_writes += opendal_metrics.num_writes;
+            metrics.opendal_write_cost += opendal_metrics.write_cost;
+            metrics.opendal_complete_cost += opendal_metrics.complete_cost;
+
+            sst_info
         };

         // Put parquet metadata to cache manager.
@@ -188,28 +221,53 @@ impl AccessLayer {
     }
 }

+/// Helper to build an [AccessLayerRef] with internal index managers.
+///
+/// This is a convenience constructor intended for tooling that needs to
+/// interact with SSTs without wiring all indexing internals manually.
+pub async fn build_access_layer(
+    region_dir: &str,
+    object_store: ObjectStore,
+    config: &crate::config::MitoConfig,
+) -> Result<AccessLayerRef> {
+    let puffin_manager_factory = PuffinManagerFactory::new(
+        &config.index.aux_path,
+        config.index.staging_size.as_bytes(),
+        Some(config.index.write_buffer_size.as_bytes() as _),
+        config.index.staging_ttl,
+    )
+    .await?;
+    let intermediate_manager = IntermediateManager::init_fs(&config.index.aux_path).await?;
+    Ok(Arc::new(AccessLayer::new(
+        region_dir,
+        object_store,
+        puffin_manager_factory,
+        intermediate_manager,
+    )))
+}
+
 /// `OperationType` represents the origin of the `SstWriteRequest`.
 #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
-pub(crate) enum OperationType {
+pub enum OperationType {
     Flush,
     Compact,
 }

 /// Contents to build a SST.
-pub(crate) struct SstWriteRequest {
-    pub(crate) op_type: OperationType,
-    pub(crate) metadata: RegionMetadataRef,
-    pub(crate) source: Source,
-    pub(crate) cache_manager: CacheManagerRef,
+pub struct SstWriteRequest {
+    pub op_type: OperationType,
+    pub metadata: RegionMetadataRef,
+    pub source: Source,
+    pub cache_manager: CacheManagerRef,
     #[allow(dead_code)]
-    pub(crate) storage: Option<String>,
-    pub(crate) max_sequence: Option<SequenceNumber>,
+    pub storage: Option<String>,
+    pub max_sequence: Option<SequenceNumber>,

     /// Configs for index
-    pub(crate) index_options: IndexOptions,
-    pub(crate) inverted_index_config: InvertedIndexConfig,
-    pub(crate) fulltext_index_config: FulltextIndexConfig,
-    pub(crate) bloom_filter_index_config: BloomFilterConfig,
+    pub index_options: IndexOptions,
+    pub inverted_index_config: InvertedIndexConfig,
+    pub fulltext_index_config: FulltextIndexConfig,
+    pub bloom_filter_index_config: BloomFilterConfig,
 }

 pub(crate) async fn new_fs_cache_store(root: &str) -> Result<ObjectStore> {
@@ -248,8 +306,18 @@ pub trait FilePathProvider: Send + Sync {
 /// Path provider that builds paths in local write cache.
 #[derive(Clone)]
 pub(crate) struct WriteCachePathProvider {
-    pub(crate) region_id: RegionId,
-    pub(crate) file_cache: FileCacheRef,
+    region_id: RegionId,
+    file_cache: FileCacheRef,
+}
+
+impl WriteCachePathProvider {
+    /// Creates a new `WriteCachePathProvider` instance.
+    pub fn new(region_id: RegionId, file_cache: FileCacheRef) -> Self {
+        Self {
+            region_id,
+            file_cache,
+        }
+    }
 }

 impl FilePathProvider for WriteCachePathProvider {
@@ -267,7 +335,14 @@ impl FilePathProvider for WriteCachePathProvider {
 /// Path provider that builds paths in region storage path.
 #[derive(Clone, Debug)]
 pub(crate) struct RegionFilePathFactory {
-    pub(crate) region_dir: String,
+    region_dir: String,
+}
+
+impl RegionFilePathFactory {
+    /// Creates a new `RegionFilePathFactory` instance.
+    pub fn new(region_dir: String) -> Self {
+        Self { region_dir }
+    }
 }

 impl FilePathProvider for RegionFilePathFactory {
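
The new Metrics struct threads per-stage timings through write_sst so callers can see where the SST write spends its time. A cut-down, standalone copy of the accumulation pattern (field set trimmed; this is a sketch, not the crate's actual type):

use std::time::Duration;

// A reduced copy of the `Metrics` struct added in the diff, so the
// accumulation pattern can be run on its own.
#[derive(Debug, Default)]
struct Metrics {
    read: Duration,
    write: Duration,
    convert: Duration,
    index_update: Duration,
    index_finish: Duration,
    close: Duration,
}

impl Metrics {
    fn sum(&self) -> Duration {
        self.read + self.write + self.convert + self.index_update + self.index_finish + self.close
    }
}

fn main() {
    let mut metrics = Metrics::default();
    // Each stage of the SST write adds its elapsed time into one field.
    metrics.write += Duration::from_millis(40);
    metrics.index_finish += Duration::from_millis(5);
    metrics.close += Duration::from_millis(1);
    assert_eq!(metrics.sum(), Duration::from_millis(46));
}
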
src/mito2/src/cache/file_cache.rs (vendored, 3 lines changed)
@@ -187,9 +187,12 @@ impl FileCache {
     }

     /// Removes a file from the cache explicitly.
+    /// It always tries to remove the file from the local store because we may not have the file
+    /// in the memory index if upload is failed.
     pub(crate) async fn remove(&self, key: IndexKey) {
         let file_path = self.cache_file_path(key);
         self.memory_index.remove(&key).await;
+        // Always delete the file from the local store.
         if let Err(e) = self.local_store.delete(&file_path).await {
             warn!(e; "Failed to delete a cached file {}", file_path);
         }
src/mito2/src/cache/write_cache.rs (vendored, 106 lines changed)
@@ -22,6 +22,7 @@ use common_telemetry::{debug, info};
 use futures::AsyncWriteExt;
 use object_store::ObjectStore;
 use snafu::ResultExt;
+use store_api::storage::RegionId;

 use crate::access_layer::{
     new_fs_cache_store, FilePathProvider, RegionFilePathFactory, SstInfoArray, SstWriteRequest,
@@ -39,6 +40,7 @@ use crate::sst::index::IndexerBuilderImpl;
 use crate::sst::parquet::writer::ParquetWriter;
 use crate::sst::parquet::WriteOptions;
 use crate::sst::{DEFAULT_WRITE_BUFFER_SIZE, DEFAULT_WRITE_CONCURRENCY};
+use crate::Metrics;

 /// A cache for uploading files to remote object stores.
 ///
@@ -114,15 +116,14 @@ impl WriteCache {
         let region_id = write_request.metadata.region_id;

         let store = self.file_cache.local_store();
-        let path_provider = WriteCachePathProvider {
-            file_cache: self.file_cache.clone(),
-            region_id,
-        };
+        let path_provider = WriteCachePathProvider::new(region_id, self.file_cache.clone());
         let indexer = IndexerBuilderImpl {
             op_type: write_request.op_type,
             metadata: write_request.metadata.clone(),
             row_group_size: write_opts.row_group_size,
-            puffin_manager: self.puffin_manager_factory.build(store),
+            puffin_manager: self
+                .puffin_manager_factory
+                .build(store, path_provider.clone()),
             intermediate_manager: self.intermediate_manager.clone(),
             index_options: write_request.index_options,
             inverted_index_config: write_request.inverted_index_config,
@@ -140,7 +141,12 @@ impl WriteCache {
         .await;

         let sst_info = writer
-            .write_all(write_request.source, write_request.max_sequence, write_opts)
+            .write_all(
+                write_request.source,
+                write_request.max_sequence,
+                write_opts,
+                &mut Metrics::default(),
+            )
             .await?;

         timer.stop_and_record();
@@ -150,24 +156,41 @@ impl WriteCache {
             return Ok(sst_info);
         }

+        let mut upload_tracker = UploadTracker::new(region_id);
+        let mut err = None;
         let remote_store = &upload_request.remote_store;
         for sst in &sst_info {
             let parquet_key = IndexKey::new(region_id, sst.file_id, FileType::Parquet);
             let parquet_path = upload_request
                 .dest_path_provider
                 .build_sst_file_path(sst.file_id);
-            self.upload(parquet_key, &parquet_path, remote_store)
-                .await?;
+            if let Err(e) = self.upload(parquet_key, &parquet_path, remote_store).await {
+                err = Some(e);
+                break;
+            }
+            upload_tracker.push_uploaded_file(parquet_path);

             if sst.index_metadata.file_size > 0 {
                 let puffin_key = IndexKey::new(region_id, sst.file_id, FileType::Puffin);
-                let puffin_path = &upload_request
+                let puffin_path = upload_request
                     .dest_path_provider
                     .build_index_file_path(sst.file_id);
-                self.upload(puffin_key, puffin_path, remote_store).await?;
+                if let Err(e) = self.upload(puffin_key, &puffin_path, remote_store).await {
+                    err = Some(e);
+                    break;
+                }
+                upload_tracker.push_uploaded_file(puffin_path);
             }
         }

+        if let Some(err) = err {
+            // Cleans files on failure.
+            upload_tracker
+                .clean(&sst_info, &self.file_cache, remote_store)
+                .await;
+            return Err(err);
+        }
+
         Ok(sst_info)
     }

@@ -333,6 +356,61 @@ pub struct SstUploadRequest {
     pub remote_store: ObjectStore,
 }

+/// A structs to track files to upload and clean them if upload failed.
+struct UploadTracker {
+    /// Id of the region to track.
+    region_id: RegionId,
+    /// Paths of files uploaded successfully.
+    files_uploaded: Vec<String>,
+}
+
+impl UploadTracker {
+    /// Creates a new instance of `UploadTracker` for a given region.
+    fn new(region_id: RegionId) -> Self {
+        Self {
+            region_id,
+            files_uploaded: Vec::new(),
+        }
+    }
+
+    /// Add a file path to the list of uploaded files.
+    fn push_uploaded_file(&mut self, path: String) {
+        self.files_uploaded.push(path);
+    }
+
+    /// Cleans uploaded files and files in the file cache at best effort.
+    async fn clean(
+        &self,
+        sst_info: &SstInfoArray,
+        file_cache: &FileCacheRef,
+        remote_store: &ObjectStore,
+    ) {
+        common_telemetry::info!(
+            "Start cleaning files on upload failure, region: {}, num_ssts: {}",
+            self.region_id,
+            sst_info.len()
+        );

+        // Cleans files in the file cache first.
+        for sst in sst_info {
+            let parquet_key = IndexKey::new(self.region_id, sst.file_id, FileType::Parquet);
+            file_cache.remove(parquet_key).await;
+
+            if sst.index_metadata.file_size > 0 {
+                let puffin_key = IndexKey::new(self.region_id, sst.file_id, FileType::Puffin);
+                file_cache.remove(puffin_key).await;
+            }
+        }
+
+        // Cleans uploaded files.
+        for file_path in &self.files_uploaded {
+            if let Err(e) = remote_store.delete(file_path).await {
+                common_telemetry::error!(e; "Failed to delete file {}", file_path);
+            }
+        }
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use common_test_util::temp_dir::create_temp_dir;
@@ -355,9 +433,7 @@ mod tests {
         // and now just use local file system to mock.
         let mut env = TestEnv::new();
         let mock_store = env.init_object_store_manager();
-        let path_provider = RegionFilePathFactory {
-            region_dir: "test".to_string(),
-        };
+        let path_provider = RegionFilePathFactory::new("test".to_string());

         let local_dir = create_temp_dir("");
         let local_store = new_fs_store(local_dir.path().to_str().unwrap());
@@ -488,9 +564,7 @@ mod tests {
             ..Default::default()
         };
         let upload_request = SstUploadRequest {
-            dest_path_provider: RegionFilePathFactory {
-                region_dir: data_home.clone(),
-            },
+            dest_path_provider: RegionFilePathFactory::new(data_home.clone()),
             remote_store: mock_store.clone(),
         };

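
UploadTracker implements a simple compensation pattern: remember every file that made it to the remote store, and on the first failure delete those files (plus the local cache copies) before returning the error. A stripped-down, synchronous sketch of the same idea, with no object store involved and purely illustrative names:

/// A simplified version of the upload-then-clean pattern: remember what
/// reached the remote store, and delete it all if a later upload fails.
struct Tracker {
    uploaded: Vec<String>,
}

impl Tracker {
    fn new() -> Self {
        Self { uploaded: Vec::new() }
    }

    /// Pretend to upload `path`; `ok` simulates whether the transfer worked.
    fn upload(&mut self, path: &str, ok: bool) -> Result<(), String> {
        if !ok {
            return Err(format!("failed to upload {path}"));
        }
        self.uploaded.push(path.to_string());
        Ok(())
    }

    /// Best-effort cleanup of every file uploaded before the failure.
    fn clean(&self, delete: &mut dyn FnMut(&str)) {
        for path in &self.uploaded {
            delete(path);
        }
    }
}

fn main() {
    let mut tracker = Tracker::new();
    let mut deleted = Vec::new();

    let mut result = tracker.upload("a.parquet", true);
    if result.is_ok() {
        result = tracker.upload("a.puffin", false);
    }

    if result.is_err() {
        let mut record = |p: &str| deleted.push(p.to_string());
        tracker.clean(&mut record);
    }
    assert_eq!(deleted, vec!["a.parquet".to_string()]);
}
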
@@ -27,7 +27,7 @@ use snafu::{OptionExt, ResultExt};
 use store_api::metadata::RegionMetadataRef;
 use store_api::storage::RegionId;

-use crate::access_layer::{AccessLayer, AccessLayerRef, OperationType, SstWriteRequest};
+use crate::access_layer::{AccessLayer, AccessLayerRef, Metrics, OperationType, SstWriteRequest};
 use crate::cache::{CacheManager, CacheManagerRef};
 use crate::compaction::picker::{new_picker, PickerOutput};
 use crate::compaction::{find_ttl, CompactionSstReaderBuilder};
@@ -340,6 +340,7 @@ impl Compactor for DefaultCompactor {
                             bloom_filter_index_config,
                         },
                         &write_opts,
+                        &mut Metrics::default(),
                     )
                     .await?
                     .into_iter()
@@ -56,7 +56,10 @@ async fn test_engine_drop_region() {

     // It's okay to drop a region doesn't exist.
     engine
-        .handle_request(region_id, RegionRequest::Drop(RegionDropRequest {}))
+        .handle_request(
+            region_id,
+            RegionRequest::Drop(RegionDropRequest { fast_path: false }),
+        )
         .await
         .unwrap_err();

@@ -86,7 +89,10 @@ async fn test_engine_drop_region() {

     // drop the created region.
     engine
-        .handle_request(region_id, RegionRequest::Drop(RegionDropRequest {}))
+        .handle_request(
+            region_id,
+            RegionRequest::Drop(RegionDropRequest { fast_path: false }),
+        )
         .await
         .unwrap();
     assert!(!engine.is_region_exists(region_id));
@@ -192,7 +198,10 @@ async fn test_engine_drop_region_for_custom_store() {

     // Drop the custom region.
     engine
-        .handle_request(custom_region_id, RegionRequest::Drop(RegionDropRequest {}))
+        .handle_request(
+            custom_region_id,
+            RegionRequest::Drop(RegionDropRequest { fast_path: false }),
+        )
         .await
         .unwrap();
     assert!(!engine.is_region_exists(custom_region_id));
@@ -25,7 +25,7 @@ use store_api::storage::RegionId;
 use strum::IntoStaticStr;
 use tokio::sync::{mpsc, watch};

-use crate::access_layer::{AccessLayerRef, OperationType, SstWriteRequest};
+use crate::access_layer::{AccessLayerRef, Metrics, OperationType, SstWriteRequest};
 use crate::cache::CacheManagerRef;
 use crate::config::MitoConfig;
 use crate::error::{
@@ -366,7 +366,7 @@ impl RegionFlushTask {

         let ssts_written = self
             .access_layer
-            .write_sst(write_request, &write_opts)
+            .write_sst(write_request, &write_opts, &mut Metrics::default())
             .await?;
         if ssts_written.is_empty() {
             // No data written.
@@ -44,6 +44,12 @@ mod time_provider;
 pub mod wal;
 mod worker;

+// Public re-exports for tooling convenience
+pub use access_layer::{
+    build_access_layer, AccessLayer, AccessLayerRef, Metrics, OperationType, SstWriteRequest,
+};
+pub use cache::{CacheManager, CacheManagerRef};
+
 #[cfg_attr(doc, aquamarine::aquamarine)]
 /// # Mito developer document
 ///
@@ -32,7 +32,6 @@ use tokio::sync::{mpsc, Semaphore};
 use tokio_stream::wrappers::ReceiverStream;

 use crate::access_layer::AccessLayerRef;
-use crate::cache::file_cache::FileCacheRef;
 use crate::cache::CacheStrategy;
 use crate::config::DEFAULT_SCAN_CHANNEL_SIZE;
 use crate::error::Result;
@@ -427,12 +426,7 @@ impl ScanRegion {
             return None;
         }

-        let file_cache = || -> Option<FileCacheRef> {
-            let write_cache = self.cache_strategy.write_cache()?;
-            let file_cache = write_cache.file_cache();
-            Some(file_cache)
-        }();
+        let file_cache = self.cache_strategy.write_cache().map(|w| w.file_cache());

         let inverted_index_cache = self.cache_strategy.inverted_index_cache().cloned();

         let puffin_metadata_cache = self.cache_strategy.puffin_metadata_cache().cloned();
@@ -467,14 +461,8 @@ impl ScanRegion {
             return None;
         }

-        let file_cache = || -> Option<FileCacheRef> {
-            let write_cache = self.cache_strategy.write_cache()?;
-            let file_cache = write_cache.file_cache();
-            Some(file_cache)
-        }();
+        let file_cache = self.cache_strategy.write_cache().map(|w| w.file_cache());

         let bloom_filter_index_cache = self.cache_strategy.bloom_filter_index_cache().cloned();

         let puffin_metadata_cache = self.cache_strategy.puffin_metadata_cache().cloned();

         BloomFilterIndexApplierBuilder::new(
@@ -499,12 +487,18 @@ impl ScanRegion {
             return None;
         }

+        let file_cache = self.cache_strategy.write_cache().map(|w| w.file_cache());
+        let puffin_metadata_cache = self.cache_strategy.puffin_metadata_cache().cloned();
+
         FulltextIndexApplierBuilder::new(
             self.access_layer.region_dir().to_string(),
+            self.version.metadata.region_id,
             self.access_layer.object_store().clone(),
             self.access_layer.puffin_manager_factory().clone(),
             self.version.metadata.as_ref(),
         )
+        .with_file_cache(file_cache)
+        .with_puffin_metadata_cache(puffin_metadata_cache)
         .build(&self.request.filters)
         .inspect_err(|err| warn!(err; "Failed to build fulltext index applier"))
         .ok()
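
The scan-region change replaces an immediately-invoked closure (used only so `?` could be applied to an Option) with a plain Option::map. A tiny self-contained comparison of the two forms; WriteCache here is a stub, not the mito2 type:

struct WriteCache {
    file_cache: String,
}

impl WriteCache {
    fn file_cache(&self) -> String {
        self.file_cache.clone()
    }
}

// Stub lookup; in mito2 this is `self.cache_strategy.write_cache()`.
fn write_cache() -> Option<WriteCache> {
    None
}

fn main() {
    // Before: an immediately-invoked closure so that `?` works on Option.
    let via_closure = (|| -> Option<String> {
        let cache = write_cache()?;
        Some(cache.file_cache())
    })();

    // After: the same lookup expressed with `Option::map`.
    let via_map = write_cache().map(|c| c.file_cache());

    assert_eq!(via_closure, via_map);
}
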
@@ -35,8 +35,8 @@ use store_api::metadata::{ColumnMetadata, RegionMetadata, RegionMetadataRef};
 use store_api::region_engine::{SetRegionRoleStateResponse, SettableRegionRoleState};
 use store_api::region_request::{
     AffectedRows, RegionAlterRequest, RegionCatchupRequest, RegionCloseRequest,
-    RegionCompactRequest, RegionCreateRequest, RegionDropRequest, RegionFlushRequest,
-    RegionOpenRequest, RegionRequest, RegionTruncateRequest,
+    RegionCompactRequest, RegionCreateRequest, RegionFlushRequest, RegionOpenRequest,
+    RegionRequest, RegionTruncateRequest,
 };
 use store_api::storage::{RegionId, SequenceNumber};
 use tokio::sync::oneshot::{self, Receiver, Sender};
@@ -624,10 +624,10 @@ impl WorkerRequest {
                 sender: sender.into(),
                 request: DdlRequest::Create(v),
             }),
-            RegionRequest::Drop(v) => WorkerRequest::Ddl(SenderDdlRequest {
+            RegionRequest::Drop(_) => WorkerRequest::Ddl(SenderDdlRequest {
                 region_id,
                 sender: sender.into(),
-                request: DdlRequest::Drop(v),
+                request: DdlRequest::Drop,
             }),
             RegionRequest::Open(v) => WorkerRequest::Ddl(SenderDdlRequest {
                 region_id,
@@ -690,7 +690,7 @@ impl WorkerRequest {
 #[derive(Debug)]
 pub(crate) enum DdlRequest {
     Create(RegionCreateRequest),
-    Drop(RegionDropRequest),
+    Drop,
     Open((RegionOpenRequest, Option<WalEntryReceiver>)),
     Close(RegionCloseRequest),
     Alter(RegionAlterRequest),
@@ -174,31 +174,8 @@ impl FileMeta {
             .contains(&IndexType::BloomFilterIndex)
     }

-    /// Returns the size of the inverted index file
-    pub fn inverted_index_size(&self) -> Option<u64> {
-        if self.available_indexes.len() == 1 && self.inverted_index_available() {
-            Some(self.index_file_size)
-        } else {
-            None
-        }
-    }
-
-    /// Returns the size of the fulltext index file
-    pub fn fulltext_index_size(&self) -> Option<u64> {
-        if self.available_indexes.len() == 1 && self.fulltext_index_available() {
-            Some(self.index_file_size)
-        } else {
-            None
-        }
-    }
-
-    /// Returns the size of the bloom filter index file
-    pub fn bloom_filter_index_size(&self) -> Option<u64> {
-        if self.available_indexes.len() == 1 && self.bloom_filter_index_available() {
-            Some(self.index_file_size)
-        } else {
-            None
-        }
+    pub fn index_file_size(&self) -> u64 {
+        self.index_file_size
     }
 }

Some files were not shown because too many files have changed in this diff.