mirror of
https://github.com/GreptimeTeam/greptimedb.git
synced 2026-01-03 20:02:54 +00:00
Compare commits
94 Commits
v0.5.0-nig
...
v0.5.0-nig
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
53a5864944 | ||
|
|
5b70881098 | ||
|
|
06d273b75a | ||
|
|
b382900c5c | ||
|
|
c79bb5a936 | ||
|
|
7e0dcfc797 | ||
|
|
51ddebdc73 | ||
|
|
e9f7579091 | ||
|
|
f387a09535 | ||
|
|
cf94d3295f | ||
|
|
0a91335e24 | ||
|
|
6fd04e38a3 | ||
|
|
bbaae9223a | ||
|
|
060864d0c1 | ||
|
|
395632c874 | ||
|
|
0dca63bc7b | ||
|
|
7323d727c9 | ||
|
|
68f92ecf08 | ||
|
|
39d52f25bf | ||
|
|
fb8d0c6ce5 | ||
|
|
ce867fb583 | ||
|
|
04a8fc5138 | ||
|
|
479ffe5a0f | ||
|
|
4b48c716b2 | ||
|
|
a9137b77f0 | ||
|
|
5f3bbdca4f | ||
|
|
7bd137f398 | ||
|
|
15a0775a3c | ||
|
|
180bc64cb0 | ||
|
|
e3320c531d | ||
|
|
d77003fb3b | ||
|
|
54ed7529ca | ||
|
|
465c8f714e | ||
|
|
88eb69530a | ||
|
|
36c0742c45 | ||
|
|
84bcca9117 | ||
|
|
d2f3793d15 | ||
|
|
000e1471eb | ||
|
|
d0ff8ab191 | ||
|
|
bd177b8cc4 | ||
|
|
958ff3f185 | ||
|
|
5d8b0e8154 | ||
|
|
84490f56b8 | ||
|
|
cb97768004 | ||
|
|
f08a35d6b9 | ||
|
|
e8adaaf5f7 | ||
|
|
a63fa76b7b | ||
|
|
102e4c975d | ||
|
|
16a3257ada | ||
|
|
01fdbf3626 | ||
|
|
97897aaf9b | ||
|
|
1fc42a681f | ||
|
|
fbc8f56eaa | ||
|
|
44280f7c9d | ||
|
|
0fbde48655 | ||
|
|
9dcfd28f61 | ||
|
|
82dbc3e1ae | ||
|
|
4d478658b5 | ||
|
|
89ebe47cd9 | ||
|
|
212ea2c25c | ||
|
|
1658d088ab | ||
|
|
346b57cf10 | ||
|
|
e1dcf83326 | ||
|
|
b5d9d635eb | ||
|
|
88dd78a69c | ||
|
|
6439b929b3 | ||
|
|
ba15c14103 | ||
|
|
d57b144b2f | ||
|
|
46e106bcc3 | ||
|
|
a7507a2b12 | ||
|
|
5b8e5066a0 | ||
|
|
dcd481e6a4 | ||
|
|
3217b56cc1 | ||
|
|
eccad647d0 | ||
|
|
829db8c5c1 | ||
|
|
9056c3a6aa | ||
|
|
d9e7b898a3 | ||
|
|
59d4081f7a | ||
|
|
6e87ac0a0e | ||
|
|
d89cfd0d4d | ||
|
|
8a0054aa89 | ||
|
|
f859932745 | ||
|
|
9a8fc08e6a | ||
|
|
825e4beead | ||
|
|
0a23b40321 | ||
|
|
cf6ef0a30d | ||
|
|
65a659d136 | ||
|
|
62bcb45787 | ||
|
|
94f3542a4f | ||
|
|
fc3bc5327d | ||
|
|
9e33ddceea | ||
|
|
c9bdf4ff9f | ||
|
|
0a9972aa9a | ||
|
|
76d5b710c8 |
102
.github/actions/build-dev-builder-image/action.yml
vendored
102
.github/actions/build-dev-builder-image/action.yml
vendored
@@ -1,102 +0,0 @@
|
||||
name: Build and push dev-builder image
|
||||
description: Build and push dev-builder image to DockerHub and ACR
|
||||
inputs:
|
||||
dockerhub-image-registry:
|
||||
description: The dockerhub image registry to store the images
|
||||
required: false
|
||||
default: docker.io
|
||||
dockerhub-image-registry-username:
|
||||
description: The dockerhub username to login to the image registry
|
||||
required: true
|
||||
dockerhub-image-registry-token:
|
||||
description: The dockerhub token to login to the image registry
|
||||
required: true
|
||||
dockerhub-image-namespace:
|
||||
description: The dockerhub namespace of the image registry to store the images
|
||||
required: false
|
||||
default: greptime
|
||||
acr-image-registry:
|
||||
description: The ACR image registry to store the images
|
||||
required: true
|
||||
acr-image-registry-username:
|
||||
description: The ACR username to login to the image registry
|
||||
required: true
|
||||
acr-image-registry-password:
|
||||
description: The ACR password to login to the image registry
|
||||
required: true
|
||||
acr-image-namespace:
|
||||
description: The ACR namespace of the image registry to store the images
|
||||
required: false
|
||||
default: greptime
|
||||
version:
|
||||
description: Version of the dev-builder
|
||||
required: false
|
||||
default: latest
|
||||
runs:
|
||||
using: composite
|
||||
steps:
|
||||
- name: Login to Dockerhub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
registry: ${{ inputs.dockerhub-image-registry }}
|
||||
username: ${{ inputs.dockerhub-image-registry-username }}
|
||||
password: ${{ inputs.dockerhub-image-registry-token }}
|
||||
|
||||
- name: Build and push ubuntu dev builder image to dockerhub
|
||||
shell: bash
|
||||
run:
|
||||
make dev-builder \
|
||||
BASE_IMAGE=ubuntu \
|
||||
BUILDX_MULTI_PLATFORM_BUILD=true \
|
||||
IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
|
||||
IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
|
||||
IMAGE_TAG=${{ inputs.version }}
|
||||
|
||||
- name: Build and push centos dev builder image to dockerhub
|
||||
shell: bash
|
||||
run:
|
||||
make dev-builder \
|
||||
BASE_IMAGE=centos \
|
||||
BUILDX_MULTI_PLATFORM_BUILD=true \
|
||||
IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
|
||||
IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
|
||||
IMAGE_TAG=${{ inputs.version }}
|
||||
|
||||
- name: Build and push android dev builder image to dockerhub
|
||||
shell: bash
|
||||
run:
|
||||
make dev-builder \
|
||||
BASE_IMAGE=android \
|
||||
IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
|
||||
IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
|
||||
IMAGE_TAG=${{ inputs.version }}
|
||||
|
||||
- name: Login to ACR
|
||||
uses: docker/login-action@v2
|
||||
continue-on-error: true
|
||||
with:
|
||||
registry: ${{ inputs.acr-image-registry }}
|
||||
username: ${{ inputs.acr-image-registry-username }}
|
||||
password: ${{ inputs.acr-image-registry-password }}
|
||||
|
||||
- name: Build and push ubuntu dev builder image to ACR
|
||||
shell: bash
|
||||
continue-on-error: true
|
||||
run: # buildx will cache the images that already built, so it will not take long time to build the images again.
|
||||
make dev-builder \
|
||||
BASE_IMAGE=ubuntu \
|
||||
BUILDX_MULTI_PLATFORM_BUILD=true \
|
||||
IMAGE_REGISTRY=${{ inputs.acr-image-registry }} \
|
||||
IMAGE_NAMESPACE=${{ inputs.acr-image-namespace }} \
|
||||
IMAGE_TAG=${{ inputs.version }}
|
||||
|
||||
- name: Build and push centos dev builder image to ACR
|
||||
shell: bash
|
||||
continue-on-error: true
|
||||
run: # buildx will cache the images that already built, so it will not take long time to build the images again.
|
||||
make dev-builder \
|
||||
BASE_IMAGE=centos \
|
||||
BUILDX_MULTI_PLATFORM_BUILD=true \
|
||||
IMAGE_REGISTRY=${{ inputs.acr-image-registry }} \
|
||||
IMAGE_NAMESPACE=${{ inputs.acr-image-namespace }} \
|
||||
IMAGE_TAG=${{ inputs.version }}
|
||||
76
.github/actions/build-dev-builder-images/action.yml
vendored
Normal file
76
.github/actions/build-dev-builder-images/action.yml
vendored
Normal file
@@ -0,0 +1,76 @@
|
||||
name: Build and push dev-builder images
|
||||
description: Build and push dev-builder images to DockerHub and ACR
|
||||
inputs:
|
||||
dockerhub-image-registry:
|
||||
description: The dockerhub image registry to store the images
|
||||
required: false
|
||||
default: docker.io
|
||||
dockerhub-image-registry-username:
|
||||
description: The dockerhub username to login to the image registry
|
||||
required: true
|
||||
dockerhub-image-registry-token:
|
||||
description: The dockerhub token to login to the image registry
|
||||
required: true
|
||||
dockerhub-image-namespace:
|
||||
description: The dockerhub namespace of the image registry to store the images
|
||||
required: false
|
||||
default: greptime
|
||||
version:
|
||||
description: Version of the dev-builder
|
||||
required: false
|
||||
default: latest
|
||||
build-dev-builder-ubuntu:
|
||||
description: Build dev-builder-ubuntu image
|
||||
required: false
|
||||
default: 'true'
|
||||
build-dev-builder-centos:
|
||||
description: Build dev-builder-centos image
|
||||
required: false
|
||||
default: 'true'
|
||||
build-dev-builder-android:
|
||||
description: Build dev-builder-android image
|
||||
required: false
|
||||
default: 'true'
|
||||
runs:
|
||||
using: composite
|
||||
steps:
|
||||
- name: Login to Dockerhub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
registry: ${{ inputs.dockerhub-image-registry }}
|
||||
username: ${{ inputs.dockerhub-image-registry-username }}
|
||||
password: ${{ inputs.dockerhub-image-registry-token }}
|
||||
|
||||
- name: Build and push dev-builder-ubuntu image
|
||||
shell: bash
|
||||
if: ${{ inputs.build-dev-builder-ubuntu == 'true' }}
|
||||
run: |
|
||||
make dev-builder \
|
||||
BASE_IMAGE=ubuntu \
|
||||
BUILDX_MULTI_PLATFORM_BUILD=true \
|
||||
IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
|
||||
IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
|
||||
IMAGE_TAG=${{ inputs.version }}
|
||||
|
||||
- name: Build and push dev-builder-centos image
|
||||
shell: bash
|
||||
if: ${{ inputs.build-dev-builder-centos == 'true' }}
|
||||
run: |
|
||||
make dev-builder \
|
||||
BASE_IMAGE=centos \
|
||||
BUILDX_MULTI_PLATFORM_BUILD=true \
|
||||
IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
|
||||
IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
|
||||
IMAGE_TAG=${{ inputs.version }}
|
||||
|
||||
- name: Build and push dev-builder-android image # Only build image for amd64 platform.
|
||||
shell: bash
|
||||
if: ${{ inputs.build-dev-builder-android == 'true' }}
|
||||
run: |
|
||||
make dev-builder \
|
||||
BASE_IMAGE=android \
|
||||
IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
|
||||
IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
|
||||
IMAGE_TAG=${{ inputs.version }} && \
|
||||
|
||||
docker push ${{ inputs.dockerhub-image-registry }}/${{ inputs.dockerhub-image-namespace }}/dev-builder-android:${{ inputs.version }}
|
||||
80
.github/actions/build-windows-artifacts/action.yml
vendored
Normal file
80
.github/actions/build-windows-artifacts/action.yml
vendored
Normal file
@@ -0,0 +1,80 @@
|
||||
name: Build Windows artifacts
|
||||
description: Build Windows artifacts
|
||||
inputs:
|
||||
arch:
|
||||
description: Architecture to build
|
||||
required: true
|
||||
rust-toolchain:
|
||||
description: Rust toolchain to use
|
||||
required: true
|
||||
cargo-profile:
|
||||
description: Cargo profile to build
|
||||
required: true
|
||||
features:
|
||||
description: Cargo features to build
|
||||
required: true
|
||||
version:
|
||||
description: Version of the artifact
|
||||
required: true
|
||||
disable-run-tests:
|
||||
description: Disable running integration tests
|
||||
required: true
|
||||
artifacts-dir:
|
||||
description: Directory to store artifacts
|
||||
required: true
|
||||
runs:
|
||||
using: composite
|
||||
steps:
|
||||
- uses: arduino/setup-protoc@v1
|
||||
|
||||
- name: Install rust toolchain
|
||||
uses: dtolnay/rust-toolchain@master
|
||||
with:
|
||||
toolchain: ${{ inputs.rust-toolchain }}
|
||||
targets: ${{ inputs.arch }}
|
||||
components: llvm-tools-preview
|
||||
|
||||
- name: Rust Cache
|
||||
uses: Swatinem/rust-cache@v2
|
||||
|
||||
- name: Install Python
|
||||
uses: actions/setup-python@v4
|
||||
with:
|
||||
python-version: '3.10'
|
||||
|
||||
- name: Install PyArrow Package
|
||||
shell: pwsh
|
||||
run: pip install pyarrow
|
||||
|
||||
- name: Install WSL distribution
|
||||
uses: Vampire/setup-wsl@v2
|
||||
with:
|
||||
distribution: Ubuntu-22.04
|
||||
|
||||
- name: Install latest nextest release # For integration tests.
|
||||
if: ${{ inputs.disable-run-tests == 'false' }}
|
||||
uses: taiki-e/install-action@nextest
|
||||
|
||||
- name: Run integration tests
|
||||
if: ${{ inputs.disable-run-tests == 'false' }}
|
||||
shell: pwsh
|
||||
run: make test sqlness-test
|
||||
|
||||
- name: Upload sqlness logs
|
||||
if: ${{ failure() }} # Only upload logs when the integration tests failed.
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: sqlness-logs
|
||||
path: ${{ runner.temp }}/greptime-*.log
|
||||
retention-days: 3
|
||||
|
||||
- name: Build greptime binary
|
||||
shell: pwsh
|
||||
run: cargo build --profile ${{ inputs.cargo-profile }} --features ${{ inputs.features }} --target ${{ inputs.arch }}
|
||||
|
||||
- name: Upload artifacts
|
||||
uses: ./.github/actions/upload-artifacts
|
||||
with:
|
||||
artifacts-dir: ${{ inputs.artifacts-dir }}
|
||||
target-file: target/${{ inputs.arch }}/${{ inputs.cargo-profile }}/greptime
|
||||
version: ${{ inputs.version }}
|
||||
31
.github/actions/deploy-greptimedb/action.yml
vendored
Normal file
31
.github/actions/deploy-greptimedb/action.yml
vendored
Normal file
@@ -0,0 +1,31 @@
|
||||
name: Deploy GreptimeDB cluster
|
||||
description: Deploy GreptimeDB cluster on Kubernetes
|
||||
inputs:
|
||||
aws-ci-test-bucket:
|
||||
description: 'AWS S3 bucket name for testing'
|
||||
required: true
|
||||
aws-region:
|
||||
description: 'AWS region for testing'
|
||||
required: true
|
||||
data-root:
|
||||
description: 'Data root for testing'
|
||||
required: true
|
||||
aws-access-key-id:
|
||||
description: 'AWS access key id for testing'
|
||||
required: true
|
||||
aws-secret-access-key:
|
||||
description: 'AWS secret access key for testing'
|
||||
required: true
|
||||
runs:
|
||||
using: composite
|
||||
steps:
|
||||
- name: Deploy GreptimeDB by Helm
|
||||
shell: bash
|
||||
env:
|
||||
DATA_ROOT: ${{ inputs.data-root }}
|
||||
AWS_CI_TEST_BUCKET: ${{ inputs.aws-ci-test-bucket }}
|
||||
AWS_REGION: ${{ inputs.aws-region }}
|
||||
AWS_ACCESS_KEY_ID: ${{ inputs.aws-access-key-id }}
|
||||
AWS_SECRET_ACCESS_KEY: ${{ inputs.aws-secret-access-key }}
|
||||
run: |
|
||||
./.github/scripts/deploy-greptimedb.sh
|
||||
136
.github/actions/release-cn-artifacts/action.yaml
vendored
136
.github/actions/release-cn-artifacts/action.yaml
vendored
@@ -1,20 +1,29 @@
|
||||
name: Release CN artifacts
|
||||
description: Release artifacts to CN region
|
||||
inputs:
|
||||
image-registry:
|
||||
description: The image registry to store the images
|
||||
src-image-registry:
|
||||
description: The source image registry to store the images
|
||||
required: true
|
||||
image-namespace:
|
||||
description: The namespace of the image registry to store the images
|
||||
default: docker.io
|
||||
src-image-namespace:
|
||||
description: The namespace of the source image registry to store the images
|
||||
required: true
|
||||
image-name:
|
||||
description: The name of the image to build
|
||||
default: greptime
|
||||
src-image-name:
|
||||
description: The name of the source image
|
||||
required: false
|
||||
default: greptimedb
|
||||
image-registry-username:
|
||||
dst-image-registry:
|
||||
description: The destination image registry to store the images
|
||||
required: true
|
||||
dst-image-namespace:
|
||||
description: The namespace of the destination image registry to store the images
|
||||
required: true
|
||||
default: greptime
|
||||
dst-image-registry-username:
|
||||
description: The username to login to the image registry
|
||||
required: true
|
||||
image-registry-password:
|
||||
dst-image-registry-password:
|
||||
description: The password to login to the image registry
|
||||
required: true
|
||||
version:
|
||||
@@ -25,7 +34,7 @@ inputs:
|
||||
required: false
|
||||
default: 'false'
|
||||
push-latest-tag:
|
||||
description: Whether to push the latest tag
|
||||
description: Whether to push the latest tag of the image
|
||||
required: false
|
||||
default: 'true'
|
||||
aws-cn-s3-bucket:
|
||||
@@ -48,8 +57,8 @@ inputs:
|
||||
description: Directory to store artifacts
|
||||
required: false
|
||||
default: 'artifacts'
|
||||
update-latest-version-info:
|
||||
description: Upload the latest version info in S3
|
||||
update-version-info:
|
||||
description: Update the version info in S3
|
||||
required: false
|
||||
default: 'true'
|
||||
upload-max-retry-times:
|
||||
@@ -63,76 +72,67 @@ inputs:
|
||||
runs:
|
||||
using: composite
|
||||
steps:
|
||||
- name: Install skopeo
|
||||
shell: bash
|
||||
run: |
|
||||
sudo apt update && sudo apt install -y skopeo
|
||||
|
||||
- name: Push images from Dockerhub to ACR
|
||||
shell: bash
|
||||
run: |
|
||||
skopeo copy -a docker://docker.io/${{ inputs.image-namespace }}/${{ inputs.image-name }}:${{ inputs.version }} \
|
||||
--dest-creds "${{ inputs.image-registry-username }}":"${{ inputs.image-registry-password }}" \
|
||||
docker://${{ inputs.image-registry }}/${{ inputs.image-namespace }}/${{ inputs.image-name }}:${{ inputs.version }}
|
||||
|
||||
if [[ "${{ inputs.dev-mode }}" == "false" ]]; then
|
||||
skopeo copy -a docker://docker.io/${{ inputs.image-namespace }}/${{ inputs.image-name }}:${{ inputs.version }} \
|
||||
--dest-creds "${{ inputs.image-registry-username }}":"${{ inputs.image-registry-password }}" \
|
||||
docker://${{ inputs.image-registry }}/${{ inputs.image-namespace }}/${{ inputs.image-name }}-centos:${{ inputs.version }}
|
||||
fi
|
||||
|
||||
- name: Push latest images from Dockerhub to ACR
|
||||
shell: bash
|
||||
if: ${{ inputs.push-latest-tag == 'true' }}
|
||||
run: |
|
||||
skopeo copy -a docker://docker.io/${{ inputs.image-namespace }}/${{ inputs.image-name }}:${{ inputs.version }} \
|
||||
--dest-creds "${{ inputs.image-registry-username }}":"${{ inputs.image-registry-password }}" \
|
||||
docker://${{ inputs.image-registry }}/${{ inputs.image-namespace }}/${{ inputs.image-name }}:latest
|
||||
|
||||
if [[ "${{ inputs.dev-mode }}" == "false" ]]; then
|
||||
skopeo copy -a docker://docker.io/${{ inputs.image-namespace }}/${{ inputs.image-name }}:${{ inputs.version }} \
|
||||
--dest-creds "${{ inputs.image-registry-username }}":"${{ inputs.image-registry-password }}" \
|
||||
docker://${{ inputs.image-registry }}/${{ inputs.image-namespace }}/${{ inputs.image-name }}-centos:latest
|
||||
fi
|
||||
|
||||
- name: Download artifacts
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
path: ${{ inputs.artifacts-dir }}
|
||||
|
||||
- name: Upload artifacts to aws-cn S3
|
||||
- name: Release artifacts to cn region
|
||||
uses: nick-invision/retry@v2
|
||||
if: ${{ inputs.upload-to-s3 == 'true' }}
|
||||
uses: nick-invision/retry@v2
|
||||
env:
|
||||
AWS_ACCESS_KEY_ID: ${{ inputs.aws-cn-access-key-id }}
|
||||
AWS_SECRET_ACCESS_KEY: ${{ inputs.aws-cn-secret-access-key }}
|
||||
AWS_DEFAULT_REGION: ${{ inputs.aws-cn-region }}
|
||||
UPDATE_VERSION_INFO: ${{ inputs.update-version-info }}
|
||||
with:
|
||||
max_attempts: ${{ inputs.upload-max-retry-times }}
|
||||
timeout_minutes: ${{ inputs.upload-retry-timeout }}
|
||||
# The bucket layout will be:
|
||||
# releases/greptimedb
|
||||
# ├── v0.1.0
|
||||
# │ ├── greptime-darwin-amd64-pyo3-v0.1.0.sha256sum
|
||||
# │ └── greptime-darwin-amd64-pyo3-v0.1.0.tar.gz
|
||||
# └── v0.2.0
|
||||
# ├── greptime-darwin-amd64-pyo3-v0.2.0.sha256sum
|
||||
# └── greptime-darwin-amd64-pyo3-v0.2.0.tar.gz
|
||||
command: |
|
||||
cd ${{ inputs.artifacts-dir }} && \
|
||||
aws s3 cp . s3://${{ inputs.aws-cn-s3-bucket }}/releases/greptimedb/${{ inputs.version }} \
|
||||
--recursive --exclude "*" --include "greptime-*.tar.gz" --include "greptime-*.sha256sum"
|
||||
./.github/scripts/upload-artifacts-to-s3.sh \
|
||||
${{ inputs.artifacts-dir }} \
|
||||
${{ inputs.version }} \
|
||||
${{ inputs.aws-cn-s3-bucket }}
|
||||
|
||||
- name: Update latest version info in aws-cn S3
|
||||
if: ${{ inputs.upload-to-s3 == 'true' && inputs.update-latest-version-info == 'true' }} # We'll also upload the latest artifacts to S3 in the scheduled and formal release.
|
||||
uses: nick-invision/retry@v2
|
||||
- name: Push greptimedb image from Dockerhub to ACR
|
||||
shell: bash
|
||||
env:
|
||||
AWS_ACCESS_KEY_ID: ${{ inputs.aws-cn-access-key-id }}
|
||||
AWS_SECRET_ACCESS_KEY: ${{ inputs.aws-cn-secret-access-key }}
|
||||
AWS_DEFAULT_REGION: ${{ inputs.aws-cn-region }}
|
||||
with:
|
||||
max_attempts: ${{ inputs.upload-max-retry-times }}
|
||||
timeout_minutes: ${{ inputs.upload-retry-timeout }}
|
||||
command: |
|
||||
echo "${{ inputs.version }}" > ${{ inputs.artifacts-dir }}/latest-version.txt && \
|
||||
aws cp ${{ inputs.artifacts-dir }}/latest-version.txt s3://${{ inputs.aws-cn-s3-bucket }}/releases/greptimedb/latest-version.txt
|
||||
DST_REGISTRY_USERNAME: ${{ inputs.dst-image-registry-username }}
|
||||
DST_REGISTRY_PASSWORD: ${{ inputs.dst-image-registry-password }}
|
||||
run: |
|
||||
./.github/scripts/copy-image.sh \
|
||||
${{ inputs.src-image-registry }}/${{ inputs.src-image-namespace }}/${{ inputs.src-image-name }}:${{ inputs.version }} \
|
||||
${{ inputs.dst-image-registry }}/${{ inputs.dst-image-namespace }}
|
||||
|
||||
- name: Push latest greptimedb image from Dockerhub to ACR
|
||||
shell: bash
|
||||
if: ${{ inputs.push-latest-tag == 'true' }}
|
||||
env:
|
||||
DST_REGISTRY_USERNAME: ${{ inputs.dst-image-registry-username }}
|
||||
DST_REGISTRY_PASSWORD: ${{ inputs.dst-image-registry-password }}
|
||||
run: |
|
||||
./.github/scripts/copy-image.sh \
|
||||
${{ inputs.src-image-registry }}/${{ inputs.src-image-namespace }}/${{ inputs.src-image-name }}:latest \
|
||||
${{ inputs.dst-image-registry }}/${{ inputs.dst-image-namespace }}
|
||||
|
||||
- name: Push greptimedb-centos image from DockerHub to ACR
|
||||
shell: bash
|
||||
if: ${{ inputs.dev-mode == 'false' }}
|
||||
env:
|
||||
DST_REGISTRY_USERNAME: ${{ inputs.dst-image-registry-username }}
|
||||
DST_REGISTRY_PASSWORD: ${{ inputs.dst-image-registry-password }}
|
||||
run: |
|
||||
./.github/scripts/copy-image.sh \
|
||||
${{ inputs.src-image-registry }}/${{ inputs.src-image-namespace }}/${{ inputs.src-image-name }}-centos:latest \
|
||||
${{ inputs.dst-image-registry }}/${{ inputs.dst-image-namespace }}
|
||||
|
||||
- name: Push greptimedb-centos image from DockerHub to ACR
|
||||
shell: bash
|
||||
if: ${{ inputs.dev-mode == 'false' && inputs.push-latest-tag == 'true' }}
|
||||
env:
|
||||
DST_REGISTRY_USERNAME: ${{ inputs.dst-image-registry-username }}
|
||||
DST_REGISTRY_PASSWORD: ${{ inputs.dst-image-registry-password }}
|
||||
run: |
|
||||
./.github/scripts/copy-image.sh \
|
||||
${{ inputs.src-image-registry }}/${{ inputs.src-image-namespace }}/${{ inputs.src-image-name }}-centos:latest \
|
||||
${{ inputs.dst-image-registry }}/${{ inputs.dst-image-namespace }}
|
||||
|
||||
59
.github/actions/sqlness-test/action.yml
vendored
Normal file
59
.github/actions/sqlness-test/action.yml
vendored
Normal file
@@ -0,0 +1,59 @@
|
||||
name: Run sqlness test
|
||||
description: Run sqlness test on GreptimeDB
|
||||
|
||||
inputs:
|
||||
aws-ci-test-bucket:
|
||||
description: 'AWS S3 bucket name for testing'
|
||||
required: true
|
||||
aws-region:
|
||||
description: 'AWS region for testing'
|
||||
required: true
|
||||
data-root:
|
||||
description: 'Data root for testing'
|
||||
required: true
|
||||
aws-access-key-id:
|
||||
description: 'AWS access key id for testing'
|
||||
required: true
|
||||
aws-secret-access-key:
|
||||
description: 'AWS secret access key for testing'
|
||||
required: true
|
||||
|
||||
runs:
|
||||
using: composite
|
||||
steps:
|
||||
- name: Deploy GreptimeDB cluster by Helm
|
||||
uses: ./.github/actions/deploy-greptimedb
|
||||
with:
|
||||
data-root: ${{ inputs.data-root }}
|
||||
aws-ci-test-bucket: ${{ inputs.aws-ci-test-bucket }}
|
||||
aws-region: ${{ inputs.aws-region }}
|
||||
aws-access-key-id: ${{ inputs.aws-access-key-id }}
|
||||
aws-secret-access-key: ${{ inputs.aws-secret-access-key }}
|
||||
|
||||
# TODO(zyy17): The following tests will be replaced by the real sqlness test.
|
||||
- name: Run tests on greptimedb cluster
|
||||
shell: bash
|
||||
run: |
|
||||
mysql -h 127.0.0.1 -P 14002 -e "CREATE TABLE IF NOT EXISTS system_metrics (host VARCHAR(255), idc VARCHAR(255), cpu_util DOUBLE, memory_util DOUBLE, disk_util DOUBLE, ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP, PRIMARY KEY(host, idc), TIME INDEX(ts));" && \
|
||||
mysql -h 127.0.0.1 -P 14002 -e "SHOW TABLES;"
|
||||
|
||||
- name: Run tests on greptimedb cluster that uses S3
|
||||
shell: bash
|
||||
run: |
|
||||
mysql -h 127.0.0.1 -P 24002 -e "CREATE TABLE IF NOT EXISTS system_metrics (host VARCHAR(255), idc VARCHAR(255), cpu_util DOUBLE, memory_util DOUBLE, disk_util DOUBLE, ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP, PRIMARY KEY(host, idc), TIME INDEX(ts));" && \
|
||||
mysql -h 127.0.0.1 -P 24002 -e "SHOW TABLES;"
|
||||
|
||||
- name: Run tests on standalone greptimedb
|
||||
shell: bash
|
||||
run: |
|
||||
mysql -h 127.0.0.1 -P 34002 -e "CREATE TABLE IF NOT EXISTS system_metrics (host VARCHAR(255), idc VARCHAR(255), cpu_util DOUBLE, memory_util DOUBLE, disk_util DOUBLE, ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP, PRIMARY KEY(host, idc), TIME INDEX(ts));" && \
|
||||
mysql -h 127.0.0.1 -P 34002 -e "SHOW TABLES;"
|
||||
|
||||
- name: Clean S3 data
|
||||
shell: bash
|
||||
env:
|
||||
AWS_DEFAULT_REGION: ${{ inputs.aws-region }}
|
||||
AWS_ACCESS_KEY_ID: ${{ inputs.aws-access-key-id }}
|
||||
AWS_SECRET_ACCESS_KEY: ${{ inputs.aws-secret-access-key }}
|
||||
run: |
|
||||
aws s3 rm s3://${{ inputs.aws-ci-test-bucket }}/${{ inputs.data-root }} --recursive
|
||||
14
.github/actions/upload-artifacts/action.yml
vendored
14
.github/actions/upload-artifacts/action.yml
vendored
@@ -33,9 +33,21 @@ runs:
|
||||
working-directory: ${{ inputs.working-dir }}
|
||||
shell: bash
|
||||
run: |
|
||||
tar -zcvf ${{ inputs.artifacts-dir }}.tar.gz ${{ inputs.artifacts-dir }} && \
|
||||
tar -zcvf ${{ inputs.artifacts-dir }}.tar.gz ${{ inputs.artifacts-dir }}
|
||||
|
||||
- name: Calculate checksum
|
||||
if: runner.os != 'Windows'
|
||||
working-directory: ${{ inputs.working-dir }}
|
||||
shell: bash
|
||||
run: |
|
||||
echo $(shasum -a 256 ${{ inputs.artifacts-dir }}.tar.gz | cut -f1 -d' ') > ${{ inputs.artifacts-dir }}.sha256sum
|
||||
|
||||
- name: Calculate checksum on Windows
|
||||
if: runner.os == 'Windows'
|
||||
working-directory: ${{ inputs.working-dir }}
|
||||
shell: pwsh
|
||||
run: Get-FileHash ${{ inputs.artifacts-dir }}.tar.gz -Algorithm SHA256 | select -ExpandProperty Hash > ${{ inputs.artifacts-dir }}.sha256sum
|
||||
|
||||
# Note: The artifacts will be double zip compressed(related issue: https://github.com/actions/upload-artifact/issues/39).
|
||||
# However, when we use 'actions/download-artifact@v3' to download the artifacts, it will be automatically unzipped.
|
||||
- name: Upload artifacts
|
||||
|
||||
47
.github/scripts/copy-image.sh
vendored
Executable file
47
.github/scripts/copy-image.sh
vendored
Executable file
@@ -0,0 +1,47 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
set -e
|
||||
set -o pipefail
|
||||
|
||||
SRC_IMAGE=$1
|
||||
DST_REGISTRY=$2
|
||||
SKOPEO_STABLE_IMAGE="quay.io/skopeo/stable:latest"
|
||||
|
||||
# Check if necessary variables are set.
|
||||
function check_vars() {
|
||||
for var in DST_REGISTRY_USERNAME DST_REGISTRY_PASSWORD DST_REGISTRY SRC_IMAGE; do
|
||||
if [ -z "${!var}" ]; then
|
||||
echo "$var is not set or empty."
|
||||
echo "Usage: DST_REGISTRY_USERNAME=<your-dst-registry-username> DST_REGISTRY_PASSWORD=<your-dst-registry-password> $0 <dst-registry> <src-image>"
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
||||
# Copies images from DockerHub to the destination registry.
|
||||
function copy_images_from_dockerhub() {
|
||||
# Check if docker is installed.
|
||||
if ! command -v docker &> /dev/null; then
|
||||
echo "docker is not installed. Please install docker to continue."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Extract the name and tag of the source image.
|
||||
IMAGE_NAME=$(echo "$SRC_IMAGE" | sed "s/.*\///")
|
||||
|
||||
echo "Copying $SRC_IMAGE to $DST_REGISTRY/$IMAGE_NAME"
|
||||
|
||||
docker run "$SKOPEO_STABLE_IMAGE" copy -a docker://"$SRC_IMAGE" \
|
||||
--dest-creds "$DST_REGISTRY_USERNAME":"$DST_REGISTRY_PASSWORD" \
|
||||
docker://"$DST_REGISTRY/$IMAGE_NAME"
|
||||
}
|
||||
|
||||
function main() {
|
||||
check_vars
|
||||
copy_images_from_dockerhub
|
||||
}
|
||||
|
||||
# Usage example:
|
||||
# DST_REGISTRY_USERNAME=123 DST_REGISTRY_PASSWORD=456 \
|
||||
# ./copy-image.sh greptime/greptimedb:v0.4.0 greptime-registry.cn-hangzhou.cr.aliyuncs.com
|
||||
main
|
||||
172
.github/scripts/deploy-greptimedb.sh
vendored
Executable file
172
.github/scripts/deploy-greptimedb.sh
vendored
Executable file
@@ -0,0 +1,172 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
set -e
|
||||
set -o pipefail
|
||||
|
||||
KUBERNETES_VERSION="${KUBERNETES_VERSION:-v1.24.0}"
|
||||
ENABLE_STANDALONE_MODE="${ENABLE_STANDALONE_MODE:-true}"
|
||||
DEFAULT_INSTALL_NAMESPACE=${DEFAULT_INSTALL_NAMESPACE:-default}
|
||||
GREPTIMEDB_IMAGE_TAG=${GREPTIMEDB_IMAGE_TAG:-latest}
|
||||
ETCD_CHART="oci://registry-1.docker.io/bitnamicharts/etcd"
|
||||
GREPTIME_CHART="https://greptimeteam.github.io/helm-charts/"
|
||||
|
||||
# Ceate a cluster with 1 control-plane node and 5 workers.
|
||||
function create_kind_cluster() {
|
||||
cat <<EOF | kind create cluster --name "${CLUSTER}" --image kindest/node:"$KUBERNETES_VERSION" --config=-
|
||||
kind: Cluster
|
||||
apiVersion: kind.x-k8s.io/v1alpha4
|
||||
nodes:
|
||||
- role: control-plane
|
||||
- role: worker
|
||||
- role: worker
|
||||
- role: worker
|
||||
- role: worker
|
||||
- role: worker
|
||||
EOF
|
||||
}
|
||||
|
||||
# Add greptime Helm chart repo.
|
||||
function add_greptime_chart() {
|
||||
helm repo add greptime "$GREPTIME_CHART"
|
||||
helm repo update
|
||||
}
|
||||
|
||||
# Deploy a etcd cluster with 3 members.
|
||||
function deploy_etcd_cluster() {
|
||||
local namespace="$1"
|
||||
|
||||
helm install etcd "$ETCD_CHART" \
|
||||
--set replicaCount=3 \
|
||||
--set auth.rbac.create=false \
|
||||
--set auth.rbac.token.enabled=false \
|
||||
-n "$namespace"
|
||||
|
||||
# Wait for etcd cluster to be ready.
|
||||
kubectl rollout status statefulset/etcd -n "$namespace"
|
||||
}
|
||||
|
||||
# Deploy greptimedb-operator.
|
||||
function deploy_greptimedb_operator() {
|
||||
# Use the latest chart and image.
|
||||
helm install greptimedb-operator greptime/greptimedb-operator \
|
||||
--set image.tag=latest \
|
||||
-n "$DEFAULT_INSTALL_NAMESPACE"
|
||||
|
||||
# Wait for greptimedb-operator to be ready.
|
||||
kubectl rollout status deployment/greptimedb-operator -n "$DEFAULT_INSTALL_NAMESPACE"
|
||||
}
|
||||
|
||||
# Deploy greptimedb cluster by using local storage.
|
||||
# It will expose cluster service ports as '14000', '14001', '14002', '14003' to local access.
|
||||
function deploy_greptimedb_cluster() {
|
||||
local cluster_name=$1
|
||||
local install_namespace=$2
|
||||
|
||||
kubectl create ns "$install_namespace"
|
||||
|
||||
deploy_etcd_cluster "$install_namespace"
|
||||
|
||||
helm install "$cluster_name" greptime/greptimedb-cluster \
|
||||
--set image.tag="$GREPTIMEDB_IMAGE_TAG" \
|
||||
--set meta.etcdEndpoints="etcd.$install_namespace:2379" \
|
||||
-n "$install_namespace"
|
||||
|
||||
# Wait for greptimedb cluster to be ready.
|
||||
while true; do
|
||||
PHASE=$(kubectl -n "$install_namespace" get gtc "$cluster_name" -o jsonpath='{.status.clusterPhase}')
|
||||
if [ "$PHASE" == "Running" ]; then
|
||||
echo "Cluster is ready"
|
||||
break
|
||||
else
|
||||
echo "Cluster is not ready yet: Current phase: $PHASE"
|
||||
sleep 5 # wait for 5 seconds before check again.
|
||||
fi
|
||||
done
|
||||
|
||||
# Expose greptimedb cluster to local access.
|
||||
kubectl -n "$install_namespace" port-forward svc/"$cluster_name"-frontend \
|
||||
14000:4000 \
|
||||
14001:4001 \
|
||||
14002:4002 \
|
||||
14003:4003 > /tmp/connections.out &
|
||||
}
|
||||
|
||||
# Deploy greptimedb cluster by using S3.
|
||||
# It will expose cluster service ports as '24000', '24001', '24002', '24003' to local access.
|
||||
function deploy_greptimedb_cluster_with_s3_storage() {
|
||||
local cluster_name=$1
|
||||
local install_namespace=$2
|
||||
|
||||
kubectl create ns "$install_namespace"
|
||||
|
||||
deploy_etcd_cluster "$install_namespace"
|
||||
|
||||
helm install "$cluster_name" greptime/greptimedb-cluster -n "$install_namespace" \
|
||||
--set image.tag="$GREPTIMEDB_IMAGE_TAG" \
|
||||
--set meta.etcdEndpoints="etcd.$install_namespace:2379" \
|
||||
--set storage.s3.bucket="$AWS_CI_TEST_BUCKET" \
|
||||
--set storage.s3.region="$AWS_REGION" \
|
||||
--set storage.s3.root="$DATA_ROOT" \
|
||||
--set storage.s3.secretName=s3-credentials \
|
||||
--set storage.credentials.secretName=s3-credentials \
|
||||
--set storage.credentials.secretCreation.enabled=true \
|
||||
--set storage.credentials.secretCreation.enableEncryption=false \
|
||||
--set storage.credentials.secretCreation.data.access-key-id="$AWS_ACCESS_KEY_ID" \
|
||||
--set storage.credentials.secretCreation.data.secret-access-key="$AWS_SECRET_ACCESS_KEY"
|
||||
|
||||
# Wait for greptimedb cluster to be ready.
|
||||
while true; do
|
||||
PHASE=$(kubectl -n "$install_namespace" get gtc "$cluster_name" -o jsonpath='{.status.clusterPhase}')
|
||||
if [ "$PHASE" == "Running" ]; then
|
||||
echo "Cluster is ready"
|
||||
break
|
||||
else
|
||||
echo "Cluster is not ready yet: Current phase: $PHASE"
|
||||
sleep 5 # wait for 5 seconds before check again.
|
||||
fi
|
||||
done
|
||||
|
||||
# Expose greptimedb cluster to local access.
|
||||
kubectl -n "$install_namespace" port-forward svc/"$cluster_name"-frontend \
|
||||
24000:4000 \
|
||||
24001:4001 \
|
||||
24002:4002 \
|
||||
24003:4003 > /tmp/connections.out &
|
||||
}
|
||||
|
||||
# Deploy standalone greptimedb.
|
||||
# It will expose cluster service ports as '34000', '34001', '34002', '34003' to local access.
|
||||
function deploy_standalone_greptimedb() {
|
||||
helm install greptimedb-standalone greptime/greptimedb-standalone \
|
||||
--set image.tag="$GREPTIMEDB_IMAGE_TAG" \
|
||||
-n "$DEFAULT_INSTALL_NAMESPACE"
|
||||
|
||||
# Wait for etcd cluster to be ready.
|
||||
kubectl rollout status statefulset/greptimedb-standalone -n "$DEFAULT_INSTALL_NAMESPACE"
|
||||
|
||||
# Expose greptimedb to local access.
|
||||
kubectl -n "$DEFAULT_INSTALL_NAMESPACE" port-forward svc/greptimedb-standalone \
|
||||
34000:4000 \
|
||||
34001:4001 \
|
||||
34002:4002 \
|
||||
34003:4003 > /tmp/connections.out &
|
||||
}
|
||||
|
||||
# Entrypoint of the script.
|
||||
function main() {
|
||||
create_kind_cluster
|
||||
add_greptime_chart
|
||||
|
||||
# Deploy standalone greptimedb in the same K8s.
|
||||
if [ "$ENABLE_STANDALONE_MODE" == "true" ]; then
|
||||
deploy_standalone_greptimedb
|
||||
fi
|
||||
|
||||
deploy_greptimedb_operator
|
||||
deploy_greptimedb_cluster testcluster testcluster
|
||||
deploy_greptimedb_cluster_with_s3_storage testcluster-s3 testcluster-s3
|
||||
}
|
||||
|
||||
# Usages:
|
||||
# - Deploy greptimedb cluster: ./deploy-greptimedb.sh
|
||||
main
|
||||
102
.github/scripts/upload-artifacts-to-s3.sh
vendored
Executable file
102
.github/scripts/upload-artifacts-to-s3.sh
vendored
Executable file
@@ -0,0 +1,102 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
set -e
|
||||
set -o pipefail
|
||||
|
||||
ARTIFACTS_DIR=$1
|
||||
VERSION=$2
|
||||
AWS_S3_BUCKET=$3
|
||||
RELEASE_DIRS="releases/greptimedb"
|
||||
GREPTIMEDB_REPO="GreptimeTeam/greptimedb"
|
||||
|
||||
# Check if necessary variables are set.
|
||||
function check_vars() {
|
||||
for var in AWS_S3_BUCKET VERSION ARTIFACTS_DIR; do
|
||||
if [ -z "${!var}" ]; then
|
||||
echo "$var is not set or empty."
|
||||
echo "Usage: $0 <artifacts-dir> <version> <aws-s3-bucket>"
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
||||
# Uploads artifacts to AWS S3 bucket.
|
||||
function upload_artifacts() {
|
||||
# The bucket layout will be:
|
||||
# releases/greptimedb
|
||||
# ├── latest-version.txt
|
||||
# ├── latest-nightly-version.txt
|
||||
# ├── v0.1.0
|
||||
# │ ├── greptime-darwin-amd64-pyo3-v0.1.0.sha256sum
|
||||
# │ └── greptime-darwin-amd64-pyo3-v0.1.0.tar.gz
|
||||
# └── v0.2.0
|
||||
# ├── greptime-darwin-amd64-pyo3-v0.2.0.sha256sum
|
||||
# └── greptime-darwin-amd64-pyo3-v0.2.0.tar.gz
|
||||
find "$ARTIFACTS_DIR" -type f \( -name "*.tar.gz" -o -name "*.sha256sum" \) | while IFS= read -r file; do
|
||||
aws s3 cp \
|
||||
"$file" "s3://$AWS_S3_BUCKET/$RELEASE_DIRS/$VERSION/$(basename "$file")"
|
||||
done
|
||||
}
|
||||
|
||||
# Updates the latest version information in AWS S3 if UPDATE_VERSION_INFO is true.
|
||||
function update_version_info() {
|
||||
if [ "$UPDATE_VERSION_INFO" == "true" ]; then
|
||||
# If it's the officail release(like v1.0.0, v1.0.1, v1.0.2, etc.), update latest-version.txt.
|
||||
if [[ "$VERSION" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
|
||||
echo "Updating latest-version.txt"
|
||||
echo "$VERSION" > latest-version.txt
|
||||
aws s3 cp \
|
||||
latest-version.txt "s3://$AWS_S3_BUCKET/$RELEASE_DIRS/latest-version.txt"
|
||||
fi
|
||||
|
||||
# If it's the nightly release, update latest-nightly-version.txt.
|
||||
if [[ "$VERSION" == *"nightly"* ]]; then
|
||||
echo "Updating latest-nightly-version.txt"
|
||||
echo "$VERSION" > latest-nightly-version.txt
|
||||
aws s3 cp \
|
||||
latest-nightly-version.txt "s3://$AWS_S3_BUCKET/$RELEASE_DIRS/latest-nightly-version.txt"
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
# Downloads artifacts from Github if DOWNLOAD_ARTIFACTS_FROM_GITHUB is true.
|
||||
function download_artifacts_from_github() {
|
||||
if [ "$DOWNLOAD_ARTIFACTS_FROM_GITHUB" == "true" ]; then
|
||||
# Check if jq is installed.
|
||||
if ! command -v jq &> /dev/null; then
|
||||
echo "jq is not installed. Please install jq to continue."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Get the latest release API response.
|
||||
RELEASES_API_RESPONSE=$(curl -s -H "Accept: application/vnd.github.v3+json" "https://api.github.com/repos/$GREPTIMEDB_REPO/releases/latest")
|
||||
|
||||
# Extract download URLs for the artifacts.
|
||||
# Exclude source code archives which are typically named as 'greptimedb-<version>.zip' or 'greptimedb-<version>.tar.gz'.
|
||||
ASSET_URLS=$(echo "$RELEASES_API_RESPONSE" | jq -r '.assets[] | select(.name | test("greptimedb-.*\\.(zip|tar\\.gz)$") | not) | .browser_download_url')
|
||||
|
||||
# Download each asset.
|
||||
while IFS= read -r url; do
|
||||
if [ -n "$url" ]; then
|
||||
curl -LJO "$url"
|
||||
echo "Downloaded: $url"
|
||||
fi
|
||||
done <<< "$ASSET_URLS"
|
||||
fi
|
||||
}
|
||||
|
||||
function main() {
|
||||
check_vars
|
||||
download_artifacts_from_github
|
||||
upload_artifacts
|
||||
update_version_info
|
||||
}
|
||||
|
||||
# Usage example:
|
||||
# AWS_ACCESS_KEY_ID=<your_access_key_id> \
|
||||
# AWS_SECRET_ACCESS_KEY=<your_secret_access_key> \
|
||||
# AWS_DEFAULT_REGION=<your_region> \
|
||||
# UPDATE_VERSION_INFO=true \
|
||||
# DOWNLOAD_ARTIFACTS_FROM_GITHUB=false \
|
||||
# ./upload-artifacts-to-s3.sh <artifacts-dir> <version> <aws-s3-bucket>
|
||||
main
|
||||
2
.github/workflows/apidoc.yml
vendored
2
.github/workflows/apidoc.yml
vendored
@@ -17,7 +17,7 @@ env:
|
||||
|
||||
jobs:
|
||||
apidoc:
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: arduino/setup-protoc@v1
|
||||
|
||||
40
.github/workflows/dev-build.yml
vendored
40
.github/workflows/dev-build.yml
vendored
@@ -16,11 +16,11 @@ on:
|
||||
description: The runner uses to build linux-amd64 artifacts
|
||||
default: ec2-c6i.4xlarge-amd64
|
||||
options:
|
||||
- ubuntu-latest
|
||||
- ubuntu-latest-8-cores
|
||||
- ubuntu-latest-16-cores
|
||||
- ubuntu-latest-32-cores
|
||||
- ubuntu-latest-64-cores
|
||||
- ubuntu-20.04
|
||||
- ubuntu-20.04-8-cores
|
||||
- ubuntu-20.04-16-cores
|
||||
- ubuntu-20.04-32-cores
|
||||
- ubuntu-20.04-64-cores
|
||||
- ec2-c6i.xlarge-amd64 # 4C8G
|
||||
- ec2-c6i.2xlarge-amd64 # 8C16G
|
||||
- ec2-c6i.4xlarge-amd64 # 16C32G
|
||||
@@ -78,7 +78,7 @@ jobs:
|
||||
allocate-runners:
|
||||
name: Allocate runners
|
||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-20.04
|
||||
outputs:
|
||||
linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }}
|
||||
linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }}
|
||||
@@ -164,12 +164,7 @@ jobs:
|
||||
cargo-profile: ${{ env.CARGO_PROFILE }}
|
||||
version: ${{ needs.allocate-runners.outputs.version }}
|
||||
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
|
||||
release-to-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
|
||||
aws-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
|
||||
aws-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
|
||||
aws-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
|
||||
dev-mode: true # Only build the standard greptime binary.
|
||||
upload-to-s3: false # No need to upload to S3.
|
||||
working-dir: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}
|
||||
|
||||
build-linux-arm64-artifacts:
|
||||
@@ -209,7 +204,7 @@ jobs:
|
||||
build-linux-amd64-artifacts,
|
||||
build-linux-arm64-artifacts,
|
||||
]
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-20.04
|
||||
outputs:
|
||||
build-result: ${{ steps.set-build-result.outputs.build-result }}
|
||||
steps:
|
||||
@@ -241,7 +236,7 @@ jobs:
|
||||
allocate-runners,
|
||||
release-images-to-dockerhub,
|
||||
]
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-20.04
|
||||
continue-on-error: true
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
@@ -251,10 +246,13 @@ jobs:
|
||||
- name: Release artifacts to CN region
|
||||
uses: ./.github/actions/release-cn-artifacts
|
||||
with:
|
||||
image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
|
||||
image-namespace: ${{ vars.IMAGE_NAMESPACE }}
|
||||
image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
|
||||
image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
|
||||
src-image-registry: docker.io
|
||||
src-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
|
||||
src-image-name: ${{ env.IMAGE_NAME }}
|
||||
dst-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
|
||||
dst-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
|
||||
dst-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
|
||||
dst-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
|
||||
version: ${{ needs.allocate-runners.outputs.version }}
|
||||
aws-cn-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
|
||||
aws-cn-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
|
||||
@@ -262,13 +260,13 @@ jobs:
|
||||
aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
|
||||
dev-mode: true # Only build the standard images(exclude centos images).
|
||||
push-latest-tag: false # Don't push the latest tag to registry.
|
||||
update-latest-version-info: false # Don't update the latest version info in S3.
|
||||
update-version-info: false # Don't update the version info in S3.
|
||||
|
||||
stop-linux-amd64-runner: # It's always run as the last job in the workflow to make sure that the runner is released.
|
||||
name: Stop linux-amd64 runner
|
||||
# Only run this job when the runner is allocated.
|
||||
if: ${{ always() }}
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-20.04
|
||||
needs: [
|
||||
allocate-runners,
|
||||
build-linux-amd64-artifacts,
|
||||
@@ -293,7 +291,7 @@ jobs:
|
||||
name: Stop linux-arm64 runner
|
||||
# Only run this job when the runner is allocated.
|
||||
if: ${{ always() }}
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-20.04
|
||||
needs: [
|
||||
allocate-runners,
|
||||
build-linux-arm64-artifacts,
|
||||
@@ -320,7 +318,7 @@ jobs:
|
||||
needs: [
|
||||
release-images-to-dockerhub
|
||||
]
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-20.04
|
||||
env:
|
||||
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
|
||||
steps:
|
||||
|
||||
54
.github/workflows/develop.yml
vendored
54
.github/workflows/develop.yml
vendored
@@ -34,7 +34,7 @@ env:
|
||||
jobs:
|
||||
typos:
|
||||
name: Spell Check with Typos
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: crate-ci/typos@v1.13.10
|
||||
@@ -42,7 +42,7 @@ jobs:
|
||||
check:
|
||||
name: Check
|
||||
if: github.event.pull_request.draft == false
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-20.04
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
@@ -60,7 +60,7 @@ jobs:
|
||||
toml:
|
||||
name: Toml Check
|
||||
if: github.event.pull_request.draft == false
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-20.04
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
@@ -80,7 +80,7 @@ jobs:
|
||||
runs-on: ${{ matrix.os }}
|
||||
strategy:
|
||||
matrix:
|
||||
os: [ ubuntu-latest-8-cores, windows-latest-8-cores ]
|
||||
os: [ ubuntu-20.04-8-cores ]
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
@@ -105,7 +105,7 @@ jobs:
|
||||
fmt:
|
||||
name: Rustfmt
|
||||
if: github.event.pull_request.draft == false
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-20.04
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
@@ -124,7 +124,7 @@ jobs:
|
||||
clippy:
|
||||
name: Clippy
|
||||
if: github.event.pull_request.draft == false
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-20.04
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
@@ -142,7 +142,7 @@ jobs:
|
||||
|
||||
coverage:
|
||||
if: github.event.pull_request.draft == false
|
||||
runs-on: ubuntu-latest-8-cores
|
||||
runs-on: ubuntu-20.04-8-cores
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
@@ -188,43 +188,3 @@ jobs:
|
||||
flags: rust
|
||||
fail_ci_if_error: false
|
||||
verbose: true
|
||||
|
||||
test-on-windows:
|
||||
if: github.event.pull_request.draft == false
|
||||
runs-on: windows-latest-8-cores
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
- run: git config --global core.autocrlf false
|
||||
- uses: actions/checkout@v3
|
||||
- uses: arduino/setup-protoc@v1
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- name: Install Rust toolchain
|
||||
uses: dtolnay/rust-toolchain@master
|
||||
with:
|
||||
toolchain: ${{ env.RUST_TOOLCHAIN }}
|
||||
components: llvm-tools-preview
|
||||
- name: Rust Cache
|
||||
uses: Swatinem/rust-cache@v2
|
||||
- name: Install Cargo Nextest
|
||||
uses: taiki-e/install-action@nextest
|
||||
- name: Install Python
|
||||
uses: actions/setup-python@v4
|
||||
with:
|
||||
python-version: '3.10'
|
||||
- name: Install PyArrow Package
|
||||
run: pip install pyarrow
|
||||
- name: Install WSL distribution
|
||||
uses: Vampire/setup-wsl@v2
|
||||
with:
|
||||
distribution: Ubuntu-22.04
|
||||
- name: Running tests
|
||||
run: cargo nextest run -F pyo3_backend,dashboard
|
||||
env:
|
||||
RUST_BACKTRACE: 1
|
||||
CARGO_INCREMENTAL: 0
|
||||
GT_S3_BUCKET: ${{ secrets.S3_BUCKET }}
|
||||
GT_S3_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
|
||||
GT_S3_ACCESS_KEY: ${{ secrets.S3_ACCESS_KEY }}
|
||||
GT_S3_REGION: ${{ secrets.S3_REGION }}
|
||||
UNITTEST_LOG_DIR: "__unittest_logs"
|
||||
|
||||
4
.github/workflows/doc-issue.yml
vendored
4
.github/workflows/doc-issue.yml
vendored
@@ -11,7 +11,7 @@ on:
|
||||
jobs:
|
||||
doc_issue:
|
||||
if: github.event.label.name == 'doc update required'
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- name: create an issue in doc repo
|
||||
uses: dacbd/create-issue-action@main
|
||||
@@ -25,7 +25,7 @@ jobs:
|
||||
${{ github.event.issue.html_url || github.event.pull_request.html_url }}
|
||||
cloud_issue:
|
||||
if: github.event.label.name == 'cloud followup required'
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- name: create an issue in cloud repo
|
||||
uses: dacbd/create-issue-action@main
|
||||
|
||||
12
.github/workflows/docs.yml
vendored
12
.github/workflows/docs.yml
vendored
@@ -30,7 +30,7 @@ name: CI
|
||||
jobs:
|
||||
typos:
|
||||
name: Spell Check with Typos
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: crate-ci/typos@v1.13.10
|
||||
@@ -38,33 +38,33 @@ jobs:
|
||||
check:
|
||||
name: Check
|
||||
if: github.event.pull_request.draft == false
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- run: 'echo "No action required"'
|
||||
|
||||
fmt:
|
||||
name: Rustfmt
|
||||
if: github.event.pull_request.draft == false
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- run: 'echo "No action required"'
|
||||
|
||||
clippy:
|
||||
name: Clippy
|
||||
if: github.event.pull_request.draft == false
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- run: 'echo "No action required"'
|
||||
|
||||
coverage:
|
||||
if: github.event.pull_request.draft == false
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- run: 'echo "No action required"'
|
||||
|
||||
sqlness:
|
||||
name: Sqlness Test
|
||||
if: github.event.pull_request.draft == false
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- run: 'echo "No action required"'
|
||||
|
||||
2
.github/workflows/license.yaml
vendored
2
.github/workflows/license.yaml
vendored
@@ -8,7 +8,7 @@ on:
|
||||
types: [opened, synchronize, reopened, ready_for_review]
|
||||
jobs:
|
||||
license-header-check:
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-20.04
|
||||
name: license-header-check
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
|
||||
38
.github/workflows/nightly-build.yml
vendored
38
.github/workflows/nightly-build.yml
vendored
@@ -14,11 +14,11 @@ on:
|
||||
description: The runner uses to build linux-amd64 artifacts
|
||||
default: ec2-c6i.2xlarge-amd64
|
||||
options:
|
||||
- ubuntu-latest
|
||||
- ubuntu-latest-8-cores
|
||||
- ubuntu-latest-16-cores
|
||||
- ubuntu-latest-32-cores
|
||||
- ubuntu-latest-64-cores
|
||||
- ubuntu-20.04
|
||||
- ubuntu-20.04-8-cores
|
||||
- ubuntu-20.04-16-cores
|
||||
- ubuntu-20.04-32-cores
|
||||
- ubuntu-20.04-64-cores
|
||||
- ec2-c6i.xlarge-amd64 # 4C8G
|
||||
- ec2-c6i.2xlarge-amd64 # 8C16G
|
||||
- ec2-c6i.4xlarge-amd64 # 16C32G
|
||||
@@ -70,7 +70,7 @@ jobs:
|
||||
allocate-runners:
|
||||
name: Allocate runners
|
||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-20.04
|
||||
outputs:
|
||||
linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }}
|
||||
linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }}
|
||||
@@ -175,7 +175,7 @@ jobs:
|
||||
build-linux-amd64-artifacts,
|
||||
build-linux-arm64-artifacts,
|
||||
]
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-20.04
|
||||
outputs:
|
||||
nightly-build-result: ${{ steps.set-nightly-build-result.outputs.nightly-build-result }}
|
||||
steps:
|
||||
@@ -205,7 +205,7 @@ jobs:
|
||||
allocate-runners,
|
||||
release-images-to-dockerhub,
|
||||
]
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-20.04
|
||||
# When we push to ACR, it's easy to fail due to some unknown network issues.
|
||||
# However, we don't want to fail the whole workflow because of this.
|
||||
# The ACR have daily sync with DockerHub, so don't worry about the image not being updated.
|
||||
@@ -218,23 +218,27 @@ jobs:
|
||||
- name: Release artifacts to CN region
|
||||
uses: ./.github/actions/release-cn-artifacts
|
||||
with:
|
||||
image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
|
||||
image-namespace: ${{ vars.IMAGE_NAMESPACE }}
|
||||
image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
|
||||
image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
|
||||
src-image-registry: docker.io
|
||||
src-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
|
||||
src-image-name: greptimedb
|
||||
dst-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
|
||||
dst-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
|
||||
dst-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
|
||||
dst-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
|
||||
version: ${{ needs.allocate-runners.outputs.version }}
|
||||
aws-cn-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
|
||||
aws-cn-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
|
||||
aws-cn-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
|
||||
aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
|
||||
push-latest-tag: false # Don't push the latest tag to registry.
|
||||
update-latest-version-info: false # Don't update the latest version info in S3.
|
||||
dev-mode: false
|
||||
update-version-info: false # Don't update version info in S3.
|
||||
push-latest-tag: false # Don't push the latest tag to registry.
|
||||
|
||||
stop-linux-amd64-runner: # It's always run as the last job in the workflow to make sure that the runner is released.
|
||||
name: Stop linux-amd64 runner
|
||||
# Only run this job when the runner is allocated.
|
||||
if: ${{ always() }}
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-20.04
|
||||
needs: [
|
||||
allocate-runners,
|
||||
build-linux-amd64-artifacts,
|
||||
@@ -259,7 +263,7 @@ jobs:
|
||||
name: Stop linux-arm64 runner
|
||||
# Only run this job when the runner is allocated.
|
||||
if: ${{ always() }}
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-20.04
|
||||
needs: [
|
||||
allocate-runners,
|
||||
build-linux-arm64-artifacts,
|
||||
@@ -286,7 +290,7 @@ jobs:
|
||||
needs: [
|
||||
release-images-to-dockerhub
|
||||
]
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-20.04
|
||||
env:
|
||||
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
|
||||
steps:
|
||||
|
||||
98
.github/workflows/nightly-ci.yml
vendored
Normal file
98
.github/workflows/nightly-ci.yml
vendored
Normal file
@@ -0,0 +1,98 @@
|
||||
# Nightly CI: runs tests every night for our second tier plaforms (Windows)
|
||||
|
||||
on:
|
||||
schedule:
|
||||
- cron: '0 23 * * 1-5'
|
||||
workflow_dispatch:
|
||||
|
||||
name: Nightly CI
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
|
||||
cancel-in-progress: true
|
||||
|
||||
env:
|
||||
RUST_TOOLCHAIN: nightly-2023-08-07
|
||||
|
||||
jobs:
|
||||
sqlness:
|
||||
name: Sqlness Test
|
||||
runs-on: ${{ matrix.os }}
|
||||
strategy:
|
||||
matrix:
|
||||
os: [ windows-latest-8-cores ]
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
- uses: actions/checkout@v4.1.0
|
||||
- uses: arduino/setup-protoc@v1
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- uses: dtolnay/rust-toolchain@master
|
||||
with:
|
||||
toolchain: ${{ env.RUST_TOOLCHAIN }}
|
||||
- name: Rust Cache
|
||||
uses: Swatinem/rust-cache@v2
|
||||
- name: Run sqlness
|
||||
run: cargo sqlness
|
||||
- name: Notify slack if failed
|
||||
if: failure()
|
||||
uses: slackapi/slack-github-action@v1.23.0
|
||||
env:
|
||||
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
|
||||
with:
|
||||
payload: |
|
||||
{"text": "Nightly CI failed for sqlness tests"}
|
||||
- name: Upload sqlness logs
|
||||
if: always()
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: sqlness-logs
|
||||
path: ${{ runner.temp }}/greptime-*.log
|
||||
retention-days: 3
|
||||
|
||||
test-on-windows:
|
||||
runs-on: windows-latest-8-cores
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
- run: git config --global core.autocrlf false
|
||||
- uses: actions/checkout@v4.1.0
|
||||
- uses: arduino/setup-protoc@v1
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- name: Install Rust toolchain
|
||||
uses: dtolnay/rust-toolchain@master
|
||||
with:
|
||||
toolchain: ${{ env.RUST_TOOLCHAIN }}
|
||||
components: llvm-tools-preview
|
||||
- name: Rust Cache
|
||||
uses: Swatinem/rust-cache@v2
|
||||
- name: Install Cargo Nextest
|
||||
uses: taiki-e/install-action@nextest
|
||||
- name: Install Python
|
||||
uses: actions/setup-python@v4
|
||||
with:
|
||||
python-version: '3.10'
|
||||
- name: Install PyArrow Package
|
||||
run: pip install pyarrow
|
||||
- name: Install WSL distribution
|
||||
uses: Vampire/setup-wsl@v2
|
||||
with:
|
||||
distribution: Ubuntu-22.04
|
||||
- name: Running tests
|
||||
run: cargo nextest run -F pyo3_backend,dashboard
|
||||
env:
|
||||
RUST_BACKTRACE: 1
|
||||
CARGO_INCREMENTAL: 0
|
||||
GT_S3_BUCKET: ${{ secrets.S3_BUCKET }}
|
||||
GT_S3_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
|
||||
GT_S3_ACCESS_KEY: ${{ secrets.S3_ACCESS_KEY }}
|
||||
GT_S3_REGION: ${{ secrets.S3_REGION }}
|
||||
UNITTEST_LOG_DIR: "__unittest_logs"
|
||||
- name: Notify slack if failed
|
||||
if: failure()
|
||||
uses: slackapi/slack-github-action@v1.23.0
|
||||
env:
|
||||
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
|
||||
with:
|
||||
payload: |
|
||||
{"text": "Nightly CI failed for cargo test"}
|
||||
26
.github/workflows/nightly-funtional-tests.yml
vendored
Normal file
26
.github/workflows/nightly-funtional-tests.yml
vendored
Normal file
@@ -0,0 +1,26 @@
|
||||
name: Nightly functional tests
|
||||
|
||||
on:
|
||||
schedule:
|
||||
# At 00:00 on Tuesday.
|
||||
- cron: '0 0 * * 2'
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
sqlness-test:
|
||||
name: Run sqlness test
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Run sqlness test
|
||||
uses: ./.github/actions/sqlness-test
|
||||
with:
|
||||
data-root: sqlness-test
|
||||
aws-ci-test-bucket: ${{ vars.AWS_CI_TEST_BUCKET }}
|
||||
aws-region: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
|
||||
aws-access-key-id: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
|
||||
aws-secret-access-key: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
|
||||
4
.github/workflows/pr-title-checker.yml
vendored
@@ -10,7 +10,7 @@ on:
|
||||
|
||||
jobs:
|
||||
check:
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-20.04
|
||||
timeout-minutes: 10
|
||||
steps:
|
||||
- uses: thehanimo/pr-title-checker@v1.3.4
|
||||
@@ -19,7 +19,7 @@ jobs:
|
||||
pass_on_octokit_error: false
|
||||
configuration_path: ".github/pr-title-checker-config.json"
|
||||
breaking:
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-20.04
|
||||
timeout-minutes: 10
|
||||
steps:
|
||||
- uses: thehanimo/pr-title-checker@v1.3.4
|
||||
|
||||
@@ -3,17 +3,31 @@ name: Release dev-builder images
|
||||
on:
|
||||
workflow_dispatch: # Allows you to run this workflow manually.
|
||||
inputs:
|
||||
release_dev_builder_images:
|
||||
version:
|
||||
description: Version of the dev-builder
|
||||
required: false
|
||||
default: latest
|
||||
release_dev_builder_ubuntu_image:
|
||||
type: boolean
|
||||
description: Release dev-builder images
|
||||
description: Release dev-builder-ubuntu image
|
||||
required: false
|
||||
default: false
|
||||
release_dev_builder_centos_image:
|
||||
type: boolean
|
||||
description: Release dev-builder-centos image
|
||||
required: false
|
||||
default: false
|
||||
release_dev_builder_android_image:
|
||||
type: boolean
|
||||
description: Release dev-builder-android image
|
||||
required: false
|
||||
default: false
|
||||
|
||||
jobs:
|
||||
release-dev-builder-images:
|
||||
name: Release dev builder images
|
||||
if: ${{ inputs.release_dev_builder_images }} # Only manually trigger this job.
|
||||
runs-on: ubuntu-latest-16-cores
|
||||
if: ${{ inputs.release_dev_builder_ubuntu_image || inputs.release_dev_builder_centos_image || inputs.release_dev_builder_android_image }} # Only manually trigger this job.
|
||||
runs-on: ubuntu-20.04-16-cores
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
@@ -21,10 +35,51 @@ jobs:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Build and push dev builder images
|
||||
uses: ./.github/actions/build-dev-builder-image
|
||||
uses: ./.github/actions/build-dev-builder-images
|
||||
with:
|
||||
version: ${{ inputs.version }}
|
||||
dockerhub-image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
dockerhub-image-registry-token: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
acr-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
|
||||
acr-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
|
||||
acr-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
|
||||
build-dev-builder-ubuntu: ${{ inputs.release_dev_builder_ubuntu_image }}
|
||||
build-dev-builder-centos: ${{ inputs.release_dev_builder_centos_image }}
|
||||
build-dev-builder-android: ${{ inputs.release_dev_builder_android_image }}
|
||||
|
||||
release-dev-builder-images-cn: # Note: Be careful issue: https://github.com/containers/skopeo/issues/1874 and we decide to use the latest stable skopeo container.
|
||||
name: Release dev builder images to CN region
|
||||
runs-on: ubuntu-20.04
|
||||
needs: [
|
||||
release-dev-builder-images
|
||||
]
|
||||
steps:
|
||||
- name: Push dev-builder-ubuntu image
|
||||
shell: bash
|
||||
if: ${{ inputs.release_dev_builder_ubuntu_image }}
|
||||
env:
|
||||
DST_REGISTRY_USERNAME: ${{ secrets.ALICLOUD_USERNAME }}
|
||||
DST_REGISTRY_PASSWORD: ${{ secrets.ALICLOUD_PASSWORD }}
|
||||
run: |
|
||||
docker run quay.io/skopeo/stable:latest copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ inputs.version }} \
|
||||
--dest-creds "$DST_REGISTRY_USERNAME":"$DST_REGISTRY_PASSWORD" \
|
||||
docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ inputs.version }}
|
||||
|
||||
- name: Push dev-builder-centos image
|
||||
shell: bash
|
||||
if: ${{ inputs.release_dev_builder_centos_image }}
|
||||
env:
|
||||
DST_REGISTRY_USERNAME: ${{ secrets.ALICLOUD_USERNAME }}
|
||||
DST_REGISTRY_PASSWORD: ${{ secrets.ALICLOUD_PASSWORD }}
|
||||
run: |
|
||||
docker run quay.io/skopeo/stable:latest copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:${{ inputs.version }} \
|
||||
--dest-creds "$DST_REGISTRY_USERNAME":"$DST_REGISTRY_PASSWORD" \
|
||||
docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:${{ inputs.version }}
|
||||
|
||||
- name: Push dev-builder-android image
|
||||
shell: bash
|
||||
if: ${{ inputs.release_dev_builder_android_image }}
|
||||
env:
|
||||
DST_REGISTRY_USERNAME: ${{ secrets.ALICLOUD_USERNAME }}
|
||||
DST_REGISTRY_PASSWORD: ${{ secrets.ALICLOUD_PASSWORD }}
|
||||
run: |
|
||||
docker run quay.io/skopeo/stable:latest copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:${{ inputs.version }} \
|
||||
--dest-creds "$DST_REGISTRY_USERNAME":"$DST_REGISTRY_PASSWORD" \
|
||||
docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:${{ inputs.version }}
|
||||
|
||||
85
.github/workflows/release.yml
vendored
@@ -18,11 +18,11 @@ on:
|
||||
description: The runner uses to build linux-amd64 artifacts
|
||||
default: ec2-c6i.4xlarge-amd64
|
||||
options:
|
||||
- ubuntu-latest
|
||||
- ubuntu-latest-8-cores
|
||||
- ubuntu-latest-16-cores
|
||||
- ubuntu-latest-32-cores
|
||||
- ubuntu-latest-64-cores
|
||||
- ubuntu-20.04
|
||||
- ubuntu-20.04-8-cores
|
||||
- ubuntu-20.04-16-cores
|
||||
- ubuntu-20.04-32-cores
|
||||
- ubuntu-20.04-64-cores
|
||||
- ec2-c6i.xlarge-amd64 # 4C8G
|
||||
- ec2-c6i.2xlarge-amd64 # 8C16G
|
||||
- ec2-c6i.4xlarge-amd64 # 16C32G
|
||||
@@ -63,6 +63,11 @@ on:
|
||||
description: Build macos artifacts
|
||||
required: false
|
||||
default: false
|
||||
build_windows_artifacts:
|
||||
type: boolean
|
||||
description: Build Windows artifacts
|
||||
required: false
|
||||
default: false
|
||||
publish_github_release:
|
||||
type: boolean
|
||||
description: Create GitHub release and upload artifacts
|
||||
@@ -92,11 +97,12 @@ jobs:
|
||||
allocate-runners:
|
||||
name: Allocate runners
|
||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-20.04
|
||||
outputs:
|
||||
linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }}
|
||||
linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }}
|
||||
macos-runner: ${{ inputs.macos_runner || vars.DEFAULT_MACOS_RUNNER }}
|
||||
windows-runner: windows-latest-8-cores
|
||||
|
||||
# The following EC2 resource id will be used for resource releasing.
|
||||
linux-amd64-ec2-runner-label: ${{ steps.start-linux-amd64-runner.outputs.label }}
|
||||
@@ -234,6 +240,42 @@ jobs:
|
||||
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
|
||||
artifacts-dir: ${{ matrix.artifacts-dir-prefix }}-${{ needs.allocate-runners.outputs.version }}
|
||||
|
||||
build-windows-artifacts:
|
||||
name: Build Windows artifacts
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
include:
|
||||
- os: ${{ needs.allocate-runners.outputs.windows-runner }}
|
||||
arch: x86_64-pc-windows-msvc
|
||||
features: servers/dashboard
|
||||
artifacts-dir-prefix: greptime-windows-amd64
|
||||
- os: ${{ needs.allocate-runners.outputs.windows-runner }}
|
||||
arch: x86_64-pc-windows-msvc
|
||||
features: pyo3_backend,servers/dashboard
|
||||
artifacts-dir-prefix: greptime-windows-amd64-pyo3
|
||||
runs-on: ${{ matrix.os }}
|
||||
needs: [
|
||||
allocate-runners,
|
||||
]
|
||||
if: ${{ inputs.build_windows_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
|
||||
steps:
|
||||
- run: git config --global core.autocrlf false
|
||||
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- uses: ./.github/actions/build-windows-artifacts
|
||||
with:
|
||||
arch: ${{ matrix.arch }}
|
||||
rust-toolchain: ${{ env.RUST_TOOLCHAIN }}
|
||||
cargo-profile: ${{ env.CARGO_PROFILE }}
|
||||
features: ${{ matrix.features }}
|
||||
version: ${{ needs.allocate-runners.outputs.version }}
|
||||
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
|
||||
artifacts-dir: ${{ matrix.artifacts-dir-prefix }}-${{ needs.allocate-runners.outputs.version }}
|
||||
|
||||
release-images-to-dockerhub:
|
||||
name: Build and push images to DockerHub
|
||||
if: ${{ inputs.release_images || github.event_name == 'push' || github.event_name == 'schedule' }}
|
||||
@@ -260,11 +302,15 @@ jobs:
|
||||
release-cn-artifacts:
|
||||
name: Release artifacts to CN region
|
||||
if: ${{ inputs.release_images || github.event_name == 'push' || github.event_name == 'schedule' }}
|
||||
needs: [
|
||||
needs: [ # The job have to wait for all the artifacts are built.
|
||||
allocate-runners,
|
||||
build-linux-amd64-artifacts,
|
||||
build-linux-arm64-artifacts,
|
||||
build-macos-artifacts,
|
||||
build-windows-artifacts,
|
||||
release-images-to-dockerhub,
|
||||
]
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-20.04
|
||||
# When we push to ACR, it's easy to fail due to some unknown network issues.
|
||||
# However, we don't want to fail the whole workflow because of this.
|
||||
# The ACR have daily sync with DockerHub, so don't worry about the image not being updated.
|
||||
@@ -277,27 +323,34 @@ jobs:
|
||||
- name: Release artifacts to CN region
|
||||
uses: ./.github/actions/release-cn-artifacts
|
||||
with:
|
||||
image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
|
||||
image-namespace: ${{ vars.IMAGE_NAMESPACE }}
|
||||
image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
|
||||
image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
|
||||
src-image-registry: docker.io
|
||||
src-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
|
||||
src-image-name: greptimedb
|
||||
dst-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
|
||||
dst-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
|
||||
dst-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
|
||||
dst-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
|
||||
version: ${{ needs.allocate-runners.outputs.version }}
|
||||
aws-cn-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
|
||||
aws-cn-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
|
||||
aws-cn-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
|
||||
aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
|
||||
dev-mode: false
|
||||
update-version-info: true
|
||||
push-latest-tag: true
|
||||
|
||||
publish-github-release:
|
||||
name: Create GitHub release and upload artifacts
|
||||
if: ${{ inputs.publish_github_release || github.event_name == 'push' || github.event_name == 'schedule' }}
|
||||
needs: [
|
||||
needs: [ # The job have to wait for all the artifacts are built.
|
||||
allocate-runners,
|
||||
build-linux-amd64-artifacts,
|
||||
build-linux-arm64-artifacts,
|
||||
build-macos-artifacts,
|
||||
build-windows-artifacts,
|
||||
release-images-to-dockerhub,
|
||||
]
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
@@ -315,7 +368,7 @@ jobs:
|
||||
name: Stop linux-amd64 runner
|
||||
# Only run this job when the runner is allocated.
|
||||
if: ${{ always() }}
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-20.04
|
||||
needs: [
|
||||
allocate-runners,
|
||||
build-linux-amd64-artifacts,
|
||||
@@ -340,7 +393,7 @@ jobs:
|
||||
name: Stop linux-arm64 runner
|
||||
# Only run this job when the runner is allocated.
|
||||
if: ${{ always() }}
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-20.04
|
||||
needs: [
|
||||
allocate-runners,
|
||||
build-linux-arm64-artifacts,
|
||||
|
||||
26
.github/workflows/size-label.yml
vendored
Normal file
@@ -0,0 +1,26 @@
|
||||
name: size-labeler
|
||||
|
||||
on: [pull_request]
|
||||
|
||||
jobs:
|
||||
labeler:
|
||||
runs-on: ubuntu-latest
|
||||
name: Label the PR size
|
||||
steps:
|
||||
- uses: codelytv/pr-size-labeler@v1
|
||||
with:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
s_label: 'Size: S'
|
||||
s_max_size: '100'
|
||||
m_label: 'Size: M'
|
||||
m_max_size: '500'
|
||||
l_label: 'Size: L'
|
||||
l_max_size: '1000'
|
||||
xl_label: 'Size: XL'
|
||||
fail_if_xl: 'false'
|
||||
message_if_xl: >
|
||||
This PR exceeds the recommended size of 1000 lines.
|
||||
Please make sure you are NOT addressing multiple issues with one PR.
|
||||
Note this PR might be rejected due to its size.
|
||||
github_api_url: 'api.github.com'
|
||||
files_to_ignore: 'Cargo.lock'
|
||||
@@ -2,7 +2,7 @@
|
||||
|
||||
Thanks a lot for considering contributing to GreptimeDB. We believe people like you would make GreptimeDB a great product. We intend to build a community where individuals can have open talks, show respect for one another, and speak with true ❤️. Meanwhile, we are to keep transparency and make your effort count here.
|
||||
|
||||
Read the guidelines, and they can help you get started. Communicate with respect to developers maintaining and developing the project. In return, they should reciprocate that respect by addressing your issue, reviewing changes, as well as helping finalize and merge your pull requests.
|
||||
Please read the guidelines, and they can help you get started. Communicate with respect to developers maintaining and developing the project. In return, they should reciprocate that respect by addressing your issue, reviewing changes, as well as helping finalize and merge your pull requests.
|
||||
|
||||
Follow our [README](https://github.com/GreptimeTeam/greptimedb#readme) to get the whole picture of the project. To learn about the design of GreptimeDB, please refer to the [design docs](https://github.com/GrepTimeTeam/docs).
|
||||
|
||||
@@ -21,7 +21,7 @@ Pull requests are great, but we accept all kinds of other help if you like. Such
|
||||
- Write tutorials or blog posts. Blog, speak about, or create tutorials about one of GreptimeDB's many features. Mention [@greptime](https://twitter.com/greptime) on Twitter and email info@greptime.com so we can give pointers and tips and help you spread the word by promoting your content on Greptime communication channels.
|
||||
- Improve the documentation. [Submit documentation](http://github.com/greptimeTeam/docs/) updates, enhancements, designs, or bug fixes, and fixing any spelling or grammar errors will be very much appreciated.
|
||||
- Present at meetups and conferences about your GreptimeDB projects. Your unique challenges and successes in building things with GreptimeDB can provide great speaking material. We'd love to review your talk abstract, so get in touch with us if you'd like some help!
|
||||
- Submit bug reports. To report a bug or a security issue, you can [open a new GitHub issue](https://github.com/GrepTimeTeam/greptimedb/issues/new).
|
||||
- Submitting bug reports. To report a bug or a security issue, you can [open a new GitHub issue](https://github.com/GrepTimeTeam/greptimedb/issues/new).
|
||||
- Speak up feature requests. Send feedback is a great way for us to understand your different use cases of GreptimeDB better. If you want to share your experience with GreptimeDB, or if you want to discuss any ideas, you can start a discussion on [GitHub discussions](https://github.com/GreptimeTeam/greptimedb/discussions), chat with the Greptime team on [Slack](https://greptime.com/slack), or you can tweet [@greptime](https://twitter.com/greptime) on Twitter.
|
||||
|
||||
## Code of Conduct
|
||||
@@ -81,7 +81,7 @@ Now, `pre-commit` will run automatically on `git commit`.
|
||||
### Title
|
||||
|
||||
The titles of pull requests should be prefixed with category names listed in [Conventional Commits specification](https://www.conventionalcommits.org/en/v1.0.0)
|
||||
like `feat`/`fix`/`docs`, with a concise summary of code change following. DO NOT use last commit message as pull request title.
|
||||
like `feat`/`fix`/`docs`, with a concise summary of code change following. AVOID using the last commit message as pull request title.
|
||||
|
||||
### Description
|
||||
|
||||
@@ -100,7 +100,7 @@ of what you were trying to do and what went wrong. You can also reach for help i
|
||||
|
||||
## Community
|
||||
|
||||
The core team will be thrilled if you participate in any way you like. When you are stuck, try ask for help by filing an issue, with a detailed description of what you were trying to do and what went wrong. If you have any questions or if you would like to get involved in our community, please check out:
|
||||
The core team will be thrilled if you would like to participate in any way you like. When you are stuck, try to ask for help by filing an issue, with a detailed description of what you were trying to do and what went wrong. If you have any questions or if you would like to get involved in our community, please check out:
|
||||
|
||||
- [GreptimeDB Community Slack](https://greptime.com/slack)
|
||||
- [GreptimeDB Github Discussions](https://github.com/GreptimeTeam/greptimedb/discussions)
|
||||
|
||||
2839
Cargo.lock
generated
File diff suppressed because it is too large
49
Cargo.toml
@@ -27,6 +27,7 @@ members = [
|
||||
"src/common/telemetry",
|
||||
"src/common/test-util",
|
||||
"src/common/time",
|
||||
"src/common/decimal",
|
||||
"src/common/version",
|
||||
"src/datanode",
|
||||
"src/datatypes",
|
||||
@@ -35,6 +36,7 @@ members = [
|
||||
"src/log-store",
|
||||
"src/meta-client",
|
||||
"src/meta-srv",
|
||||
"src/metric-engine",
|
||||
"src/mito2",
|
||||
"src/object-store",
|
||||
"src/operator",
|
||||
@@ -55,42 +57,47 @@ members = [
|
||||
resolver = "2"
|
||||
|
||||
[workspace.package]
|
||||
version = "0.4.0"
|
||||
version = "0.4.2"
|
||||
edition = "2021"
|
||||
license = "Apache-2.0"
|
||||
|
||||
[workspace.dependencies]
|
||||
aquamarine = "0.3"
|
||||
arrow = { version = "43.0" }
|
||||
arrow-array = "43.0"
|
||||
arrow-flight = "43.0"
|
||||
arrow-schema = { version = "43.0", features = ["serde"] }
|
||||
arrow = { version = "47.0" }
|
||||
arrow-array = "47.0"
|
||||
arrow-flight = "47.0"
|
||||
arrow-schema = { version = "47.0", features = ["serde"] }
|
||||
async-stream = "0.3"
|
||||
async-trait = "0.1"
|
||||
bigdecimal = "0.4.2"
|
||||
chrono = { version = "0.4", features = ["serde"] }
|
||||
datafusion = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "b6f3b28b6fe91924cc8dd3d83726b766f2a706ec" }
|
||||
datafusion-common = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "b6f3b28b6fe91924cc8dd3d83726b766f2a706ec" }
|
||||
datafusion-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "b6f3b28b6fe91924cc8dd3d83726b766f2a706ec" }
|
||||
datafusion-optimizer = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "b6f3b28b6fe91924cc8dd3d83726b766f2a706ec" }
|
||||
datafusion-physical-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "b6f3b28b6fe91924cc8dd3d83726b766f2a706ec" }
|
||||
datafusion-sql = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "b6f3b28b6fe91924cc8dd3d83726b766f2a706ec" }
|
||||
datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "b6f3b28b6fe91924cc8dd3d83726b766f2a706ec" }
|
||||
datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
|
||||
datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
|
||||
datafusion-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
|
||||
datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
|
||||
datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
|
||||
datafusion-sql = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
|
||||
datafusion-substrait = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
|
||||
derive_builder = "0.12"
|
||||
etcd-client = "0.11"
|
||||
etcd-client = "0.12"
|
||||
futures = "0.3"
|
||||
futures-util = "0.3"
|
||||
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "1f1dd532a111e3834cc3019c5605e2993ffb9dc3" }
|
||||
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "5da72f1cae6b24315e5afc87520aaf7b4d6bb872" }
|
||||
humantime-serde = "1.1"
|
||||
itertools = "0.10"
|
||||
lazy_static = "1.4"
|
||||
meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "abbd357c1e193cd270ea65ee7652334a150b628f" }
|
||||
metrics = "0.20"
|
||||
moka = "0.12"
|
||||
once_cell = "1.18"
|
||||
opentelemetry-proto = { version = "0.2", features = ["gen-tonic", "metrics"] }
|
||||
parquet = "43.0"
|
||||
opentelemetry-proto = { git = "https://github.com/waynexia/opentelemetry-rust.git", rev = "33841b38dda79b15f2024952be5f32533325ca02", features = [
|
||||
"gen-tonic",
|
||||
"metrics",
|
||||
"trace",
|
||||
] }
|
||||
parquet = "47.0"
|
||||
paste = "1.0"
|
||||
prost = "0.11"
|
||||
prometheus = { version = "0.13.3", features = ["process"] }
|
||||
prost = "0.12"
|
||||
raft-engine = { git = "https://github.com/tikv/raft-engine.git", rev = "22dfb426cd994602b57725ef080287d3e53db479" }
|
||||
rand = "0.8"
|
||||
regex = "1.8"
|
||||
@@ -99,11 +106,13 @@ reqwest = { version = "0.11", default-features = false, features = [
|
||||
"rustls-tls-native-roots",
|
||||
"stream",
|
||||
] }
|
||||
rust_decimal = "1.32.0"
|
||||
serde = { version = "1.0", features = ["derive"] }
|
||||
serde_json = "1.0"
|
||||
smallvec = "1"
|
||||
snafu = { version = "0.7", features = ["backtraces"] }
|
||||
sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "296a4f6c73b129d6f565a42a2e5e53c6bc2b9da4", features = [
|
||||
# on branch v0.38.x
|
||||
sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "0fbae07d0c46dc18e3381c406d8b9b8abef6b1fd", features = [
|
||||
"visitor",
|
||||
] }
|
||||
strum = { version = "0.25", features = ["derive"] }
|
||||
@@ -111,7 +120,7 @@ tempfile = "3"
|
||||
tokio = { version = "1.28", features = ["full"] }
|
||||
tokio-util = { version = "0.7", features = ["io-util", "compat"] }
|
||||
toml = "0.7"
|
||||
tonic = { version = "0.9", features = ["tls"] }
|
||||
tonic = { version = "0.10", features = ["tls"] }
|
||||
uuid = { version = "1", features = ["serde", "v4", "fast-rng"] }
|
||||
## workspaces members
|
||||
api = { path = "src/api" }
|
||||
|
||||
@@ -184,6 +184,6 @@ Please refer to [contribution guidelines](CONTRIBUTING.md) for more information.
|
||||
## Acknowledgement
|
||||
- GreptimeDB uses [Apache Arrow](https://arrow.apache.org/) as the memory model and [Apache Parquet](https://parquet.apache.org/) as the persistent file format.
|
||||
- GreptimeDB's query engine is powered by [Apache Arrow DataFusion](https://github.com/apache/arrow-datafusion).
|
||||
- [OpenDAL](https://github.com/datafuselabs/opendal) from [Datafuse Labs](https://github.com/datafuselabs) gives GreptimeDB a very general and elegant data access abstraction layer.
|
||||
- GreptimeDB’s meta service is based on [etcd](https://etcd.io/).
|
||||
- [Apache OpenDAL (incubating)](https://opendal.apache.org) gives GreptimeDB a very general and elegant data access abstraction layer.
|
||||
- GreptimeDB's meta service is based on [etcd](https://etcd.io/).
|
||||
- GreptimeDB uses [RustPython](https://github.com/RustPython/RustPython) for experimental embedded python scripting.
|
||||
|
||||
@@ -6,8 +6,10 @@ license.workspace = true
|
||||
|
||||
[dependencies]
|
||||
arrow.workspace = true
|
||||
chrono.workspace = true
|
||||
clap = { version = "4.0", features = ["derive"] }
|
||||
client = { workspace = true }
|
||||
futures-util.workspace = true
|
||||
indicatif = "0.17.1"
|
||||
itertools.workspace = true
|
||||
parquet.workspace = true
|
||||
|
||||
@@ -29,14 +29,14 @@ use client::api::v1::column::Values;
|
||||
use client::api::v1::{
|
||||
Column, ColumnDataType, ColumnDef, CreateTableExpr, InsertRequest, InsertRequests, SemanticType,
|
||||
};
|
||||
use client::{Client, Database, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use client::{Client, Database, Output, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use futures_util::TryStreamExt;
|
||||
use indicatif::{MultiProgress, ProgressBar, ProgressStyle};
|
||||
use parquet::arrow::arrow_reader::ParquetRecordBatchReaderBuilder;
|
||||
use tokio::task::JoinSet;
|
||||
|
||||
const CATALOG_NAME: &str = "greptime";
|
||||
const SCHEMA_NAME: &str = "public";
|
||||
const TABLE_NAME: &str = "nyc_taxi";
|
||||
|
||||
#[derive(Parser)]
|
||||
#[command(name = "NYC benchmark runner")]
|
||||
@@ -74,7 +74,12 @@ fn get_file_list<P: AsRef<Path>>(path: P) -> Vec<PathBuf> {
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn new_table_name() -> String {
|
||||
format!("nyc_taxi_{}", chrono::Utc::now().timestamp())
|
||||
}
|
||||
|
||||
async fn write_data(
|
||||
table_name: &str,
|
||||
batch_size: usize,
|
||||
db: &Database,
|
||||
path: PathBuf,
|
||||
@@ -104,7 +109,7 @@ async fn write_data(
|
||||
}
|
||||
let (columns, row_count) = convert_record_batch(record_batch);
|
||||
let request = InsertRequest {
|
||||
table_name: TABLE_NAME.to_string(),
|
||||
table_name: table_name.to_string(),
|
||||
columns,
|
||||
row_count,
|
||||
};
|
||||
@@ -113,7 +118,7 @@ async fn write_data(
|
||||
};
|
||||
|
||||
let now = Instant::now();
|
||||
let _ = db.insert(requests).await.unwrap();
|
||||
db.insert(requests).await.unwrap();
|
||||
let elapsed = now.elapsed();
|
||||
total_rpc_elapsed_ms += elapsed.as_millis();
|
||||
progress_bar.inc(row_count as _);
|
||||
@@ -131,6 +136,11 @@ fn convert_record_batch(record_batch: RecordBatch) -> (Vec<Column>, u32) {
|
||||
|
||||
for (array, field) in record_batch.columns().iter().zip(fields.iter()) {
|
||||
let (values, datatype) = build_values(array);
|
||||
let semantic_type = match field.name().as_str() {
|
||||
"VendorID" => SemanticType::Tag,
|
||||
"tpep_pickup_datetime" => SemanticType::Timestamp,
|
||||
_ => SemanticType::Field,
|
||||
};
|
||||
|
||||
let column = Column {
|
||||
column_name: field.name().clone(),
|
||||
@@ -141,8 +151,7 @@ fn convert_record_batch(record_batch: RecordBatch) -> (Vec<Column>, u32) {
|
||||
.map(|bitmap| bitmap.buffer().as_slice().to_vec())
|
||||
.unwrap_or_default(),
|
||||
datatype: datatype.into(),
|
||||
// datatype and semantic_type are set to default
|
||||
..Default::default()
|
||||
semantic_type: semantic_type as i32,
|
||||
};
|
||||
columns.push(column);
|
||||
}
|
||||
@@ -243,11 +252,11 @@ fn is_record_batch_full(batch: &RecordBatch) -> bool {
|
||||
batch.columns().iter().all(|col| col.null_count() == 0)
|
||||
}
|
||||
|
||||
fn create_table_expr() -> CreateTableExpr {
|
||||
fn create_table_expr(table_name: &str) -> CreateTableExpr {
|
||||
CreateTableExpr {
|
||||
catalog_name: CATALOG_NAME.to_string(),
|
||||
schema_name: SCHEMA_NAME.to_string(),
|
||||
table_name: TABLE_NAME.to_string(),
|
||||
table_name: table_name.to_string(),
|
||||
desc: "".to_string(),
|
||||
column_defs: vec![
|
||||
ColumnDef {
|
||||
@@ -261,7 +270,7 @@ fn create_table_expr() -> CreateTableExpr {
|
||||
ColumnDef {
|
||||
name: "tpep_pickup_datetime".to_string(),
|
||||
data_type: ColumnDataType::TimestampMicrosecond as i32,
|
||||
is_nullable: true,
|
||||
is_nullable: false,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Timestamp as i32,
|
||||
comment: String::new(),
|
||||
@@ -405,31 +414,31 @@ fn create_table_expr() -> CreateTableExpr {
|
||||
],
|
||||
time_index: "tpep_pickup_datetime".to_string(),
|
||||
primary_keys: vec!["VendorID".to_string()],
|
||||
create_if_not_exists: false,
|
||||
create_if_not_exists: true,
|
||||
table_options: Default::default(),
|
||||
table_id: None,
|
||||
engine: "mito".to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
fn query_set() -> HashMap<String, String> {
|
||||
fn query_set(table_name: &str) -> HashMap<String, String> {
|
||||
HashMap::from([
|
||||
(
|
||||
"count_all".to_string(),
|
||||
format!("SELECT COUNT(*) FROM {TABLE_NAME};"),
|
||||
format!("SELECT COUNT(*) FROM {table_name};"),
|
||||
),
|
||||
(
|
||||
"fare_amt_by_passenger".to_string(),
|
||||
format!("SELECT passenger_count, MIN(fare_amount), MAX(fare_amount), SUM(fare_amount) FROM {TABLE_NAME} GROUP BY passenger_count"),
|
||||
format!("SELECT passenger_count, MIN(fare_amount), MAX(fare_amount), SUM(fare_amount) FROM {table_name} GROUP BY passenger_count"),
|
||||
)
|
||||
])
|
||||
}
|
||||
|
||||
async fn do_write(args: &Args, db: &Database) {
|
||||
async fn do_write(args: &Args, db: &Database, table_name: &str) {
|
||||
let mut file_list = get_file_list(args.path.clone().expect("Specify data path in argument"));
|
||||
let mut write_jobs = JoinSet::new();
|
||||
|
||||
let create_table_result = db.create(create_table_expr()).await;
|
||||
let create_table_result = db.create(create_table_expr(table_name)).await;
|
||||
println!("Create table result: {create_table_result:?}");
|
||||
|
||||
let progress_bar_style = ProgressStyle::with_template(
|
||||
@@ -447,8 +456,10 @@ async fn do_write(args: &Args, db: &Database) {
|
||||
let db = db.clone();
|
||||
let mpb = multi_progress_bar.clone();
|
||||
let pb_style = progress_bar_style.clone();
|
||||
let _ = write_jobs
|
||||
.spawn(async move { write_data(batch_size, &db, path, mpb, pb_style).await });
|
||||
let table_name = table_name.to_string();
|
||||
let _ = write_jobs.spawn(async move {
|
||||
write_data(&table_name, batch_size, &db, path, mpb, pb_style).await
|
||||
});
|
||||
}
|
||||
}
|
||||
while write_jobs.join_next().await.is_some() {
|
||||
@@ -457,24 +468,32 @@ async fn do_write(args: &Args, db: &Database) {
|
||||
let db = db.clone();
|
||||
let mpb = multi_progress_bar.clone();
|
||||
let pb_style = progress_bar_style.clone();
|
||||
let _ = write_jobs
|
||||
.spawn(async move { write_data(batch_size, &db, path, mpb, pb_style).await });
|
||||
let table_name = table_name.to_string();
|
||||
let _ = write_jobs.spawn(async move {
|
||||
write_data(&table_name, batch_size, &db, path, mpb, pb_style).await
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn do_query(num_iter: usize, db: &Database) {
|
||||
for (query_name, query) in query_set() {
|
||||
async fn do_query(num_iter: usize, db: &Database, table_name: &str) {
|
||||
for (query_name, query) in query_set(table_name) {
|
||||
println!("Running query: {query}");
|
||||
for i in 0..num_iter {
|
||||
let now = Instant::now();
|
||||
let _res = db.sql(&query).await.unwrap();
|
||||
let res = db.sql(&query).await.unwrap();
|
||||
match res {
|
||||
Output::AffectedRows(_) | Output::RecordBatches(_) => (),
|
||||
Output::Stream(stream) => {
|
||||
stream.try_collect::<Vec<_>>().await.unwrap();
|
||||
}
|
||||
}
|
||||
let elapsed = now.elapsed();
|
||||
println!(
|
||||
"query {}, iteration {}: {}ms",
|
||||
query_name,
|
||||
i,
|
||||
elapsed.as_millis()
|
||||
elapsed.as_millis(),
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -491,13 +510,14 @@ fn main() {
|
||||
.block_on(async {
|
||||
let client = Client::with_urls(vec![&args.endpoint]);
|
||||
let db = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client);
|
||||
let table_name = new_table_name();
|
||||
|
||||
if !args.skip_write {
|
||||
do_write(&args, &db).await;
|
||||
do_write(&args, &db, &table_name).await;
|
||||
}
|
||||
|
||||
if !args.skip_read {
|
||||
do_query(args.iter_num, &db).await;
|
||||
do_query(args.iter_num, &db, &table_name).await;
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
@@ -13,19 +13,19 @@ rpc_runtime_size = 8
|
||||
require_lease_before_startup = false
|
||||
|
||||
[heartbeat]
|
||||
# Interval for sending heartbeat messages to the Metasrv in milliseconds, 3000 by default.
|
||||
interval_millis = 3000
|
||||
# Interval for sending heartbeat messages to the Metasrv, 3 seconds by default.
|
||||
interval = "3s"
|
||||
|
||||
# Metasrv client options.
|
||||
[meta_client]
|
||||
# Metasrv address list.
|
||||
metasrv_addrs = ["127.0.0.1:3002"]
|
||||
# Heartbeat timeout in milliseconds, 500 by default.
|
||||
heartbeat_timeout_millis = 500
|
||||
# Operation timeout in milliseconds, 3000 by default.
|
||||
timeout_millis = 3000
|
||||
# Connect server timeout in milliseconds, 5000 by default.
|
||||
connect_timeout_millis = 1000
|
||||
# Heartbeat timeout, 500 milliseconds by default.
|
||||
heartbeat_timeout = "500ms"
|
||||
# Operation timeout, 3 seconds by default.
|
||||
timeout = "3s"
|
||||
# Connect server timeout, 1 second by default.
|
||||
connect_timeout = "1s"
|
||||
# `TCP_NODELAY` option for accepted connections, true by default.
|
||||
tcp_nodelay = true
|
||||
|
||||
@@ -51,7 +51,7 @@ type = "File"
|
||||
# The local file cache directory
|
||||
# cache_path = "/path/local_cache"
|
||||
# The local file cache capacity in bytes.
|
||||
# cache_capacity = "256Mib"
|
||||
# cache_capacity = "256MB"
|
||||
|
||||
# Compaction options, see `standalone.example.toml`.
|
||||
[storage.compaction]
|
||||
@@ -101,6 +101,10 @@ auto_flush_interval = "1h"
|
||||
global_write_buffer_size = "1GB"
|
||||
# Global write buffer size threshold to reject write requests (default 2G).
|
||||
global_write_buffer_reject_size = "2GB"
|
||||
# Cache size for SST metadata (default 128MB). Setting it to 0 to disable the cache.
|
||||
sst_meta_cache_size = "128MB"
|
||||
# Cache size for vectors and arrow arrays (default 512MB). Setting it to 0 to disable the cache.
|
||||
vector_cache_size = "512MB"
|
||||
|
||||
# Log options
|
||||
# [logging]
|
||||
|
||||
@@ -2,10 +2,10 @@
|
||||
mode = "distributed"
|
||||
|
||||
[heartbeat]
|
||||
# Interval for sending heartbeat task to the Metasrv in milliseconds, 5000 by default.
|
||||
interval_millis = 5000
|
||||
# Interval for retry sending heartbeat task in milliseconds, 5000 by default.
|
||||
retry_interval_millis = 5000
|
||||
# Interval for sending heartbeat task to the Metasrv, 5 seconds by default.
|
||||
interval = "5s"
|
||||
# Interval for retry sending heartbeat task, 5 seconds by default.
|
||||
retry_interval = "5s"
|
||||
|
||||
# HTTP server options, see `standalone.example.toml`.
|
||||
[http]
|
||||
@@ -59,10 +59,10 @@ enable = true
|
||||
# Metasrv client options, see `datanode.example.toml`.
|
||||
[meta_client]
|
||||
metasrv_addrs = ["127.0.0.1:3002"]
|
||||
timeout_millis = 3000
|
||||
timeout = "3s"
|
||||
# DDL timeouts options.
|
||||
ddl_timeout_millis = 10000
|
||||
connect_timeout_millis = 1000
|
||||
ddl_timeout = "10s"
|
||||
connect_timeout = "1s"
|
||||
tcp_nodelay = true
|
||||
|
||||
# Log options, see `standalone.example.toml`
|
||||
|
||||
@@ -32,6 +32,6 @@ retry_delay = "500ms"
|
||||
# [datanode]
|
||||
# # Datanode client options.
|
||||
# [datanode.client_options]
|
||||
# timeout_millis = 10000
|
||||
# connect_timeout_millis = 10000
|
||||
# timeout = "10s"
|
||||
# connect_timeout = "10s"
|
||||
# tcp_nodelay = true
|
||||
|
||||
@@ -82,6 +82,8 @@ enable = true
|
||||
|
||||
# WAL options.
|
||||
[wal]
|
||||
# WAL data directory
|
||||
# dir = "/tmp/greptimedb/wal"
|
||||
# WAL file size in bytes.
|
||||
file_size = "256MB"
|
||||
# WAL purge threshold.
|
||||
@@ -93,8 +95,8 @@ read_batch_size = 128
|
||||
# Whether to sync log file after every write.
|
||||
sync_write = false
|
||||
|
||||
# Kv options.
|
||||
[kv_store]
|
||||
# Metadata storage options.
|
||||
[metadata_store]
|
||||
# Kv file size in bytes.
|
||||
file_size = "256MB"
|
||||
# Kv purge threshold.
|
||||
@@ -118,7 +120,7 @@ type = "File"
|
||||
# Cache configuration for object storage such as 'S3' etc.
|
||||
# cache_path = "/path/local_cache"
|
||||
# The local file cache capacity in bytes.
|
||||
# cache_capacity = "256Mib"
|
||||
# cache_capacity = "256MB"
|
||||
|
||||
# Compaction options.
|
||||
[storage.compaction]
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
FROM ubuntu:22.04 as builder
|
||||
FROM ubuntu:20.04 as builder
|
||||
|
||||
ARG CARGO_PROFILE
|
||||
ARG FEATURES
|
||||
@@ -7,6 +7,11 @@ ARG OUTPUT_DIR
|
||||
ENV LANG en_US.utf8
|
||||
WORKDIR /greptimedb
|
||||
|
||||
# Add PPA for Python 3.10.
|
||||
RUN apt-get update && \
|
||||
DEBIAN_FRONTEND=noninteractive apt-get install -y software-properties-common && \
|
||||
add-apt-repository ppa:deadsnakes/ppa -y
|
||||
|
||||
# Install dependencies.
|
||||
RUN --mount=type=cache,target=/var/cache/apt \
|
||||
apt-get update && apt-get install -y \
|
||||
|
||||
@@ -1,8 +1,13 @@
|
||||
FROM ubuntu:22.04
|
||||
FROM ubuntu:20.04
|
||||
|
||||
ENV LANG en_US.utf8
|
||||
WORKDIR /greptimedb
|
||||
|
||||
# Add PPA for Python 3.10.
|
||||
RUN apt-get update && \
|
||||
DEBIAN_FRONTEND=noninteractive apt-get install -y software-properties-common && \
|
||||
add-apt-repository ppa:deadsnakes/ppa -y
|
||||
|
||||
# Install dependencies.
|
||||
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
|
||||
libssl-dev \
|
||||
@@ -14,8 +19,13 @@ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
|
||||
build-essential \
|
||||
pkg-config \
|
||||
python3.10 \
|
||||
python3.10-dev \
|
||||
python3-pip
|
||||
python3.10-dev
|
||||
|
||||
# Remove Python 3.8 and install pip.
|
||||
RUN apt-get -y purge python3.8 && \
|
||||
apt-get -y autoremove && \
|
||||
ln -s /usr/bin/python3.10 /usr/bin/python3 && \
|
||||
curl -sS https://bootstrap.pypa.io/get-pip.py | python3.10
|
||||
|
||||
RUN git config --global --add safe.directory /greptimedb
|
||||
|
||||
|
||||
61
docs/benchmarks/tsbs/v0.4.0.md
Normal file
@@ -0,0 +1,61 @@
# TSBS benchmark - v0.4.0

## Environment

### Local
| | |
| ------ | ---------------------------------- |
| CPU | AMD Ryzen 7 7735HS (8 core 3.2GHz) |
| Memory | 32GB |
| Disk | SOLIDIGM SSDPFKNU010TZ |
| OS | Ubuntu 22.04.2 LTS |

### Aliyun amd64

| | |
| ------- | -------------- |
| Machine | ecs.g7.4xlarge |
| CPU | 16 core |
| Memory | 64GB |
| Disk | 100G |
| OS | Ubuntu 22.04 |

### Aliyun arm64

| | |
| ------- | ----------------- |
| Machine | ecs.g8y.4xlarge |
| CPU | 16 core |
| Memory | 64GB |
| Disk | 100G |
| OS | Ubuntu 22.04 ARM |


## Write performance

| Environment | Ingest rate(rows/s) |
| ------------------ | --------------------- |
| Local | 365280.60 |
| Aliyun g7.4xlarge | 341368.72 |
| Aliyun g8y.4xlarge | 320907.29 |


## Query performance

| Query type | Local (ms) | Aliyun g7.4xlarge (ms) | Aliyun g8y.4xlarge (ms) |
| --------------------- | ---------- | ---------------------- | ----------------------- |
| cpu-max-all-1 | 50.70 | 31.46 | 47.61 |
| cpu-max-all-8 | 262.16 | 129.26 | 152.43 |
| double-groupby-1 | 2512.71 | 1408.19 | 1586.10 |
| double-groupby-5 | 3896.15 | 2304.29 | 2585.29 |
| double-groupby-all | 5404.67 | 3337.61 | 3773.91 |
| groupby-orderby-limit | 3786.98 | 2065.72 | 2312.57 |
| high-cpu-1 | 71.96 | 37.29 | 54.01 |
| high-cpu-all | 9468.75 | 7595.69 | 8467.46 |
| lastpoint | 13379.43 | 11253.76 | 12949.40 |
| single-groupby-1-1-1 | 20.72 | 12.16 | 13.35 |
| single-groupby-1-1-12 | 28.53 | 15.67 | 21.62 |
| single-groupby-1-8-1 | 72.23 | 37.90 | 43.52 |
| single-groupby-5-1-1 | 26.75 | 15.59 | 17.48 |
| single-groupby-5-1-12 | 45.41 | 22.90 | 31.96 |
| single-groupby-5-8-1 | 107.96 | 59.76 | 69.58 |
113
docs/rfcs/2023-11-03-inverted-index.md
Normal file
@@ -0,0 +1,113 @@
---
Feature Name: Inverted Index for SST File
Tracking Issue: TBD
Date: 2023-11-03
Author: "Zhong Zhenchi <zhongzc_arch@outlook.com>"
---

# Summary
This RFC proposes an optimization towards the storage engine by introducing an inverted indexing methodology aimed at optimizing label selection queries specifically pertaining to Metrics with tag columns as the target for optimization.

# Introduction
In the current system setup, in the Mito Engine, the first column of Primary Keys has a Min-Max index, which significantly optimizes the outcome. However, there are limitations when it comes to other columns, primarily tags. This RFC suggests the implementation of an inverted index to provide enhanced filtering benefits to bridge these limitations and improve overall system performance.

# Design Detail

## Inverted Index

The primary aim of the proposed inverted index is to optimize tag columns in the SST Parquet Files within the Mito Engine. The mapping and construction of an inverted index, from Tag Values to Row Groups, enables efficient logical structures that provide faster and more flexible queries.

When scanning SST Files, pushed-down filters applied to a respective Tag's inverted index, determine the final Row Groups to be indexed and scanned, further bolstering the speed and efficiency of data retrieval processes.

## Index Format

The Inverted Index for each SST file follows the format shown below:

```
inverted_index₀ inverted_index₁ ... inverted_indexₙ footer
```

The structure inside each Inverted Index is as followed:

```
bitmap₀ bitmap₁ bitmap₂ ... bitmapₙ null_bitmap fst
```

The format is encapsulated by a footer:

```
footer_payload footer_payload_size
```

The `footer_payload` is presented in protobuf encoding of `InvertedIndexFooter`.

The complete format is containerized in [Puffin](https://iceberg.apache.org/puffin-spec/) with the type defined as `greptime-inverted-index-v1`.
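
As a hedged illustration of this layout, the sketch below splits a serialized blob into its index area and `footer_payload`. The RFC does not specify the width or endianness of `footer_payload_size`; a little-endian `u32` trailer is assumed here purely for illustration, and the function name is hypothetical rather than taken from the codebase.

```rust
// Assumption: footer_payload_size is a little-endian u32 written at the very end.
fn split_footer(blob: &[u8]) -> Option<(&[u8], &[u8])> {
    // Split off the size trailer, then use it to locate the footer payload.
    let (rest, size_bytes) = blob.split_at(blob.len().checked_sub(4)?);
    let payload_size = u32::from_le_bytes(size_bytes.try_into().ok()?) as usize;
    // Everything before the payload is the sequence of inverted indexes.
    let (indexes, payload) = rest.split_at(rest.len().checked_sub(payload_size)?);
    // `payload` would then be decoded as the protobuf `InvertedIndexFooter`.
    Some((indexes, payload))
}

fn main() {
    // Toy blob: 3 bytes of index data, a 2-byte payload, then the size trailer.
    let mut blob = vec![1u8, 2, 3, 0xAA, 0xBB];
    blob.extend_from_slice(&2u32.to_le_bytes());
    let (indexes, payload) = split_footer(&blob).unwrap();
    assert_eq!(indexes, &[1, 2, 3]);
    assert_eq!(payload, &[0xAA, 0xBB]);
}
```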

## Protobuf Details

The `InvertedIndexFooter` is defined in the following protobuf structure:

```protobuf
message InvertedIndexFooter {
  repeated InvertedIndexMeta metas;
}

message InvertedIndexMeta {
  string name;
  uint64 row_count_in_group;
  uint64 fst_offset;
  uint64 fst_size;
  uint64 null_bitmap_offset;
  uint64 null_bitmap_size;
  InvertedIndexStats stats;
}

message InvertedIndexStats {
  uint64 null_count;
  uint64 distinct_count;
  bytes min_value;
  bytes max_value;
}
```

## Bitmap

Bitmaps are used to represent indices of fixed-size groups. Rows are divided into groups of a fixed size, defined in the `InvertedIndexMeta` as `row_count_in_group`.

For example, when `row_count_in_group` is `4096`, it means each group has `4096` rows. If there are a total of `10000` rows, there will be `3` groups in total. The first two groups will have `4096` rows each, and the last group will have `1808` rows. If the indexed values are found in row `200` and `9000`, they will correspond to groups `0` and `2`, respectively. Therefore, the bitmap should show `0` and `2`.

Bitmap is implemented using [BitVec](https://docs.rs/bitvec/latest/bitvec/), selected due to its efficient representation of dense data arrays typical of indices of groups.
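
To make the grouping arithmetic concrete, the sketch below reproduces the example above using the `bitvec` crate the RFC references: rows `200` and `9000` with `row_count_in_group = 4096` land in groups `0` and `2`. It is an illustrative sketch only; the function name is hypothetical and not taken from the engine's code.

```rust
// Illustrative only: map row ids to fixed-size groups and mark them in a BitVec.
use bitvec::prelude::*;

fn group_bitmap(row_ids: &[u64], total_rows: u64, row_count_in_group: u64) -> BitVec {
    // Number of fixed-size groups; the last group may hold fewer rows.
    let group_count = ((total_rows + row_count_in_group - 1) / row_count_in_group) as usize;
    let mut bitmap = bitvec![0; group_count];
    for &row in row_ids {
        // e.g. row 200 -> group 0, row 9000 -> group 2 when row_count_in_group is 4096.
        bitmap.set((row / row_count_in_group) as usize, true);
    }
    bitmap
}

fn main() {
    let bitmap = group_bitmap(&[200, 9000], 10_000, 4096);
    assert_eq!(bitmap.iter_ones().collect::<Vec<_>>(), vec![0, 2]);
}
```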

## Finite State Transducer (FST)

[FST](https://docs.rs/fst/latest/fst/) is a highly efficient data structure ideal for in-memory indexing. It represents ordered sets or maps where the keys are bytes. The choice of the FST effectively balances the need for performance, space efficiency, and the ability to perform complex analyses such as regular expression matching.

The conventional usage of FST and `u64` values has been adapted to facilitate indirect indexing to row groups. As the row groups are represented as Bitmaps, we utilize the `u64` values split into bitmap's offset (higher 32 bits) and size (lower 32 bits) to represent the location of these Bitmaps.
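
As a brief illustration of this packing scheme (the helper names are hypothetical, not from the codebase), the bitmap location can be encoded and decoded like this:

```rust
// Pack a bitmap's offset (high 32 bits) and size (low 32 bits) into one u64,
// mirroring the value layout stored in the FST as described above.
fn pack_bitmap_location(offset: u32, size: u32) -> u64 {
    ((offset as u64) << 32) | size as u64
}

fn unpack_bitmap_location(value: u64) -> (u32, u32) {
    ((value >> 32) as u32, (value & 0xFFFF_FFFF) as u32)
}

fn main() {
    let packed = pack_bitmap_location(4096, 512);
    assert_eq!(unpack_bitmap_location(packed), (4096, 512));
}
```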

## API Design

Two APIs `InvertedIndexBuilder` for building indexes and `InvertedIndexSearcher` for querying indexes are designed:

```rust
type Bytes = Vec<u8>;
type GroupId = u64;

trait InvertedIndexBuilder {
    fn add(&mut self, name: &str, value: Option<&Bytes>, group_id: GroupId) -> Result<()>;
    fn finish(&mut self) -> Result<()>;
}

enum Predicate {
    Gt(Bytes),
    GtEq(Bytes),
    Lt(Bytes),
    LtEq(Bytes),
    InList(Vec<Bytes>),
    RegexMatch(String),
}

trait InvertedIndexSearcher {
    fn search(&mut self, name: &str, predicates: &[Predicate]) -> Result<impl IntoIterator<GroupId>>;
}
```
@@ -28,7 +28,12 @@ pub type Result<T> = std::result::Result<T, Error>;
|
||||
#[stack_trace_debug]
|
||||
pub enum Error {
|
||||
#[snafu(display("Unknown proto column datatype: {}", datatype))]
|
||||
UnknownColumnDataType { datatype: i32, location: Location },
|
||||
UnknownColumnDataType {
|
||||
datatype: i32,
|
||||
location: Location,
|
||||
#[snafu(source)]
|
||||
error: prost::DecodeError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to create column datatype from {:?}", from))]
|
||||
IntoColumnDataType {
|
||||
|
||||
@@ -50,7 +50,7 @@ pub struct ColumnDataTypeWrapper(ColumnDataType);
|
||||
|
||||
impl ColumnDataTypeWrapper {
|
||||
pub fn try_new(datatype: i32) -> Result<Self> {
|
||||
let datatype = ColumnDataType::from_i32(datatype)
|
||||
let datatype = ColumnDataType::try_from(datatype)
|
||||
.context(error::UnknownColumnDataTypeSnafu { datatype })?;
|
||||
Ok(Self(datatype))
|
||||
}
|
||||
@@ -705,7 +705,7 @@ pub fn is_semantic_type_eq(type_value: i32, semantic_type: SemanticType) -> bool
|
||||
|
||||
/// Returns true if the pb type value is valid.
|
||||
pub fn is_column_type_value_eq(type_value: i32, expect_type: &ConcreteDataType) -> bool {
|
||||
let Some(column_type) = ColumnDataType::from_i32(type_value) else {
|
||||
let Ok(column_type) = ColumnDataType::try_from(type_value) else {
|
||||
return false;
|
||||
};
|
||||
|
||||
|
||||
@@ -30,10 +30,10 @@ futures = "0.3"
|
||||
futures-util.workspace = true
|
||||
lazy_static.workspace = true
|
||||
meta-client = { workspace = true }
|
||||
metrics.workspace = true
|
||||
moka = { workspace = true, features = ["future"] }
|
||||
parking_lot = "0.12"
|
||||
partition.workspace = true
|
||||
prometheus.workspace = true
|
||||
regex.workspace = true
|
||||
serde.workspace = true
|
||||
serde_json = "1.0"
|
||||
|
||||
@@ -25,11 +25,10 @@ use common_meta::kv_backend::{KvBackend, KvBackendRef, TxnService};
|
||||
use common_meta::rpc::store::{
|
||||
BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse, BatchPutRequest,
|
||||
BatchPutResponse, CompareAndPutRequest, CompareAndPutResponse, DeleteRangeRequest,
|
||||
DeleteRangeResponse, MoveValueRequest, MoveValueResponse, PutRequest, PutResponse,
|
||||
RangeRequest, RangeResponse,
|
||||
DeleteRangeResponse, PutRequest, PutResponse, RangeRequest, RangeResponse,
|
||||
};
|
||||
use common_meta::rpc::KeyValue;
|
||||
use common_telemetry::{debug, timer};
|
||||
use common_telemetry::debug;
|
||||
use meta_client::client::MetaClient;
|
||||
use moka::future::{Cache, CacheBuilder};
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
@@ -152,25 +151,11 @@ impl KvBackend for CachedMetaKvBackend {
|
||||
}
|
||||
}
|
||||
|
||||
async fn move_value(&self, req: MoveValueRequest) -> Result<MoveValueResponse> {
|
||||
let from_key = &req.from_key.clone();
|
||||
let to_key = &req.to_key.clone();
|
||||
|
||||
let ret = self.kv_backend.move_value(req).await;
|
||||
|
||||
if ret.is_ok() {
|
||||
self.invalidate_key(from_key).await;
|
||||
self.invalidate_key(to_key).await;
|
||||
}
|
||||
|
||||
ret
|
||||
}
|
||||
|
||||
async fn get(&self, key: &[u8]) -> Result<Option<KeyValue>> {
|
||||
let _timer = timer!(METRIC_CATALOG_KV_GET);
|
||||
let _timer = METRIC_CATALOG_KV_GET.start_timer();
|
||||
|
||||
let init = async {
|
||||
let _timer = timer!(METRIC_CATALOG_KV_REMOTE_GET);
|
||||
let _timer = METRIC_CATALOG_KV_REMOTE_GET.start_timer();
|
||||
self.kv_backend.get(key).await.map(|val| {
|
||||
val.with_context(|| CacheNotGetSnafu {
|
||||
key: String::from_utf8_lossy(key),
|
||||
@@ -319,14 +304,6 @@ impl KvBackend for MetaKvBackend {
|
||||
.context(ExternalSnafu)
|
||||
}
|
||||
|
||||
async fn move_value(&self, req: MoveValueRequest) -> Result<MoveValueResponse> {
|
||||
self.client
|
||||
.move_value(req)
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(ExternalSnafu)
|
||||
}
|
||||
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
|
||||
@@ -17,8 +17,8 @@ use std::collections::hash_map::Entry;
|
||||
use std::collections::HashMap;
|
||||
use std::sync::{Arc, RwLock, Weak};
|
||||
|
||||
use common_catalog::build_db_string;
|
||||
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME};
|
||||
use metrics::{decrement_gauge, increment_gauge};
|
||||
use snafu::OptionExt;
|
||||
use table::TableRef;
|
||||
|
||||
@@ -166,7 +166,7 @@ impl MemoryCatalogManager {
|
||||
let arc_self = Arc::new(self.clone());
|
||||
let catalog = arc_self.create_catalog_entry(name);
|
||||
e.insert(catalog);
|
||||
increment_gauge!(crate::metrics::METRIC_CATALOG_MANAGER_CATALOG_COUNT, 1.0);
|
||||
crate::metrics::METRIC_CATALOG_MANAGER_CATALOG_COUNT.inc();
|
||||
Ok(true)
|
||||
}
|
||||
Entry::Occupied(_) => Ok(false),
|
||||
@@ -187,11 +187,9 @@ impl MemoryCatalogManager {
|
||||
})?;
|
||||
let result = schema.remove(&request.table_name);
|
||||
if result.is_some() {
|
||||
decrement_gauge!(
|
||||
crate::metrics::METRIC_CATALOG_MANAGER_TABLE_COUNT,
|
||||
1.0,
|
||||
&[crate::metrics::db_label(&request.catalog, &request.schema)],
|
||||
);
|
||||
crate::metrics::METRIC_CATALOG_MANAGER_TABLE_COUNT
|
||||
.with_label_values(&[build_db_string(&request.catalog, &request.schema).as_str()])
|
||||
.dec();
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
@@ -210,7 +208,7 @@ impl MemoryCatalogManager {
|
||||
match catalog.entry(request.schema) {
|
||||
Entry::Vacant(e) => {
|
||||
e.insert(HashMap::new());
|
||||
increment_gauge!(crate::metrics::METRIC_CATALOG_MANAGER_SCHEMA_COUNT, 1.0);
|
||||
crate::metrics::METRIC_CATALOG_MANAGER_SCHEMA_COUNT.inc();
|
||||
Ok(true)
|
||||
}
|
||||
Entry::Occupied(_) => Ok(false),
|
||||
@@ -238,11 +236,9 @@ impl MemoryCatalogManager {
|
||||
.fail();
|
||||
}
|
||||
schema.insert(request.table_name, request.table);
|
||||
increment_gauge!(
|
||||
crate::metrics::METRIC_CATALOG_MANAGER_TABLE_COUNT,
|
||||
1.0,
|
||||
&[crate::metrics::db_label(&request.catalog, &request.schema)],
|
||||
);
|
||||
crate::metrics::METRIC_CATALOG_MANAGER_TABLE_COUNT
|
||||
.with_label_values(&[build_db_string(&request.catalog, &request.schema).as_str()])
|
||||
.inc();
|
||||
Ok(true)
|
||||
}
|
||||
|
||||
|
||||
@@ -12,18 +12,24 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use common_catalog::build_db_string;
|
||||
|
||||
pub(crate) const METRIC_DB_LABEL: &str = "db";
|
||||
|
||||
pub(crate) const METRIC_CATALOG_MANAGER_CATALOG_COUNT: &str = "catalog.catalog_count";
|
||||
pub(crate) const METRIC_CATALOG_MANAGER_SCHEMA_COUNT: &str = "catalog.schema_count";
|
||||
pub(crate) const METRIC_CATALOG_MANAGER_TABLE_COUNT: &str = "catalog.table_count";
|
||||
use lazy_static::lazy_static;
|
||||
use prometheus::*;
|
||||
|
||||
pub(crate) const METRIC_CATALOG_KV_REMOTE_GET: &str = "catalog.kv.get.remote";
|
||||
pub(crate) const METRIC_CATALOG_KV_GET: &str = "catalog.kv.get";
|
||||
|
||||
#[inline]
|
||||
pub(crate) fn db_label(catalog: &str, schema: &str) -> (&'static str, String) {
|
||||
(METRIC_DB_LABEL, build_db_string(catalog, schema))
|
||||
lazy_static! {
|
||||
pub static ref METRIC_CATALOG_MANAGER_CATALOG_COUNT: IntGauge =
|
||||
register_int_gauge!("catalog_catalog_count", "catalog catalog count").unwrap();
|
||||
pub static ref METRIC_CATALOG_MANAGER_SCHEMA_COUNT: IntGauge =
|
||||
register_int_gauge!("catalog_schema_count", "catalog schema count").unwrap();
|
||||
pub static ref METRIC_CATALOG_MANAGER_TABLE_COUNT: IntGaugeVec = register_int_gauge_vec!(
|
||||
"catalog_table_count",
|
||||
"catalog table count",
|
||||
&[METRIC_DB_LABEL]
|
||||
)
|
||||
.unwrap();
|
||||
pub static ref METRIC_CATALOG_KV_REMOTE_GET: Histogram =
|
||||
register_histogram!("catalog_kv_get_remote", "catalog kv get remote").unwrap();
|
||||
pub static ref METRIC_CATALOG_KV_GET: Histogram =
|
||||
register_histogram!("catalog_kv_get", "catalog kv get").unwrap();
|
||||
}
|
||||
|
||||
@@ -27,8 +27,10 @@ datatypes = { workspace = true }
|
||||
derive_builder.workspace = true
|
||||
enum_dispatch = "0.3"
|
||||
futures-util.workspace = true
|
||||
lazy_static.workspace = true
|
||||
moka = { workspace = true, features = ["future"] }
|
||||
parking_lot = "0.12"
|
||||
prometheus.workspace = true
|
||||
prost.workspace = true
|
||||
rand.workspace = true
|
||||
session = { workspace = true }
|
||||
@@ -41,11 +43,10 @@ tonic.workspace = true
|
||||
common-grpc-expr = { workspace = true }
|
||||
datanode = { workspace = true }
|
||||
derive-new = "0.5"
|
||||
prost.workspace = true
|
||||
substrait = { workspace = true }
|
||||
tracing = "0.1"
|
||||
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
|
||||
|
||||
[dev-dependencies.substrait_proto]
|
||||
package = "substrait"
|
||||
version = "0.7"
|
||||
version = "0.17"
|
||||
|
||||
@@ -139,11 +139,19 @@ impl Client {
|
||||
}
|
||||
|
||||
fn max_grpc_recv_message_size(&self) -> usize {
|
||||
self.inner.channel_manager.config().max_recv_message_size
|
||||
self.inner
|
||||
.channel_manager
|
||||
.config()
|
||||
.max_recv_message_size
|
||||
.as_bytes() as usize
|
||||
}
|
||||
|
||||
fn max_grpc_send_message_size(&self) -> usize {
|
||||
self.inner.channel_manager.config().max_send_message_size
|
||||
self.inner
|
||||
.channel_manager
|
||||
.config()
|
||||
.max_send_message_size
|
||||
.as_bytes() as usize
|
||||
}
|
||||
|
||||
pub(crate) fn make_flight_client(&self) -> Result<FlightClient> {
|
||||
|
||||
@@ -28,7 +28,7 @@ use common_grpc::flight::{FlightDecoder, FlightMessage};
|
||||
use common_query::Output;
|
||||
use common_recordbatch::error::ExternalSnafu;
|
||||
use common_recordbatch::RecordBatchStreamAdaptor;
|
||||
use common_telemetry::{logging, timer};
|
||||
use common_telemetry::logging;
|
||||
use futures_util::StreamExt;
|
||||
use prost::Message;
|
||||
use snafu::{ensure, ResultExt};
|
||||
@@ -111,12 +111,12 @@ impl Database {
|
||||
}
|
||||
|
||||
pub async fn insert(&self, requests: InsertRequests) -> Result<u32> {
|
||||
let _timer = timer!(metrics::METRIC_GRPC_INSERT);
|
||||
let _timer = metrics::METRIC_GRPC_INSERT.start_timer();
|
||||
self.handle(Request::Inserts(requests)).await
|
||||
}
|
||||
|
||||
pub async fn row_insert(&self, requests: RowInsertRequests) -> Result<u32> {
|
||||
let _timer = timer!(metrics::METRIC_GRPC_INSERT);
|
||||
let _timer = metrics::METRIC_GRPC_INSERT.start_timer();
|
||||
self.handle(Request::RowInserts(requests)).await
|
||||
}
|
||||
|
||||
@@ -141,7 +141,7 @@ impl Database {
|
||||
}
|
||||
|
||||
pub async fn delete(&self, request: DeleteRequests) -> Result<u32> {
|
||||
let _timer = timer!(metrics::METRIC_GRPC_DELETE);
|
||||
let _timer = metrics::METRIC_GRPC_DELETE.start_timer();
|
||||
self.handle(Request::Deletes(request)).await
|
||||
}
|
||||
|
||||
@@ -167,11 +167,14 @@ impl Database {
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn sql(&self, sql: &str) -> Result<Output> {
|
||||
let _timer = timer!(metrics::METRIC_GRPC_SQL);
|
||||
pub async fn sql<S>(&self, sql: S) -> Result<Output>
|
||||
where
|
||||
S: AsRef<str>,
|
||||
{
|
||||
let _timer = metrics::METRIC_GRPC_SQL.start_timer();
|
||||
self.do_get(
|
||||
Request::Query(QueryRequest {
|
||||
query: Some(Query::Sql(sql.to_string())),
|
||||
query: Some(Query::Sql(sql.as_ref().to_string())),
|
||||
}),
|
||||
0,
|
||||
)
|
||||
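With the new `S: AsRef<str>` bound, callers can pass either a borrowed or an owned string to `sql`. An illustrative call site (not taken from the changeset; the `db` handle and table name are assumptions):

async fn run_queries(db: &Database) -> Result<()> {
    // A &str still works ...
    let _ = db.sql("show databases").await?;
    // ... and so does an owned String, without an explicit `.as_str()`.
    let _ = db.sql(format!("show create table {}", "monitor")).await?;
    Ok(())
}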
@@ -179,7 +182,7 @@ impl Database {
|
||||
}
|
||||
|
||||
pub async fn logical_plan(&self, logical_plan: Vec<u8>, trace_id: u64) -> Result<Output> {
|
||||
let _timer = timer!(metrics::METRIC_GRPC_LOGICAL_PLAN);
|
||||
let _timer = metrics::METRIC_GRPC_LOGICAL_PLAN.start_timer();
|
||||
self.do_get(
|
||||
Request::Query(QueryRequest {
|
||||
query: Some(Query::LogicalPlan(logical_plan)),
|
||||
@@ -196,7 +199,7 @@ impl Database {
|
||||
end: &str,
|
||||
step: &str,
|
||||
) -> Result<Output> {
|
||||
let _timer = timer!(metrics::METRIC_GRPC_PROMQL_RANGE_QUERY);
|
||||
let _timer = metrics::METRIC_GRPC_PROMQL_RANGE_QUERY.start_timer();
|
||||
self.do_get(
|
||||
Request::Query(QueryRequest {
|
||||
query: Some(Query::PromRangeQuery(PromRangeQuery {
|
||||
@@ -212,7 +215,7 @@ impl Database {
|
||||
}
|
||||
|
||||
pub async fn create(&self, expr: CreateTableExpr) -> Result<Output> {
|
||||
let _timer = timer!(metrics::METRIC_GRPC_CREATE_TABLE);
|
||||
let _timer = metrics::METRIC_GRPC_CREATE_TABLE.start_timer();
|
||||
self.do_get(
|
||||
Request::Ddl(DdlRequest {
|
||||
expr: Some(DdlExpr::CreateTable(expr)),
|
||||
@@ -223,7 +226,7 @@ impl Database {
|
||||
}
|
||||
|
||||
pub async fn alter(&self, expr: AlterExpr) -> Result<Output> {
|
||||
let _timer = timer!(metrics::METRIC_GRPC_ALTER);
|
||||
let _timer = metrics::METRIC_GRPC_ALTER.start_timer();
|
||||
self.do_get(
|
||||
Request::Ddl(DdlRequest {
|
||||
expr: Some(DdlExpr::Alter(expr)),
|
||||
@@ -234,7 +237,7 @@ impl Database {
|
||||
}
|
||||
|
||||
pub async fn drop_table(&self, expr: DropTableExpr) -> Result<Output> {
|
||||
let _timer = timer!(metrics::METRIC_GRPC_DROP_TABLE);
|
||||
let _timer = metrics::METRIC_GRPC_DROP_TABLE.start_timer();
|
||||
self.do_get(
|
||||
Request::Ddl(DdlRequest {
|
||||
expr: Some(DdlExpr::DropTable(expr)),
|
||||
@@ -245,7 +248,7 @@ impl Database {
|
||||
}
|
||||
|
||||
pub async fn truncate_table(&self, expr: TruncateTableExpr) -> Result<Output> {
|
||||
let _timer = timer!(metrics::METRIC_GRPC_TRUNCATE_TABLE);
|
||||
let _timer = metrics::METRIC_GRPC_TRUNCATE_TABLE.start_timer();
|
||||
self.do_get(
|
||||
Request::Ddl(DdlRequest {
|
||||
expr: Some(DdlExpr::TruncateTable(expr)),
|
||||
@@ -257,7 +260,7 @@ impl Database {
|
||||
|
||||
async fn do_get(&self, request: Request, trace_id: u64) -> Result<Output> {
|
||||
// FIXME(paomian): some labels should be added to this metric
|
||||
let _timer = timer!(metrics::METRIC_GRPC_DO_GET);
|
||||
let _timer = metrics::METRIC_GRPC_DO_GET.start_timer();
|
||||
let request = self.to_rpc_request(request, trace_id);
|
||||
let request = Ticket {
|
||||
ticket: request.encode_to_vec().into(),
|
||||
|
||||
@@ -26,6 +26,8 @@ use api::v1::greptime_response::Response;
|
||||
use api::v1::{AffectedRows, GreptimeResponse};
|
||||
pub use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use common_error::status_code::StatusCode;
|
||||
pub use common_query::Output;
|
||||
pub use common_recordbatch::{RecordBatches, SendableRecordBatchStream};
|
||||
use snafu::OptionExt;
|
||||
|
||||
pub use self::client::Client;
|
||||
|
||||
@@ -12,15 +12,34 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//! client metrics
|
||||
pub const METRIC_GRPC_CREATE_TABLE: &str = "grpc.create_table";
|
||||
pub const METRIC_GRPC_PROMQL_RANGE_QUERY: &str = "grpc.promql.range_query";
|
||||
pub const METRIC_GRPC_INSERT: &str = "grpc.insert";
|
||||
pub const METRIC_GRPC_DELETE: &str = "grpc.delete";
|
||||
pub const METRIC_GRPC_SQL: &str = "grpc.sql";
|
||||
pub const METRIC_GRPC_LOGICAL_PLAN: &str = "grpc.logical_plan";
|
||||
pub const METRIC_GRPC_ALTER: &str = "grpc.alter";
|
||||
pub const METRIC_GRPC_DROP_TABLE: &str = "grpc.drop_table";
|
||||
pub const METRIC_GRPC_TRUNCATE_TABLE: &str = "grpc.truncate_table";
|
||||
pub const METRIC_GRPC_DO_GET: &str = "grpc.do_get";
|
||||
pub(crate) const METRIC_REGION_REQUEST_GRPC: &str = "grpc.region_request";
|
||||
use lazy_static::lazy_static;
|
||||
use prometheus::*;
|
||||
|
||||
lazy_static! {
|
||||
pub static ref METRIC_GRPC_CREATE_TABLE: Histogram =
|
||||
register_histogram!("grpc_create_table", "grpc create table").unwrap();
|
||||
pub static ref METRIC_GRPC_PROMQL_RANGE_QUERY: Histogram =
|
||||
register_histogram!("grpc_promql_range_query", "grpc promql range query").unwrap();
|
||||
pub static ref METRIC_GRPC_INSERT: Histogram =
|
||||
register_histogram!("grpc_insert", "grpc insert").unwrap();
|
||||
pub static ref METRIC_GRPC_DELETE: Histogram =
|
||||
register_histogram!("grpc_delete", "grpc delete").unwrap();
|
||||
pub static ref METRIC_GRPC_SQL: Histogram =
|
||||
register_histogram!("grpc_sql", "grpc sql").unwrap();
|
||||
pub static ref METRIC_GRPC_LOGICAL_PLAN: Histogram =
|
||||
register_histogram!("grpc_logical_plan", "grpc logical plan").unwrap();
|
||||
pub static ref METRIC_GRPC_ALTER: Histogram =
|
||||
register_histogram!("grpc_alter", "grpc alter").unwrap();
|
||||
pub static ref METRIC_GRPC_DROP_TABLE: Histogram =
|
||||
register_histogram!("grpc_drop_table", "grpc drop table").unwrap();
|
||||
pub static ref METRIC_GRPC_TRUNCATE_TABLE: Histogram =
|
||||
register_histogram!("grpc_truncate_table", "grpc truncate table").unwrap();
|
||||
pub static ref METRIC_GRPC_DO_GET: Histogram =
|
||||
register_histogram!("grpc_do_get", "grpc do get").unwrap();
|
||||
pub static ref METRIC_REGION_REQUEST_GRPC: HistogramVec = register_histogram_vec!(
|
||||
"grpc_region_request",
|
||||
"grpc region request",
|
||||
&["request_type"]
|
||||
)
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
@@ -24,7 +24,7 @@ use common_meta::datanode_manager::{AffectedRows, Datanode};
|
||||
use common_meta::error::{self as meta_error, Result as MetaResult};
|
||||
use common_recordbatch::error::ExternalSnafu;
|
||||
use common_recordbatch::{RecordBatchStreamAdaptor, SendableRecordBatchStream};
|
||||
use common_telemetry::{error, timer};
|
||||
use common_telemetry::error;
|
||||
use prost::Message;
|
||||
use snafu::{location, Location, OptionExt, ResultExt};
|
||||
use tokio_stream::StreamExt;
|
||||
@@ -152,11 +152,9 @@ impl RegionRequester {
|
||||
.with_context(|| MissingFieldSnafu { field: "body" })?
|
||||
.as_ref()
|
||||
.to_string();
|
||||
|
||||
let _timer = timer!(
|
||||
metrics::METRIC_REGION_REQUEST_GRPC,
|
||||
&[("request_type", request_type)]
|
||||
);
|
||||
let _timer = metrics::METRIC_REGION_REQUEST_GRPC
|
||||
.with_label_values(&[request_type.as_str()])
|
||||
.start_timer();
|
||||
|
||||
let mut client = self.client.raw_region_client()?;
|
||||
|
||||
|
||||
@@ -10,9 +10,7 @@ name = "greptime"
|
||||
path = "src/bin/greptime.rs"
|
||||
|
||||
[features]
|
||||
default = ["metrics-process"]
|
||||
tokio-console = ["common-telemetry/tokio-console"]
|
||||
metrics-process = ["servers/metrics-process"]
|
||||
|
||||
[dependencies]
|
||||
anymap = "1.0.0-beta.2"
|
||||
@@ -45,11 +43,11 @@ futures.workspace = true
|
||||
lazy_static.workspace = true
|
||||
meta-client = { workspace = true }
|
||||
meta-srv = { workspace = true }
|
||||
metrics.workspace = true
|
||||
mito2 = { workspace = true }
|
||||
nu-ansi-term = "0.46"
|
||||
partition = { workspace = true }
|
||||
plugins.workspace = true
|
||||
prometheus.workspace = true
|
||||
prost.workspace = true
|
||||
query = { workspace = true }
|
||||
rand.workspace = true
|
||||
@@ -63,6 +61,7 @@ snafu.workspace = true
|
||||
substrait = { workspace = true }
|
||||
table = { workspace = true }
|
||||
tokio.workspace = true
|
||||
toml.workspace = true
|
||||
|
||||
[target.'cfg(not(windows))'.dependencies]
|
||||
tikv-jemallocator = "0.5"
|
||||
@@ -71,7 +70,6 @@ tikv-jemallocator = "0.5"
|
||||
common-test-util = { workspace = true }
|
||||
serde.workspace = true
|
||||
temp-env = "0.3"
|
||||
toml.workspace = true
|
||||
|
||||
[target.'cfg(not(windows))'.dev-dependencies]
|
||||
rexpect = "0.5"
|
||||
|
||||
@@ -21,7 +21,11 @@ use cmd::error::Result;
|
||||
use cmd::options::{Options, TopLevelOptions};
|
||||
use cmd::{cli, datanode, frontend, metasrv, standalone};
|
||||
use common_telemetry::logging::{error, info, TracingOptions};
|
||||
use metrics::gauge;
|
||||
|
||||
lazy_static::lazy_static! {
|
||||
static ref APP_VERSION: prometheus::IntGaugeVec =
|
||||
prometheus::register_int_gauge_vec!("app_version", "app version", &["short_version", "version"]).unwrap();
|
||||
}
|
||||
|
||||
#[derive(Parser)]
|
||||
#[clap(name = "greptimedb", version = print_version())]
|
||||
@@ -204,11 +208,12 @@ async fn main() -> Result<()> {
|
||||
};
|
||||
|
||||
common_telemetry::set_panic_hook();
|
||||
common_telemetry::init_default_metrics_recorder();
|
||||
let _guard = common_telemetry::init_global_logging(app_name, logging_opts, tracing_opts);
|
||||
|
||||
// Report app version as gauge.
|
||||
gauge!("app_version", 1.0, "short_version" => short_version(), "version" => full_version());
|
||||
APP_VERSION
|
||||
.with_label_values(&[short_version(), full_version()])
|
||||
.inc();
|
||||
|
||||
// Log version and argument flags.
|
||||
info!(
|
||||
|
||||
@@ -14,6 +14,7 @@
|
||||
|
||||
mod bench;
|
||||
mod cmd;
|
||||
mod export;
|
||||
mod helper;
|
||||
mod repl;
|
||||
// TODO(weny): Removes it
|
||||
@@ -27,6 +28,7 @@ use common_telemetry::logging::LoggingOptions;
|
||||
pub use repl::Repl;
|
||||
use upgrade::UpgradeCommand;
|
||||
|
||||
use self::export::ExportCommand;
|
||||
use crate::error::Result;
|
||||
use crate::options::{Options, TopLevelOptions};
|
||||
|
||||
@@ -78,17 +80,19 @@ impl Command {
|
||||
|
||||
#[derive(Parser)]
|
||||
enum SubCommand {
|
||||
Attach(AttachCommand),
|
||||
// Attach(AttachCommand),
|
||||
Upgrade(UpgradeCommand),
|
||||
Bench(BenchTableMetadataCommand),
|
||||
Export(ExportCommand),
|
||||
}
|
||||
|
||||
impl SubCommand {
|
||||
async fn build(self) -> Result<Instance> {
|
||||
match self {
|
||||
SubCommand::Attach(cmd) => cmd.build().await,
|
||||
// SubCommand::Attach(cmd) => cmd.build().await,
|
||||
SubCommand::Upgrade(cmd) => cmd.build().await,
|
||||
SubCommand::Bench(cmd) => cmd.build().await,
|
||||
SubCommand::Export(cmd) => cmd.build().await,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -104,51 +108,9 @@ pub(crate) struct AttachCommand {
|
||||
}
|
||||
|
||||
impl AttachCommand {
|
||||
#[allow(dead_code)]
|
||||
async fn build(self) -> Result<Instance> {
|
||||
let repl = Repl::try_new(&self).await?;
|
||||
Ok(Instance::Repl(repl))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_load_options() {
|
||||
let cmd = Command {
|
||||
cmd: SubCommand::Attach(AttachCommand {
|
||||
grpc_addr: String::from(""),
|
||||
meta_addr: None,
|
||||
disable_helper: false,
|
||||
}),
|
||||
};
|
||||
|
||||
let opts = cmd.load_options(TopLevelOptions::default()).unwrap();
|
||||
let logging_opts = opts.logging_options();
|
||||
assert_eq!("/tmp/greptimedb/logs", logging_opts.dir);
|
||||
assert!(logging_opts.level.is_none());
|
||||
assert!(!logging_opts.enable_jaeger_tracing);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_top_level_options() {
|
||||
let cmd = Command {
|
||||
cmd: SubCommand::Attach(AttachCommand {
|
||||
grpc_addr: String::from(""),
|
||||
meta_addr: None,
|
||||
disable_helper: false,
|
||||
}),
|
||||
};
|
||||
|
||||
let opts = cmd
|
||||
.load_options(TopLevelOptions {
|
||||
log_dir: Some("/tmp/greptimedb/test/logs".to_string()),
|
||||
log_level: Some("debug".to_string()),
|
||||
})
|
||||
.unwrap();
|
||||
let logging_opts = opts.logging_options();
|
||||
assert_eq!("/tmp/greptimedb/test/logs", logging_opts.dir);
|
||||
assert_eq!("debug", logging_opts.level.as_ref().unwrap());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -20,14 +20,13 @@ use std::time::Duration;
|
||||
use async_trait::async_trait;
|
||||
use clap::Parser;
|
||||
use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
|
||||
use common_meta::kv_backend::etcd::EtcdStore;
|
||||
use common_meta::peer::Peer;
|
||||
use common_meta::rpc::router::{Region, RegionRoute};
|
||||
use common_meta::table_name::TableName;
|
||||
use common_telemetry::info;
|
||||
use datatypes::data_type::ConcreteDataType;
|
||||
use datatypes::schema::{ColumnSchema, RawSchema};
|
||||
use meta_srv::service::store::etcd::EtcdStore;
|
||||
use meta_srv::service::store::kv::KvBackendAdapter;
|
||||
use rand::Rng;
|
||||
use table::metadata::{RawTableInfo, RawTableMeta, TableId, TableIdent, TableType};
|
||||
|
||||
@@ -64,9 +63,7 @@ impl BenchTableMetadataCommand {
|
||||
pub async fn build(&self) -> Result<Instance> {
|
||||
let etcd_store = EtcdStore::with_endpoints([&self.etcd_addr]).await.unwrap();
|
||||
|
||||
let table_metadata_manager = Arc::new(TableMetadataManager::new(KvBackendAdapter::wrap(
|
||||
etcd_store,
|
||||
)));
|
||||
let table_metadata_manager = Arc::new(TableMetadataManager::new(etcd_store));
|
||||
|
||||
let tool = BenchTableMetadata {
|
||||
table_metadata_manager,
|
||||
@@ -157,6 +154,7 @@ fn create_region_routes() -> Vec<RegionRoute> {
|
||||
addr: String::new(),
|
||||
}),
|
||||
follower_peers: vec![],
|
||||
leader_status: None,
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
420
src/cmd/src/cli/export.rs
Normal file
@@ -0,0 +1,420 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::path::Path;
|
||||
use std::sync::Arc;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use clap::{Parser, ValueEnum};
|
||||
use client::api::v1::auth_header::AuthScheme;
|
||||
use client::api::v1::Basic;
|
||||
use client::{Client, Database, DEFAULT_SCHEMA_NAME};
|
||||
use common_query::Output;
|
||||
use common_recordbatch::util::collect;
|
||||
use common_telemetry::{debug, error, info, warn};
|
||||
use datatypes::scalars::ScalarVector;
|
||||
use datatypes::vectors::{StringVector, Vector};
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use tokio::fs::File;
|
||||
use tokio::io::{AsyncWriteExt, BufWriter};
|
||||
use tokio::sync::Semaphore;
|
||||
|
||||
use crate::cli::{Instance, Tool};
|
||||
use crate::error::{
|
||||
CollectRecordBatchesSnafu, ConnectServerSnafu, EmptyResultSnafu, Error, FileIoSnafu,
|
||||
IllegalConfigSnafu, InvalidDatabaseNameSnafu, NotDataFromOutputSnafu, RequestDatabaseSnafu,
|
||||
Result,
|
||||
};
|
||||
|
||||
type TableReference = (String, String, String);
|
||||
|
||||
#[derive(Debug, Default, Clone, ValueEnum)]
|
||||
enum ExportTarget {
|
||||
/// Corresponding to `SHOW CREATE TABLE`
|
||||
#[default]
|
||||
CreateTable,
|
||||
/// Corresponding to `EXPORT TABLE`
|
||||
TableData,
|
||||
}
|
||||
|
||||
#[derive(Debug, Default, Parser)]
|
||||
pub struct ExportCommand {
|
||||
/// Server address to connect
|
||||
#[clap(long)]
|
||||
addr: String,
|
||||
|
||||
/// Directory to put the exported data. E.g.: /tmp/greptimedb-export
|
||||
#[clap(long)]
|
||||
output_dir: String,
|
||||
|
||||
/// The name of the catalog to export. Defaults to "greptime-*".
|
||||
#[clap(long, default_value = "")]
|
||||
database: String,
|
||||
|
||||
/// Parallelism of the export.
|
||||
#[clap(long, short = 'j', default_value = "1")]
|
||||
export_jobs: usize,
|
||||
|
||||
/// Max retry times for each job.
|
||||
#[clap(long, default_value = "3")]
|
||||
max_retry: usize,
|
||||
|
||||
/// Things to export
|
||||
#[clap(long, short = 't', value_enum)]
|
||||
target: ExportTarget,
|
||||
|
||||
/// Basic authentication for connecting to the server.
|
||||
#[clap(long)]
|
||||
auth_basic: Option<String>,
|
||||
}
|
||||
|
||||
impl ExportCommand {
|
||||
pub async fn build(&self) -> Result<Instance> {
|
||||
let client = Client::with_urls([self.addr.clone()]);
|
||||
client
|
||||
.health_check()
|
||||
.await
|
||||
.with_context(|_| ConnectServerSnafu {
|
||||
addr: self.addr.clone(),
|
||||
})?;
|
||||
let (catalog, schema) = split_database(&self.database)?;
|
||||
let mut database_client = Database::new(
|
||||
catalog.clone(),
|
||||
schema.clone().unwrap_or(DEFAULT_SCHEMA_NAME.to_string()),
|
||||
client,
|
||||
);
|
||||
|
||||
if let Some(auth_basic) = &self.auth_basic {
|
||||
let (username, password) = auth_basic.split_once(':').context(IllegalConfigSnafu {
|
||||
msg: "auth_basic cannot be split by ':'".to_string(),
|
||||
})?;
|
||||
database_client.set_auth(AuthScheme::Basic(Basic {
|
||||
username: username.to_string(),
|
||||
password: password.to_string(),
|
||||
}));
|
||||
}
|
||||
|
||||
Ok(Instance::Tool(Box::new(Export {
|
||||
client: database_client,
|
||||
catalog,
|
||||
schema,
|
||||
output_dir: self.output_dir.clone(),
|
||||
parallelism: self.export_jobs,
|
||||
target: self.target.clone(),
|
||||
})))
|
||||
}
|
||||
}
|
||||
|
||||
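For reference, the clap derive above produces long flags named after the fields, and the `--target` values follow clap's default kebab-case mapping of the ValueEnum variants. A hypothetical parse check (not part of the changeset) could look like this:

#[cfg(test)]
mod export_command_sketch {
    use clap::Parser;

    use super::*;

    #[test]
    fn parses_flags() {
        // The first element is the program name; the rest are the derived flags.
        let cmd = ExportCommand::parse_from([
            "export",
            "--addr",
            "127.0.0.1:4000",
            "--output-dir",
            "/tmp/greptimedb-export",
            "--database",
            "greptime-public",
            "-t",
            "create-table",
        ]);
        assert_eq!(cmd.export_jobs, 1); // default parallelism
    }
}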
pub struct Export {
|
||||
client: Database,
|
||||
catalog: String,
|
||||
schema: Option<String>,
|
||||
output_dir: String,
|
||||
parallelism: usize,
|
||||
target: ExportTarget,
|
||||
}
|
||||
|
||||
impl Export {
|
||||
/// Iterate over all db names.
|
||||
///
|
||||
/// Note: a `db_name` here means catalog + schema.
|
||||
async fn iter_db_names(&self) -> Result<Vec<(String, String)>> {
|
||||
if let Some(schema) = &self.schema {
|
||||
Ok(vec![(self.catalog.clone(), schema.clone())])
|
||||
} else {
|
||||
let mut client = self.client.clone();
|
||||
client.set_catalog(self.catalog.clone());
|
||||
let result =
|
||||
client
|
||||
.sql("show databases")
|
||||
.await
|
||||
.with_context(|_| RequestDatabaseSnafu {
|
||||
sql: "show databases".to_string(),
|
||||
})?;
|
||||
let Output::Stream(stream) = result else {
|
||||
NotDataFromOutputSnafu.fail()?
|
||||
};
|
||||
let record_batch = collect(stream)
|
||||
.await
|
||||
.context(CollectRecordBatchesSnafu)?
|
||||
.pop()
|
||||
.context(EmptyResultSnafu)?;
|
||||
let schemas = record_batch
|
||||
.column(0)
|
||||
.as_any()
|
||||
.downcast_ref::<StringVector>()
|
||||
.unwrap();
|
||||
let mut result = Vec::with_capacity(schemas.len());
|
||||
for i in 0..schemas.len() {
|
||||
let schema = schemas.get_data(i).unwrap().to_owned();
|
||||
if schema == common_catalog::consts::INFORMATION_SCHEMA_NAME {
|
||||
continue;
|
||||
}
|
||||
result.push((self.catalog.clone(), schema));
|
||||
}
|
||||
Ok(result)
|
||||
}
|
||||
}
|
||||
|
||||
/// Return a list of [`TableReference`] to be exported.
|
||||
/// Includes all tables under the given `catalog` and `schema`
|
||||
async fn get_table_list(&self, catalog: &str, schema: &str) -> Result<Vec<TableReference>> {
|
||||
// TODO: this string-built SQL is vulnerable to injection.
|
||||
let sql = format!(
|
||||
"select table_catalog, table_schema, table_name from \
|
||||
information_schema.tables where table_type = \'BASE TABLE\' \
|
||||
and table_catalog = \'{catalog}\' and table_schema = \'{schema}\'",
|
||||
);
|
||||
let mut client = self.client.clone();
|
||||
client.set_catalog(catalog);
|
||||
client.set_schema(schema);
|
||||
let result = client
|
||||
.sql(&sql)
|
||||
.await
|
||||
.with_context(|_| RequestDatabaseSnafu { sql })?;
|
||||
let Output::Stream(stream) = result else {
|
||||
NotDataFromOutputSnafu.fail()?
|
||||
};
|
||||
let Some(record_batch) = collect(stream)
|
||||
.await
|
||||
.context(CollectRecordBatchesSnafu)?
|
||||
.pop()
|
||||
else {
|
||||
return Ok(vec![]);
|
||||
};
|
||||
|
||||
debug!("Fetched table list: {}", record_batch.pretty_print());
|
||||
|
||||
if record_batch.num_rows() == 0 {
|
||||
return Ok(vec![]);
|
||||
}
|
||||
|
||||
let mut result = Vec::with_capacity(record_batch.num_rows());
|
||||
let catalog_column = record_batch
|
||||
.column(0)
|
||||
.as_any()
|
||||
.downcast_ref::<StringVector>()
|
||||
.unwrap();
|
||||
let schema_column = record_batch
|
||||
.column(1)
|
||||
.as_any()
|
||||
.downcast_ref::<StringVector>()
|
||||
.unwrap();
|
||||
let table_column = record_batch
|
||||
.column(2)
|
||||
.as_any()
|
||||
.downcast_ref::<StringVector>()
|
||||
.unwrap();
|
||||
for i in 0..record_batch.num_rows() {
|
||||
let catalog = catalog_column.get_data(i).unwrap().to_owned();
|
||||
let schema = schema_column.get_data(i).unwrap().to_owned();
|
||||
let table = table_column.get_data(i).unwrap().to_owned();
|
||||
result.push((catalog, schema, table));
|
||||
}
|
||||
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
async fn show_create_table(&self, catalog: &str, schema: &str, table: &str) -> Result<String> {
|
||||
let sql = format!("show create table {}.{}.{}", catalog, schema, table);
|
||||
let mut client = self.client.clone();
|
||||
client.set_catalog(catalog);
|
||||
client.set_schema(schema);
|
||||
let result = client
|
||||
.sql(&sql)
|
||||
.await
|
||||
.with_context(|_| RequestDatabaseSnafu { sql })?;
|
||||
let Output::Stream(stream) = result else {
|
||||
NotDataFromOutputSnafu.fail()?
|
||||
};
|
||||
let record_batch = collect(stream)
|
||||
.await
|
||||
.context(CollectRecordBatchesSnafu)?
|
||||
.pop()
|
||||
.context(EmptyResultSnafu)?;
|
||||
let create_table = record_batch
|
||||
.column(1)
|
||||
.as_any()
|
||||
.downcast_ref::<StringVector>()
|
||||
.unwrap()
|
||||
.get_data(0)
|
||||
.unwrap();
|
||||
|
||||
Ok(format!("{create_table};\n"))
|
||||
}
|
||||
|
||||
async fn export_create_table(&self) -> Result<()> {
|
||||
let semaphore = Arc::new(Semaphore::new(self.parallelism));
|
||||
let db_names = self.iter_db_names().await?;
|
||||
let db_count = db_names.len();
|
||||
let mut tasks = Vec::with_capacity(db_names.len());
|
||||
for (catalog, schema) in db_names {
|
||||
let semaphore_moved = semaphore.clone();
|
||||
tasks.push(async move {
|
||||
let _permit = semaphore_moved.acquire().await.unwrap();
|
||||
let table_list = self.get_table_list(&catalog, &schema).await?;
|
||||
let table_count = table_list.len();
|
||||
tokio::fs::create_dir_all(&self.output_dir)
|
||||
.await
|
||||
.context(FileIoSnafu)?;
|
||||
let output_file =
|
||||
Path::new(&self.output_dir).join(format!("{catalog}-{schema}.sql"));
|
||||
let mut file = File::create(output_file).await.context(FileIoSnafu)?;
|
||||
for (c, s, t) in table_list {
|
||||
match self.show_create_table(&c, &s, &t).await {
|
||||
Err(e) => {
|
||||
error!(e; "Failed to export table {}.{}.{}", c, s, t)
|
||||
}
|
||||
Ok(create_table) => {
|
||||
file.write_all(create_table.as_bytes())
|
||||
.await
|
||||
.context(FileIoSnafu)?;
|
||||
}
|
||||
}
|
||||
}
|
||||
info!("finished exporting {catalog}.{schema} with {table_count} tables",);
|
||||
Ok::<(), Error>(())
|
||||
});
|
||||
}
|
||||
|
||||
let success = futures::future::join_all(tasks)
|
||||
.await
|
||||
.into_iter()
|
||||
.filter(|r| match r {
|
||||
Ok(_) => true,
|
||||
Err(e) => {
|
||||
error!(e; "export job failed");
|
||||
false
|
||||
}
|
||||
})
|
||||
.count();
|
||||
|
||||
info!("success {success}/{db_count} jobs");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
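The export tasks bound their concurrency by taking a semaphore permit inside each async job, then `join_all` counts the successes afterwards. A stripped-down sketch of the same pattern, independent of the exporter types:

use std::sync::Arc;

use tokio::sync::Semaphore;

async fn run_jobs(parallelism: usize, job_count: usize) -> usize {
    let semaphore = Arc::new(Semaphore::new(parallelism));
    let mut tasks = Vec::with_capacity(job_count);
    for i in 0..job_count {
        let semaphore = semaphore.clone();
        tasks.push(async move {
            // At most `parallelism` permits exist, so at most that many
            // jobs make progress at a time.
            let _permit = semaphore.acquire().await.unwrap();
            // ... do the per-database work here ...
            Ok::<usize, ()>(i)
        });
    }
    // Count the successful jobs, mirroring the exporter's bookkeeping.
    futures::future::join_all(tasks)
        .await
        .into_iter()
        .filter(|r| r.is_ok())
        .count()
}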
async fn export_table_data(&self) -> Result<()> {
|
||||
let semaphore = Arc::new(Semaphore::new(self.parallelism));
|
||||
let db_names = self.iter_db_names().await?;
|
||||
let db_count = db_names.len();
|
||||
let mut tasks = Vec::with_capacity(db_names.len());
|
||||
for (catalog, schema) in db_names {
|
||||
let semaphore_moved = semaphore.clone();
|
||||
tasks.push(async move {
|
||||
let _permit = semaphore_moved.acquire().await.unwrap();
|
||||
tokio::fs::create_dir_all(&self.output_dir)
|
||||
.await
|
||||
.context(FileIoSnafu)?;
|
||||
let output_dir = Path::new(&self.output_dir).join(format!("{catalog}-{schema}/"));
|
||||
|
||||
let mut client = self.client.clone();
|
||||
client.set_catalog(catalog.clone());
|
||||
client.set_schema(schema.clone());
|
||||
|
||||
// Copy the whole database out to the output directory.
|
||||
let sql = format!(
|
||||
"copy database {} to '{}' with (format='parquet');",
|
||||
schema,
|
||||
output_dir.to_str().unwrap()
|
||||
);
|
||||
client
|
||||
.sql(sql.clone())
|
||||
.await
|
||||
.context(RequestDatabaseSnafu { sql })?;
|
||||
info!("finished exporting {catalog}.{schema} data");
|
||||
|
||||
// Write the corresponding "copy ... from" SQL statements for re-import.
|
||||
let dir_filenames = match output_dir.read_dir() {
|
||||
Ok(dir) => dir,
|
||||
Err(_) => {
|
||||
warn!("empty database {catalog}.{schema}");
|
||||
return Ok(());
|
||||
}
|
||||
};
|
||||
|
||||
let copy_from_file =
|
||||
Path::new(&self.output_dir).join(format!("{catalog}-{schema}_copy_from.sql"));
|
||||
let mut writer =
|
||||
BufWriter::new(File::create(copy_from_file).await.context(FileIoSnafu)?);
|
||||
|
||||
for table_file in dir_filenames {
|
||||
let table_file = table_file.unwrap();
|
||||
let table_name = table_file
|
||||
.file_name()
|
||||
.into_string()
|
||||
.unwrap()
|
||||
.replace(".parquet", "");
|
||||
|
||||
writer
|
||||
.write(
|
||||
format!(
|
||||
"copy {} from '{}' with (format='parquet');\n",
|
||||
table_name,
|
||||
table_file.path().to_str().unwrap()
|
||||
)
|
||||
.as_bytes(),
|
||||
)
|
||||
.await
|
||||
.context(FileIoSnafu)?;
|
||||
}
|
||||
writer.flush().await.context(FileIoSnafu)?;
|
||||
|
||||
info!("finished exporting {catalog}.{schema} copy_from.sql");
|
||||
|
||||
Ok::<(), Error>(())
|
||||
});
|
||||
}
|
||||
|
||||
let success = futures::future::join_all(tasks)
|
||||
.await
|
||||
.into_iter()
|
||||
.filter(|r| match r {
|
||||
Ok(_) => true,
|
||||
Err(e) => {
|
||||
error!(e; "export job failed");
|
||||
false
|
||||
}
|
||||
})
|
||||
.count();
|
||||
|
||||
info!("success {success}/{db_count} jobs");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Tool for Export {
|
||||
async fn do_work(&self) -> Result<()> {
|
||||
match self.target {
|
||||
ExportTarget::CreateTable => self.export_create_table().await,
|
||||
ExportTarget::TableData => self.export_table_data().await,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Split at `-`.
|
||||
fn split_database(database: &str) -> Result<(String, Option<String>)> {
|
||||
let (catalog, schema) = database
|
||||
.split_once('-')
|
||||
.with_context(|| InvalidDatabaseNameSnafu {
|
||||
database: database.to_string(),
|
||||
})?;
|
||||
if schema == "*" {
|
||||
Ok((catalog.to_string(), None))
|
||||
} else {
|
||||
Ok((catalog.to_string(), Some(schema.to_string())))
|
||||
}
|
||||
}
|
||||
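A quick illustration of `split_database` on the two accepted shapes of the database argument (the values are only examples):

#[cfg(test)]
mod split_database_sketch {
    use super::*;

    #[test]
    fn splits_catalog_and_schema() {
        // "catalog-schema" selects a single schema ...
        assert_eq!(
            split_database("greptime-public").unwrap(),
            ("greptime".to_string(), Some("public".to_string()))
        );
        // ... while "catalog-*" means "all schemas in the catalog".
        assert_eq!(
            split_database("greptime-*").unwrap(),
            ("greptime".to_string(), None)
        );
        // A name without '-' is rejected as an invalid database name.
        assert!(split_database("greptime").is_err());
    }
}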
@@ -27,6 +27,8 @@ use common_meta::key::table_name::{TableNameKey, TableNameValue};
|
||||
use common_meta::key::table_region::{TableRegionKey, TableRegionValue};
|
||||
use common_meta::key::table_route::{TableRouteKey, TableRouteValue as NextTableRouteValue};
|
||||
use common_meta::key::{RegionDistribution, TableMetaKey};
|
||||
use common_meta::kv_backend::etcd::EtcdStore;
|
||||
use common_meta::kv_backend::KvBackendRef;
|
||||
use common_meta::range_stream::PaginationStream;
|
||||
use common_meta::rpc::router::TableRoute;
|
||||
use common_meta::rpc::store::{BatchDeleteRequest, BatchPutRequest, PutRequest, RangeRequest};
|
||||
@@ -35,8 +37,6 @@ use common_meta::util::get_prefix_end_key;
|
||||
use common_telemetry::info;
|
||||
use etcd_client::Client;
|
||||
use futures::TryStreamExt;
|
||||
use meta_srv::service::store::etcd::EtcdStore;
|
||||
use meta_srv::service::store::kv::{KvBackendAdapter, KvStoreRef};
|
||||
use prost::Message;
|
||||
use snafu::ResultExt;
|
||||
use v1_helper::{CatalogKey as v1CatalogKey, SchemaKey as v1SchemaKey, TableGlobalValue};
|
||||
@@ -81,7 +81,7 @@ impl UpgradeCommand {
|
||||
}
|
||||
|
||||
struct MigrateTableMetadata {
|
||||
etcd_store: KvStoreRef,
|
||||
etcd_store: KvBackendRef,
|
||||
dryrun: bool,
|
||||
|
||||
skip_table_global_keys: bool,
|
||||
@@ -123,7 +123,7 @@ impl MigrateTableMetadata {
|
||||
info!("Start scanning key from: {}", String::from_utf8_lossy(&key));
|
||||
|
||||
let mut stream = PaginationStream::new(
|
||||
KvBackendAdapter::wrap(self.etcd_store.clone()),
|
||||
self.etcd_store.clone(),
|
||||
RangeRequest::new().with_range(key, range_end),
|
||||
PAGE_SIZE,
|
||||
Arc::new(|kv: KeyValue| {
|
||||
@@ -182,7 +182,7 @@ impl MigrateTableMetadata {
|
||||
let mut keys = Vec::new();
|
||||
info!("Start scanning key from: {}", String::from_utf8_lossy(&key));
|
||||
let mut stream = PaginationStream::new(
|
||||
KvBackendAdapter::wrap(self.etcd_store.clone()),
|
||||
self.etcd_store.clone(),
|
||||
RangeRequest::new().with_range(key, range_end),
|
||||
PAGE_SIZE,
|
||||
Arc::new(|kv: KeyValue| {
|
||||
@@ -234,7 +234,7 @@ impl MigrateTableMetadata {
|
||||
let mut keys = Vec::new();
|
||||
info!("Start scanning key from: {}", String::from_utf8_lossy(&key));
|
||||
let mut stream = PaginationStream::new(
|
||||
KvBackendAdapter::wrap(self.etcd_store.clone()),
|
||||
self.etcd_store.clone(),
|
||||
RangeRequest::new().with_range(key, range_end),
|
||||
PAGE_SIZE,
|
||||
Arc::new(|kv: KeyValue| {
|
||||
@@ -284,7 +284,7 @@ impl MigrateTableMetadata {
|
||||
|
||||
info!("Start scanning key from: {}", String::from_utf8_lossy(&key));
|
||||
let mut stream = PaginationStream::new(
|
||||
KvBackendAdapter::wrap(self.etcd_store.clone()),
|
||||
self.etcd_store.clone(),
|
||||
RangeRequest::new().with_range(key, range_end.clone()),
|
||||
PAGE_SIZE,
|
||||
Arc::new(|kv: KeyValue| {
|
||||
|
||||
@@ -96,6 +96,8 @@ struct StartCommand {
|
||||
#[clap(long)]
|
||||
data_home: Option<String>,
|
||||
#[clap(long)]
|
||||
wal_dir: Option<String>,
|
||||
#[clap(long)]
|
||||
http_addr: Option<String>,
|
||||
#[clap(long)]
|
||||
http_timeout: Option<u64>,
|
||||
@@ -149,6 +151,10 @@ impl StartCommand {
|
||||
opts.storage.data_home = data_home.clone();
|
||||
}
|
||||
|
||||
if let Some(wal_dir) = &self.wal_dir {
|
||||
opts.wal.dir = Some(wal_dir.clone());
|
||||
}
|
||||
|
||||
if let Some(http_addr) = &self.http_addr {
|
||||
opts.http.addr = http_addr.clone();
|
||||
}
|
||||
@@ -188,6 +194,7 @@ mod tests {
|
||||
use common_base::readable_size::ReadableSize;
|
||||
use common_test_util::temp_dir::create_named_temp_file;
|
||||
use datanode::config::{CompactionConfig, FileConfig, ObjectStoreConfig, RegionManifestConfig};
|
||||
use servers::heartbeat_options::HeartbeatOptions;
|
||||
use servers::Mode;
|
||||
|
||||
use super::*;
|
||||
@@ -204,11 +211,14 @@ mod tests {
|
||||
rpc_hostname = "127.0.0.1"
|
||||
rpc_runtime_size = 8
|
||||
|
||||
[heartbeat]
|
||||
interval = "300ms"
|
||||
|
||||
[meta_client]
|
||||
metasrv_addrs = ["127.0.0.1:3002"]
|
||||
timeout_millis = 3000
|
||||
connect_timeout_millis = 5000
|
||||
ddl_timeout_millis= 10000
|
||||
timeout = "3s"
|
||||
connect_timeout = "5s"
|
||||
ddl_timeout = "10s"
|
||||
tcp_nodelay = true
|
||||
|
||||
[wal]
|
||||
@@ -251,25 +261,33 @@ mod tests {
|
||||
|
||||
assert_eq!("127.0.0.1:3001".to_string(), options.rpc_addr);
|
||||
assert_eq!(Some(42), options.node_id);
|
||||
assert_eq!("/other/wal", options.wal.dir.unwrap());
|
||||
|
||||
assert_eq!(Duration::from_secs(600), options.wal.purge_interval);
|
||||
assert_eq!(1024 * 1024 * 1024, options.wal.file_size.0);
|
||||
assert_eq!(1024 * 1024 * 1024 * 50, options.wal.purge_threshold.0);
|
||||
assert!(!options.wal.sync_write);
|
||||
|
||||
let HeartbeatOptions {
|
||||
interval: heart_beat_interval,
|
||||
..
|
||||
} = options.heartbeat;
|
||||
|
||||
assert_eq!(300, heart_beat_interval.as_millis());
|
||||
|
||||
let MetaClientOptions {
|
||||
metasrv_addrs: metasrv_addr,
|
||||
timeout_millis,
|
||||
connect_timeout_millis,
|
||||
timeout,
|
||||
connect_timeout,
|
||||
ddl_timeout,
|
||||
tcp_nodelay,
|
||||
ddl_timeout_millis,
|
||||
..
|
||||
} = options.meta_client.unwrap();
|
||||
|
||||
assert_eq!(vec!["127.0.0.1:3002".to_string()], metasrv_addr);
|
||||
assert_eq!(5000, connect_timeout_millis);
|
||||
assert_eq!(10000, ddl_timeout_millis);
|
||||
assert_eq!(3000, timeout_millis);
|
||||
assert_eq!(5000, connect_timeout.as_millis());
|
||||
assert_eq!(10000, ddl_timeout.as_millis());
|
||||
assert_eq!(3000, timeout.as_millis());
|
||||
assert!(tcp_nodelay);
|
||||
assert_eq!("/tmp/greptimedb/", options.storage.data_home);
|
||||
assert!(matches!(
|
||||
@@ -363,8 +381,8 @@ mod tests {
|
||||
rpc_runtime_size = 8
|
||||
|
||||
[meta_client]
|
||||
timeout_millis = 3000
|
||||
connect_timeout_millis = 5000
|
||||
timeout = "3s"
|
||||
connect_timeout = "5s"
|
||||
tcp_nodelay = true
|
||||
|
||||
[wal]
|
||||
@@ -428,6 +446,7 @@ mod tests {
|
||||
|| {
|
||||
let command = StartCommand {
|
||||
config_file: Some(file.path().to_str().unwrap().to_string()),
|
||||
wal_dir: Some("/other/wal/dir".to_string()),
|
||||
env_prefix: env_prefix.to_string(),
|
||||
..Default::default()
|
||||
};
|
||||
@@ -455,6 +474,9 @@ mod tests {
|
||||
// Should be read from config file, config file > env > default values.
|
||||
assert_eq!(opts.storage.compaction.max_purge_tasks, 32);
|
||||
|
||||
// Should be read from cli, cli > config file > env > default values.
|
||||
assert_eq!(opts.wal.dir.unwrap(), "/other/wal/dir");
|
||||
|
||||
// Should be default value.
|
||||
assert_eq!(
|
||||
opts.storage.manifest.checkpoint_margin,
|
||||
|
||||
@@ -37,6 +37,18 @@ pub enum Error {
|
||||
source: common_meta::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to start procedure manager"))]
|
||||
StartProcedureManager {
|
||||
location: Location,
|
||||
source: common_procedure::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to stop procedure manager"))]
|
||||
StopProcedureManager {
|
||||
location: Location,
|
||||
source: common_procedure::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to start datanode"))]
|
||||
StartDatanode {
|
||||
location: Location,
|
||||
@@ -174,12 +186,45 @@ pub enum Error {
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to connect server at {addr}"))]
|
||||
ConnectServer {
|
||||
addr: String,
|
||||
source: client::error::Error,
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to serde json"))]
|
||||
SerdeJson {
|
||||
#[snafu(source)]
|
||||
error: serde_json::error::Error,
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Expect data from output, but got another thing"))]
|
||||
NotDataFromOutput { location: Location },
|
||||
|
||||
#[snafu(display("Empty result from output"))]
|
||||
EmptyResult { location: Location },
|
||||
|
||||
#[snafu(display("Failed to manipulate file"))]
|
||||
FileIo {
|
||||
location: Location,
|
||||
#[snafu(source)]
|
||||
error: std::io::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Invalid database name: {}", database))]
|
||||
InvalidDatabaseName {
|
||||
location: Location,
|
||||
database: String,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to create directory {}", dir))]
|
||||
CreateDir {
|
||||
dir: String,
|
||||
#[snafu(source)]
|
||||
error: std::io::Error,
|
||||
},
|
||||
}
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
@@ -198,12 +243,18 @@ impl ErrorExt for Error {
|
||||
Error::IterStream { source, .. } | Error::InitMetadata { source, .. } => {
|
||||
source.status_code()
|
||||
}
|
||||
Error::ConnectServer { source, .. } => source.status_code(),
|
||||
Error::MissingConfig { .. }
|
||||
| Error::LoadLayeredConfig { .. }
|
||||
| Error::IllegalConfig { .. }
|
||||
| Error::InvalidReplCommand { .. }
|
||||
| Error::ConnectEtcd { .. } => StatusCode::InvalidArguments,
|
||||
|
||||
| Error::ConnectEtcd { .. }
|
||||
| Error::NotDataFromOutput { .. }
|
||||
| Error::CreateDir { .. }
|
||||
| Error::EmptyResult { .. }
|
||||
| Error::InvalidDatabaseName { .. } => StatusCode::InvalidArguments,
|
||||
Error::StartProcedureManager { source, .. }
|
||||
| Error::StopProcedureManager { source, .. } => source.status_code(),
|
||||
Error::ReplCreation { .. } | Error::Readline { .. } => StatusCode::Internal,
|
||||
Error::RequestDatabase { source, .. } => source.status_code(),
|
||||
Error::CollectRecordBatches { source, .. }
|
||||
@@ -215,7 +266,7 @@ impl ErrorExt for Error {
|
||||
Error::SubstraitEncodeLogicalPlan { source, .. } => source.status_code(),
|
||||
Error::StartCatalogManager { source, .. } => source.status_code(),
|
||||
|
||||
Error::SerdeJson { .. } => StatusCode::Unexpected,
|
||||
Error::SerdeJson { .. } | Error::FileIo { .. } => StatusCode::Unexpected,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -89,7 +89,7 @@ pub struct StartCommand {
|
||||
#[clap(long)]
|
||||
http_timeout: Option<u64>,
|
||||
#[clap(long)]
|
||||
grpc_addr: Option<String>,
|
||||
rpc_addr: Option<String>,
|
||||
#[clap(long)]
|
||||
mysql_addr: Option<String>,
|
||||
#[clap(long)]
|
||||
@@ -150,7 +150,7 @@ impl StartCommand {
|
||||
opts.http.disable_dashboard = disable_dashboard;
|
||||
}
|
||||
|
||||
if let Some(addr) = &self.grpc_addr {
|
||||
if let Some(addr) = &self.rpc_addr {
|
||||
opts.grpc.addr = addr.clone()
|
||||
}
|
||||
|
||||
@@ -188,6 +188,7 @@ impl StartCommand {
|
||||
}
|
||||
|
||||
async fn build(self, mut opts: FrontendOptions) -> Result<Instance> {
|
||||
#[allow(clippy::unnecessary_mut_passed)]
|
||||
let plugins = plugins::setup_frontend_plugins(&mut opts)
|
||||
.await
|
||||
.context(StartFrontendSnafu)?;
|
||||
@@ -200,7 +201,7 @@ impl StartCommand {
|
||||
.context(StartFrontendSnafu)?;
|
||||
|
||||
instance
|
||||
.build_servers(&opts)
|
||||
.build_servers(opts)
|
||||
.await
|
||||
.context(StartFrontendSnafu)?;
|
||||
|
||||
@@ -312,6 +313,7 @@ mod tests {
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
#[allow(clippy::unnecessary_mut_passed)]
|
||||
let plugins = plugins::setup_frontend_plugins(&mut fe_opts).await.unwrap();
|
||||
|
||||
let provider = plugins.get::<UserProviderRef>().unwrap();
|
||||
@@ -353,8 +355,8 @@ mod tests {
|
||||
addr = "127.0.0.1:4000"
|
||||
|
||||
[meta_client]
|
||||
timeout_millis = 3000
|
||||
connect_timeout_millis = 5000
|
||||
timeout = "3s"
|
||||
connect_timeout = "5s"
|
||||
tcp_nodelay = true
|
||||
|
||||
[mysql]
|
||||
|
||||
@@ -12,11 +12,12 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use common_config::KvStoreConfig;
|
||||
use common_config::KvBackendConfig;
|
||||
use common_telemetry::logging::LoggingOptions;
|
||||
use config::{Config, Environment, File, FileFormat};
|
||||
use datanode::config::{DatanodeOptions, ProcedureConfig};
|
||||
use frontend::frontend::FrontendOptions;
|
||||
use frontend::error::{Result as FeResult, TomlFormatSnafu};
|
||||
use frontend::frontend::{FrontendOptions, TomlSerializable};
|
||||
use meta_srv::metasrv::MetaSrvOptions;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use snafu::ResultExt;
|
||||
@@ -27,15 +28,28 @@ pub const ENV_VAR_SEP: &str = "__";
|
||||
pub const ENV_LIST_SEP: &str = ",";
|
||||
|
||||
/// Options mixed up from datanode, frontend and metasrv.
|
||||
#[derive(Serialize)]
|
||||
pub struct MixOptions {
|
||||
pub data_home: String,
|
||||
pub procedure: ProcedureConfig,
|
||||
pub kv_store: KvStoreConfig,
|
||||
pub metadata_store: KvBackendConfig,
|
||||
pub frontend: FrontendOptions,
|
||||
pub datanode: DatanodeOptions,
|
||||
pub logging: LoggingOptions,
|
||||
}
|
||||
|
||||
impl From<MixOptions> for FrontendOptions {
|
||||
fn from(value: MixOptions) -> Self {
|
||||
value.frontend
|
||||
}
|
||||
}
|
||||
|
||||
impl TomlSerializable for MixOptions {
|
||||
fn to_toml(&self) -> FeResult<String> {
|
||||
toml::to_string(self).context(TomlFormatSnafu)
|
||||
}
|
||||
}
|
||||
|
||||
pub enum Options {
|
||||
Datanode(Box<DatanodeOptions>),
|
||||
Frontend(Box<FrontendOptions>),
|
||||
@@ -144,8 +158,8 @@ mod tests {
|
||||
mysql_runtime_size = 2
|
||||
|
||||
[meta_client]
|
||||
timeout_millis = 3000
|
||||
connect_timeout_millis = 5000
|
||||
timeout = "3s"
|
||||
connect_timeout = "5s"
|
||||
tcp_nodelay = true
|
||||
|
||||
[wal]
|
||||
@@ -263,6 +277,9 @@ mod tests {
|
||||
]
|
||||
);
|
||||
|
||||
// Should be the values from config file, not environment variables.
|
||||
assert_eq!(opts.wal.dir.unwrap(), "/tmp/greptimedb/wal");
|
||||
|
||||
// Should be default values.
|
||||
assert_eq!(opts.node_id, None);
|
||||
},
|
||||
|
||||
@@ -13,12 +13,13 @@
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::Arc;
|
||||
use std::{fs, path};
|
||||
|
||||
use catalog::kvbackend::KvBackendCatalogManager;
|
||||
use catalog::CatalogManagerRef;
|
||||
use clap::Parser;
|
||||
use common_base::Plugins;
|
||||
use common_config::{metadata_store_dir, KvStoreConfig, WalConfig};
|
||||
use common_config::{metadata_store_dir, KvBackendConfig, WalConfig};
|
||||
use common_meta::cache_invalidator::DummyKvCacheInvalidator;
|
||||
use common_meta::kv_backend::KvBackendRef;
|
||||
use common_procedure::ProcedureManagerRef;
|
||||
@@ -41,8 +42,9 @@ use servers::Mode;
|
||||
use snafu::ResultExt;
|
||||
|
||||
use crate::error::{
|
||||
IllegalConfigSnafu, InitMetadataSnafu, Result, ShutdownDatanodeSnafu, ShutdownFrontendSnafu,
|
||||
StartDatanodeSnafu, StartFrontendSnafu,
|
||||
CreateDirSnafu, IllegalConfigSnafu, InitMetadataSnafu, Result, ShutdownDatanodeSnafu,
|
||||
ShutdownFrontendSnafu, StartDatanodeSnafu, StartFrontendSnafu, StartProcedureManagerSnafu,
|
||||
StopProcedureManagerSnafu,
|
||||
};
|
||||
use crate::options::{MixOptions, Options, TopLevelOptions};
|
||||
|
||||
@@ -95,9 +97,10 @@ pub struct StandaloneOptions {
|
||||
pub prom_store: PromStoreOptions,
|
||||
pub wal: WalConfig,
|
||||
pub storage: StorageConfig,
|
||||
pub kv_store: KvStoreConfig,
|
||||
pub metadata_store: KvBackendConfig,
|
||||
pub procedure: ProcedureConfig,
|
||||
pub logging: LoggingOptions,
|
||||
pub user_provider: Option<String>,
|
||||
/// Options for different store engines.
|
||||
pub region_engine: Vec<RegionEngineConfig>,
|
||||
}
|
||||
@@ -116,9 +119,10 @@ impl Default for StandaloneOptions {
|
||||
prom_store: PromStoreOptions::default(),
|
||||
wal: WalConfig::default(),
|
||||
storage: StorageConfig::default(),
|
||||
kv_store: KvStoreConfig::default(),
|
||||
metadata_store: KvBackendConfig::default(),
|
||||
procedure: ProcedureConfig::default(),
|
||||
logging: LoggingOptions::default(),
|
||||
user_provider: None,
|
||||
region_engine: vec![
|
||||
RegionEngineConfig::Mito(MitoConfig::default()),
|
||||
RegionEngineConfig::File(FileEngineConfig::default()),
|
||||
@@ -140,6 +144,7 @@ impl StandaloneOptions {
|
||||
prom_store: self.prom_store,
|
||||
meta_client: None,
|
||||
logging: self.logging,
|
||||
user_provider: self.user_provider,
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
@@ -159,6 +164,7 @@ impl StandaloneOptions {
|
||||
pub struct Instance {
|
||||
datanode: Datanode,
|
||||
frontend: FeInstance,
|
||||
procedure_manager: ProcedureManagerRef,
|
||||
}
|
||||
|
||||
impl Instance {
|
||||
@@ -167,6 +173,11 @@ impl Instance {
|
||||
self.datanode.start().await.context(StartDatanodeSnafu)?;
|
||||
info!("Datanode instance started");
|
||||
|
||||
self.procedure_manager
|
||||
.start()
|
||||
.await
|
||||
.context(StartProcedureManagerSnafu)?;
|
||||
|
||||
self.frontend.start().await.context(StartFrontendSnafu)?;
|
||||
Ok(())
|
||||
}
|
||||
@@ -177,6 +188,11 @@ impl Instance {
|
||||
.await
|
||||
.context(ShutdownFrontendSnafu)?;
|
||||
|
||||
self.procedure_manager
|
||||
.stop()
|
||||
.await
|
||||
.context(StopProcedureManagerSnafu)?;
|
||||
|
||||
self.datanode
|
||||
.shutdown()
|
||||
.await
|
||||
@@ -277,7 +293,10 @@ impl StartCommand {
|
||||
if self.influxdb_enable {
|
||||
opts.influxdb.enable = self.influxdb_enable;
|
||||
}
|
||||
let kv_store = opts.kv_store.clone();
|
||||
|
||||
opts.user_provider = self.user_provider.clone();
|
||||
|
||||
let metadata_store = opts.metadata_store.clone();
|
||||
let procedure = opts.procedure.clone();
|
||||
let frontend = opts.clone().frontend_options();
|
||||
let logging = opts.logging.clone();
|
||||
@@ -285,7 +304,7 @@ impl StartCommand {
|
||||
|
||||
Ok(Options::Standalone(Box::new(MixOptions {
|
||||
procedure,
|
||||
kv_store,
|
||||
metadata_store,
|
||||
data_home: datanode.storage.data_home.to_string(),
|
||||
frontend,
|
||||
datanode,
|
||||
@@ -297,12 +316,13 @@ impl StartCommand {
|
||||
#[allow(unused_variables)]
|
||||
#[allow(clippy::diverging_sub_expression)]
|
||||
async fn build(self, opts: MixOptions) -> Result<Instance> {
|
||||
let mut fe_opts = opts.frontend;
|
||||
let fe_plugins = plugins::setup_frontend_plugins(&mut fe_opts)
|
||||
#[allow(clippy::unnecessary_mut_passed)]
|
||||
let fe_opts = opts.frontend.clone();
|
||||
let fe_plugins = plugins::setup_frontend_plugins(&fe_opts)
|
||||
.await
|
||||
.context(StartFrontendSnafu)?;
|
||||
|
||||
let dn_opts = opts.datanode;
|
||||
let dn_opts = opts.datanode.clone();
|
||||
|
||||
info!("Standalone start command: {:#?}", self);
|
||||
info!(
|
||||
@@ -310,24 +330,32 @@ impl StartCommand {
|
||||
fe_opts, dn_opts
|
||||
);
|
||||
|
||||
// Ensure the data_home directory exists.
|
||||
fs::create_dir_all(path::Path::new(&opts.data_home)).context(CreateDirSnafu {
|
||||
dir: &opts.data_home,
|
||||
})?;
|
||||
|
||||
let metadata_dir = metadata_store_dir(&opts.data_home);
|
||||
let (kv_store, procedure_manager) = FeInstance::try_build_standalone_components(
|
||||
let (kv_backend, procedure_manager) = FeInstance::try_build_standalone_components(
|
||||
metadata_dir,
|
||||
opts.kv_store,
|
||||
opts.procedure,
|
||||
opts.metadata_store.clone(),
|
||||
opts.procedure.clone(),
|
||||
)
|
||||
.await
|
||||
.context(StartFrontendSnafu)?;
|
||||
|
||||
let datanode =
|
||||
DatanodeBuilder::new(dn_opts.clone(), Some(kv_store.clone()), Default::default())
|
||||
.build()
|
||||
.await
|
||||
.context(StartDatanodeSnafu)?;
|
||||
let datanode = DatanodeBuilder::new(
|
||||
dn_opts.clone(),
|
||||
Some(kv_backend.clone()),
|
||||
Default::default(),
|
||||
)
|
||||
.build()
|
||||
.await
|
||||
.context(StartDatanodeSnafu)?;
|
||||
let region_server = datanode.region_server();
|
||||
|
||||
let catalog_manager = KvBackendCatalogManager::new(
|
||||
kv_store.clone(),
|
||||
kv_backend.clone(),
|
||||
Arc::new(DummyKvCacheInvalidator),
|
||||
Arc::new(StandaloneDatanodeManager(region_server.clone())),
|
||||
);
|
||||
@@ -341,32 +369,36 @@ impl StartCommand {
|
||||
// TODO: build frontend instance like in distributed mode
|
||||
let mut frontend = build_frontend(
|
||||
fe_plugins,
|
||||
kv_store,
|
||||
procedure_manager,
|
||||
kv_backend,
|
||||
procedure_manager.clone(),
|
||||
catalog_manager,
|
||||
region_server,
|
||||
)
|
||||
.await?;
|
||||
|
||||
frontend
|
||||
.build_servers(&fe_opts)
|
||||
.build_servers(opts)
|
||||
.await
|
||||
.context(StartFrontendSnafu)?;
|
||||
|
||||
Ok(Instance { datanode, frontend })
|
||||
Ok(Instance {
|
||||
datanode,
|
||||
frontend,
|
||||
procedure_manager,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// Build frontend instance in standalone mode
|
||||
async fn build_frontend(
|
||||
plugins: Plugins,
|
||||
kv_store: KvBackendRef,
|
||||
kv_backend: KvBackendRef,
|
||||
procedure_manager: ProcedureManagerRef,
|
||||
catalog_manager: CatalogManagerRef,
|
||||
region_server: RegionServer,
|
||||
) -> Result<FeInstance> {
|
||||
let frontend_instance = FeInstance::try_new_standalone(
|
||||
kv_store,
|
||||
kv_backend,
|
||||
procedure_manager,
|
||||
catalog_manager,
|
||||
plugins,
|
||||
@@ -398,6 +430,7 @@ mod tests {
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
#[allow(clippy::unnecessary_mut_passed)]
|
||||
let plugins = plugins::setup_frontend_plugins(&mut fe_opts).await.unwrap();
|
||||
|
||||
let provider = plugins.get::<UserProviderRef>().unwrap();
|
||||
@@ -481,6 +514,8 @@ mod tests {
|
||||
assert_eq!(None, fe_opts.mysql.reject_no_database);
|
||||
assert!(fe_opts.influxdb.enable);
|
||||
|
||||
assert_eq!("/tmp/greptimedb/test/wal", dn_opts.wal.dir.unwrap());
|
||||
|
||||
match &dn_opts.storage.store {
|
||||
datanode::config::ObjectStoreConfig::S3(s3_config) => {
|
||||
assert_eq!(
|
||||
@@ -614,7 +649,7 @@ mod tests {
|
||||
assert_eq!(options.influxdb, default_options.influxdb);
|
||||
assert_eq!(options.prom_store, default_options.prom_store);
|
||||
assert_eq!(options.wal, default_options.wal);
|
||||
assert_eq!(options.kv_store, default_options.kv_store);
|
||||
assert_eq!(options.metadata_store, default_options.metadata_store);
|
||||
assert_eq!(options.procedure, default_options.procedure);
|
||||
assert_eq!(options.logging, default_options.logging);
|
||||
assert_eq!(options.region_engine, default_options.region_engine);
|
||||
|
||||
@@ -14,10 +14,9 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// This file is copied from https://github.com/tikv/raft-engine/blob/8dd2a39f359ff16f5295f35343f626e0c10132fa/src/util.rs without any modification.
|
||||
// This file is copied from https://github.com/tikv/raft-engine/blob/8dd2a39f359ff16f5295f35343f626e0c10132fa/src/util.rs
|
||||
|
||||
use std::fmt;
|
||||
use std::fmt::{Display, Write};
|
||||
use std::fmt::{self, Debug, Display, Write};
|
||||
use std::ops::{Div, Mul};
|
||||
use std::str::FromStr;
|
||||
|
||||
@@ -34,7 +33,7 @@ pub const GIB: u64 = MIB * BINARY_DATA_MAGNITUDE;
|
||||
pub const TIB: u64 = GIB * BINARY_DATA_MAGNITUDE;
|
||||
pub const PIB: u64 = TIB * BINARY_DATA_MAGNITUDE;
|
||||
|
||||
#[derive(Clone, Debug, Copy, PartialEq, Eq, PartialOrd)]
|
||||
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd)]
|
||||
pub struct ReadableSize(pub u64);
|
||||
|
||||
impl ReadableSize {
|
||||
@@ -155,6 +154,12 @@ impl FromStr for ReadableSize {
|
||||
}
|
||||
}
|
||||
|
||||
impl Debug for ReadableSize {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "{}", self)
|
||||
}
|
||||
}
|
||||
|
||||
impl Display for ReadableSize {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
if self.0 >= PIB {
|
||||
|
||||
@@ -20,6 +20,8 @@ use serde::{Deserialize, Serialize};
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
|
||||
#[serde(default)]
|
||||
pub struct WalConfig {
|
||||
// wal directory
|
||||
pub dir: Option<String>,
|
||||
// wal file size in bytes
|
||||
pub file_size: ReadableSize,
|
||||
// wal purge threshold in bytes
|
||||
@@ -36,7 +38,8 @@ pub struct WalConfig {
|
||||
impl Default for WalConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
file_size: ReadableSize::mb(256), // log file size 256MB
|
||||
dir: None,
|
||||
file_size: ReadableSize::mb(256), // log file size 256MB
|
||||
purge_threshold: ReadableSize::gb(4), // purge threshold 4GB
|
||||
purge_interval: Duration::from_secs(600),
|
||||
read_batch_size: 128,
|
||||
@@ -51,14 +54,14 @@ pub fn metadata_store_dir(store_dir: &str) -> String {
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
|
||||
#[serde(default)]
|
||||
pub struct KvStoreConfig {
|
||||
pub struct KvBackendConfig {
|
||||
// Kv file size in bytes
|
||||
pub file_size: ReadableSize,
|
||||
// Kv purge threshold in bytes
|
||||
pub purge_threshold: ReadableSize,
|
||||
}
|
||||
|
||||
impl Default for KvStoreConfig {
|
||||
impl Default for KvBackendConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
// log file size 256MB
|
||||
|
||||
15
src/common/decimal/Cargo.toml
Normal file
@@ -0,0 +1,15 @@
|
||||
[package]
|
||||
name = "decimal"
|
||||
version.workspace = true
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[dependencies]
|
||||
arrow.workspace = true
|
||||
bigdecimal = { workspace = true }
|
||||
common-error = { workspace = true }
|
||||
common-macro = { workspace = true }
|
||||
rust_decimal = { workspace = true }
|
||||
serde.workspace = true
|
||||
serde_json = "1.0"
|
||||
snafu.workspace = true
|
||||
394
src/common/decimal/src/decimal128.rs
Normal file
@@ -0,0 +1,394 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::fmt::Display;
|
||||
use std::hash::Hash;
|
||||
use std::str::FromStr;
|
||||
|
||||
use bigdecimal::{BigDecimal, ToPrimitive};
|
||||
use rust_decimal::Decimal as RustDecimal;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use snafu::ResultExt;
|
||||
|
||||
use crate::error::{
|
||||
self, BigDecimalOutOfRangeSnafu, Error, InvalidPrecisionOrScaleSnafu, ParseBigDecimalStrSnafu,
|
||||
ParseRustDecimalStrSnafu,
|
||||
};
|
||||
|
||||
/// The maximum precision for [Decimal128] values
|
||||
pub const DECIMAL128_MAX_PRECISION: u8 = 38;
|
||||
|
||||
/// The maximum scale for [Decimal128] values
|
||||
pub const DECIMAL128_MAX_SCALE: i8 = 38;
|
||||
|
||||
/// The default scale for [Decimal128] values
|
||||
pub const DECIMAL128_DEFAULT_SCALE: i8 = 10;
|
||||
|
||||
/// The maximum bytes length that an accurate RustDecimal can represent
|
||||
const BYTES_TO_OVERFLOW_RUST_DECIMAL: usize = 28;
|
||||
|
||||
/// A 128-bit decimal, using an i128 to represent the value.
///
/// **precision**: the total number of digits in the number; its range is \[1, 38\].
///
/// **scale**: the number of digits to the right of the decimal point; its range is \[0, precision\].
|
||||
#[derive(Debug, Default, Eq, Copy, Clone, Serialize, Deserialize)]
|
||||
pub struct Decimal128 {
|
||||
value: i128,
|
||||
precision: u8,
|
||||
scale: i8,
|
||||
}
|
||||
|
||||
impl Decimal128 {
|
||||
/// Create a new Decimal128 from i128, precision and scale.
|
||||
pub fn new_unchecked(value: i128, precision: u8, scale: i8) -> Self {
|
||||
Self {
|
||||
value,
|
||||
precision,
|
||||
scale,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn try_new(value: i128, precision: u8, scale: i8) -> error::Result<Self> {
|
||||
// Make sure the precision and scale are valid.
|
||||
valid_precision_and_scale(precision, scale)?;
|
||||
Ok(Self {
|
||||
value,
|
||||
precision,
|
||||
scale,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn val(&self) -> i128 {
|
||||
self.value
|
||||
}
|
||||
|
||||
/// Returns the precision of this decimal.
|
||||
pub fn precision(&self) -> u8 {
|
||||
self.precision
|
||||
}
|
||||
|
||||
/// Returns the scale of this decimal.
|
||||
pub fn scale(&self) -> i8 {
|
||||
self.scale
|
||||
}
|
||||
|
||||
/// Convert to ScalarValue
|
||||
pub fn to_scalar_value(&self) -> (Option<i128>, u8, i8) {
|
||||
(Some(self.value), self.precision, self.scale)
|
||||
}
|
||||
}
|
||||
|
||||
impl PartialEq for Decimal128 {
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
self.precision.eq(&other.precision)
|
||||
&& self.scale.eq(&other.scale)
|
||||
&& self.value.eq(&other.value)
|
||||
}
|
||||
}
|
||||
|
||||
// Two decimal values can be compared if they have the same precision and scale.
|
||||
impl PartialOrd for Decimal128 {
|
||||
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
|
||||
if self.precision == other.precision && self.scale == other.scale {
|
||||
return self.value.partial_cmp(&other.value);
|
||||
}
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
/// Convert from string to Decimal128.
/// If the string is no longer than 28 bytes, rust_decimal can parse it accurately;
/// for longer strings, fall back to BigDecimal to get an accurate result.
|
||||
impl FromStr for Decimal128 {
|
||||
type Err = Error;
|
||||
|
||||
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
||||
let len = s.as_bytes().len();
|
||||
if len <= BYTES_TO_OVERFLOW_RUST_DECIMAL {
|
||||
let rd = RustDecimal::from_str_exact(s).context(ParseRustDecimalStrSnafu { raw: s })?;
|
||||
Ok(Self::from(rd))
|
||||
} else {
|
||||
let bd = BigDecimal::from_str(s).context(ParseBigDecimalStrSnafu { raw: s })?;
|
||||
Self::try_from(bd)
|
||||
}
|
||||
}
|
||||
}
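
// Illustrative sketch, not part of the original file: what the two parsing paths
// above yield. The expected precision/scale pairs mirror test_decimal128_from_str.
#[cfg(test)]
#[test]
fn example_from_str_paths() {
    // <= 28 bytes: parsed exactly by rust_decimal.
    let short = Decimal128::from_str("1234567890.123456789").unwrap();
    assert_eq!((short.precision(), short.scale()), (19, 9));

    // > 28 bytes: parsed through BigDecimal, still capped at precision 38.
    let long = Decimal128::from_str("1234567890.1234567890123456789012345678").unwrap();
    assert_eq!((long.precision(), long.scale()), (38, 28));
}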

impl Display for Decimal128 {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(
            f,
            "{}",
            format_decimal_str(&self.value.to_string(), self.precision as usize, self.scale)
        )
    }
}

impl Hash for Decimal128 {
    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
        state.write_i128(self.value);
        state.write_u8(self.precision);
        state.write_i8(self.scale);
    }
}

impl From<Decimal128> for serde_json::Value {
    fn from(decimal: Decimal128) -> Self {
        serde_json::Value::String(decimal.to_string())
    }
}

impl From<Decimal128> for i128 {
    fn from(decimal: Decimal128) -> Self {
        decimal.val()
    }
}

impl From<i128> for Decimal128 {
    fn from(value: i128) -> Self {
        Self {
            value,
            precision: DECIMAL128_MAX_PRECISION,
            scale: DECIMAL128_DEFAULT_SCALE,
        }
    }
}
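
// Illustrative sketch, not part of the original file: `From<i128>` treats the raw
// integer as a mantissa with the default scale of 10 (and maximum precision 38),
// so the printed value is not the integer itself.
#[cfg(test)]
#[test]
fn example_from_i128_uses_default_scale() {
    let d = Decimal128::from(123_i128);
    assert_eq!((d.precision(), d.scale()), (38, 10));
    assert_eq!(d.to_string(), "0.0000000123");
}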

/// Convert from RustDecimal to Decimal128.
/// The range RustDecimal can represent is smaller than that of Decimal128,
/// so it is safe to convert a RustDecimal into a Decimal128.
impl From<RustDecimal> for Decimal128 {
    fn from(rd: RustDecimal) -> Self {
        let s = rd.to_string();
        let precision = (s.len() - s.matches(&['.', '-'][..]).count()) as u8;
        Self {
            value: rd.mantissa(),
            precision,
            scale: rd.scale() as i8,
        }
    }
}

/// Try to convert a BigDecimal into a Decimal128.
/// The range BigDecimal can represent is larger than that of Decimal128,
/// so the conversion can fail; if the BigDecimal is out of range, an error is returned.
impl TryFrom<BigDecimal> for Decimal128 {
    type Error = Error;

    fn try_from(value: BigDecimal) -> Result<Self, Self::Error> {
        let precision = value.digits();
        let (big_int, scale) = value.as_bigint_and_exponent();
        // Convert big_int to i128; if the conversion fails, return an error.
        big_int
            .to_i128()
            .map(|val| Self::try_new(val, precision as u8, scale as i8))
            .unwrap_or_else(|| BigDecimalOutOfRangeSnafu { value }.fail())
    }
}
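
// Illustrative sketch, not part of the original file: the fallible BigDecimal path.
// More than 38 significant digits is rejected by `try_new` inside `try_from`
// (this mirrors the "precision > 38" case in the tests below).
#[cfg(test)]
#[test]
fn example_try_from_bigdecimal_rejects_39_digits() {
    let parsed = Decimal128::from_str("1234567890.12345678901234567890123456789");
    assert!(parsed.is_err());
}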

/// Ported from arrow-rs,
/// see https://github.com/Apache/arrow-rs/blob/master/arrow-array/src/types.rs#L1323-L1344
fn format_decimal_str(value_str: &str, precision: usize, scale: i8) -> String {
    let (sign, rest) = match value_str.strip_prefix('-') {
        Some(stripped) => ("-", stripped),
        None => ("", value_str),
    };

    let bound = precision.min(rest.len()) + sign.len();
    let value_str = &value_str[0..bound];

    if scale == 0 {
        value_str.to_string()
    } else if scale < 0 {
        let padding = value_str.len() + scale.unsigned_abs() as usize;
        format!("{value_str:0<padding$}")
    } else if rest.len() > scale as usize {
        // Decimal separator is in the middle of the string
        let (whole, decimal) = value_str.split_at(value_str.len() - scale as usize);
        format!("{whole}.{decimal}")
    } else {
        // String has to be padded
        format!("{}0.{:0>width$}", sign, rest, width = scale as usize)
    }
}
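
// Illustrative sketch, not part of the original file: the formatting branches above,
// with expectations taken from test_common_decimal128.
#[cfg(test)]
#[test]
fn example_format_decimal_str_branches() {
    // scale == 0: digits printed as-is.
    assert_eq!(Decimal128::try_new(123456789, 9, 0).unwrap().to_string(), "123456789");
    // scale < 0: right-padded with |scale| zeros.
    assert_eq!(Decimal128::try_new(123, 3, -2).unwrap().to_string(), "12300");
    // 0 < scale < number of digits: the separator splits the digits.
    assert_eq!(Decimal128::try_new(123456789, 9, 2).unwrap().to_string(), "1234567.89");
    // scale >= number of digits: left-padded after "0.".
    assert_eq!(Decimal128::try_new(123, 38, 10).unwrap().to_string(), "0.0000000123");
}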

/// Check whether the precision and scale are valid.
fn valid_precision_and_scale(precision: u8, scale: i8) -> error::Result<()> {
    if precision == 0 {
        return InvalidPrecisionOrScaleSnafu {
            reason: format!(
                "precision cannot be 0, has to be between [1, {}]",
                DECIMAL128_MAX_PRECISION
            ),
        }
        .fail();
    }
    if precision > DECIMAL128_MAX_PRECISION {
        return InvalidPrecisionOrScaleSnafu {
            reason: format!(
                "precision {} is greater than max {}",
                precision, DECIMAL128_MAX_PRECISION
            ),
        }
        .fail();
    }
    if scale > DECIMAL128_MAX_SCALE {
        return InvalidPrecisionOrScaleSnafu {
            reason: format!(
                "scale {} is greater than max {}",
                scale, DECIMAL128_MAX_SCALE
            ),
        }
        .fail();
    }
    if scale > 0 && scale > precision as i8 {
        return InvalidPrecisionOrScaleSnafu {
            reason: format!("scale {} is greater than precision {}", scale, precision),
        }
        .fail();
    }

    Ok(())
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_common_decimal128() {
        let decimal = Decimal128::new_unchecked(123456789, 9, 3);
        assert_eq!(decimal.to_string(), "123456.789");

        let decimal = Decimal128::try_new(123456789, 9, 0);
        assert_eq!(decimal.unwrap().to_string(), "123456789");

        let decimal = Decimal128::try_new(123456789, 9, 2);
        assert_eq!(decimal.unwrap().to_string(), "1234567.89");

        let decimal = Decimal128::try_new(123, 3, -2);
        assert_eq!(decimal.unwrap().to_string(), "12300");

        // invalid precision or scale

        // precision is 0
        let decimal = Decimal128::try_new(123, 0, 0);
        assert!(decimal.is_err());

        // precision is greater than 38
        let decimal = Decimal128::try_new(123, 39, 0);
        assert!(decimal.is_err());

        // scale is greater than 38
        let decimal = Decimal128::try_new(123, 38, 39);
        assert!(decimal.is_err());

        // scale is greater than precision
        let decimal = Decimal128::try_new(123, 3, 4);
        assert!(decimal.is_err());
    }

    #[test]
    fn test_decimal128_from_str() {
        // 0 < precision <= 28
        let decimal = Decimal128::from_str("1234567890.123456789").unwrap();
        assert_eq!(decimal.to_string(), "1234567890.123456789");
        assert_eq!(decimal.precision(), 19);
        assert_eq!(decimal.scale(), 9);

        let decimal = Decimal128::from_str("1234567890.123456789012345678").unwrap();
        assert_eq!(decimal.to_string(), "1234567890.123456789012345678");
        assert_eq!(decimal.precision(), 28);
        assert_eq!(decimal.scale(), 18);

        // 28 < precision <= 38
        let decimal = Decimal128::from_str("1234567890.1234567890123456789012").unwrap();
        assert_eq!(decimal.to_string(), "1234567890.1234567890123456789012");
        assert_eq!(decimal.precision(), 32);
        assert_eq!(decimal.scale(), 22);

        let decimal = Decimal128::from_str("1234567890.1234567890123456789012345678").unwrap();
        assert_eq!(
            decimal.to_string(),
            "1234567890.1234567890123456789012345678"
        );
        assert_eq!(decimal.precision(), 38);
        assert_eq!(decimal.scale(), 28);

        // precision > 38
        let decimal = Decimal128::from_str("1234567890.12345678901234567890123456789");
        assert!(decimal.is_err());
    }

    #[test]
    #[ignore]
    fn test_parse_decimal128_speed() {
        // RustDecimal::from_str: 1.124855167s
        for _ in 0..1500000 {
            let _ = RustDecimal::from_str("1234567890.123456789012345678999").unwrap();
        }

        // BigDecimal::try_from: 6.799290042s
        for _ in 0..1500000 {
            let _ = BigDecimal::from_str("1234567890.123456789012345678999").unwrap();
        }
    }

    #[test]
    fn test_decimal128_precision_and_scale() {
        // precision and scale from Decimal(1,1) to Decimal(38,38)
        for precision in 1..=38 {
            for scale in 1..=precision {
                let decimal_str = format!("0.{}", "1".repeat(scale as usize));
                let decimal = Decimal128::from_str(&decimal_str).unwrap();
                assert_eq!(decimal_str, decimal.to_string());
            }
        }
    }

    #[test]
    fn test_decimal128_compare() {
        // the same precision and scale
        let decimal1 = Decimal128::from_str("1234567890.123456789012345678999").unwrap();
        let decimal2 = Decimal128::from_str("1234567890.123456789012345678999").unwrap();
        assert!(decimal1 == decimal2);

        let decimal1 = Decimal128::from_str("1234567890.123456789012345678999").unwrap();
        let decimal2 = Decimal128::from_str("1234567890.123456789012345678998").unwrap();
        assert!(decimal1 > decimal2);

        let decimal1 = Decimal128::from_str("1234567890.123456789012345678999").unwrap();
        let decimal2 = Decimal128::from_str("1234567890.123456789012345678998").unwrap();
        assert!(decimal2 < decimal1);

        let decimal1 = Decimal128::from_str("1234567890.123456789012345678999").unwrap();
        let decimal2 = Decimal128::from_str("1234567890.123456789012345678998").unwrap();
        assert!(decimal1 >= decimal2);

        let decimal1 = Decimal128::from_str("1234567890.123456789012345678999").unwrap();
        let decimal2 = Decimal128::from_str("1234567890.123456789012345678998").unwrap();
        assert!(decimal2 <= decimal1);

        let decimal1 = Decimal128::from_str("1234567890.123456789012345678999").unwrap();
        let decimal2 = Decimal128::from_str("1234567890.123456789012345678998").unwrap();
        assert!(decimal1 != decimal2);

        // different precision and scale cmp is None
        let decimal1 = Decimal128::from_str("1234567890.123456789012345678999").unwrap();
        let decimal2 = Decimal128::from_str("1234567890.123").unwrap();
        assert_eq!(decimal1.partial_cmp(&decimal2), None);
    }
}
src/common/decimal/src/error.rs (new file, 72 lines)
@@ -0,0 +1,72 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use bigdecimal::BigDecimal;
use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
use snafu::{Location, Snafu};

#[derive(Snafu)]
#[snafu(visibility(pub))]
#[stack_trace_debug]
pub enum Error {
    #[snafu(display("Decimal out of range, decimal value: {}", value))]
    BigDecimalOutOfRange {
        value: BigDecimal,
        location: Location,
    },

    #[snafu(display("Failed to parse string to rust decimal, raw: {}", raw))]
    ParseRustDecimalStr {
        raw: String,
        #[snafu(source)]
        error: rust_decimal::Error,
    },

    #[snafu(display("Failed to parse string to big decimal, raw: {}", raw))]
    ParseBigDecimalStr {
        raw: String,
        #[snafu(source)]
        error: bigdecimal::ParseBigDecimalError,
    },

    #[snafu(display("Invalid precision or scale, reason: {}", reason))]
    InvalidPrecisionOrScale { reason: String, location: Location },
}

impl ErrorExt for Error {
    fn status_code(&self) -> StatusCode {
        match self {
            Error::BigDecimalOutOfRange { .. } => StatusCode::Internal,
            Error::ParseRustDecimalStr { .. }
            | Error::InvalidPrecisionOrScale { .. }
            | Error::ParseBigDecimalStr { .. } => StatusCode::InvalidArguments,
        }
    }

    fn location_opt(&self) -> Option<common_error::snafu::Location> {
        match self {
            Error::BigDecimalOutOfRange { location, .. } => Some(*location),
            Error::InvalidPrecisionOrScale { location, .. } => Some(*location),
            Error::ParseRustDecimalStr { .. } | Error::ParseBigDecimalStr { .. } => None,
        }
    }

    fn as_any(&self) -> &dyn std::any::Any {
        self
    }
}

pub type Result<T> = std::result::Result<T, Error>;
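
// Illustrative sketch, not part of the original file: how callers see these errors.
// Parse failures are user-facing (InvalidArguments), while out-of-range conversions
// stay Internal; decimal128.rs attaches the variants through the snafu selectors above.
#[cfg(test)]
mod example_usage {
    use std::str::FromStr;

    use common_error::ext::ErrorExt;
    use common_error::status_code::StatusCode;

    #[test]
    fn parse_error_maps_to_invalid_arguments() {
        let err = crate::decimal128::Decimal128::from_str("not-a-number").unwrap_err();
        assert_eq!(err.status_code(), StatusCode::InvalidArguments);
    }
}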

src/common/decimal/src/lib.rs (new file, 16 lines)
@@ -0,0 +1,16 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

pub mod decimal128;
pub mod error;

@@ -39,17 +39,25 @@ pub trait ErrorExt: StackError {
where
Self: Sized,
{
let error = self.last();
if let Some(external_error) = error.source() {
let external_root = external_error.sources().last().unwrap();

if error.to_string().is_empty() {
format!("{external_root}")
} else {
format!("{error}: {external_root}")
match self.status_code() {
StatusCode::Unknown | StatusCode::Internal => {
// masks internal error from end user
format!("Internal error: {}", self.status_code() as u32)
}
_ => {
let error = self.last();
if let Some(external_error) = error.source() {
let external_root = external_error.sources().last().unwrap();

if error.to_string().is_empty() {
format!("{external_root}")
} else {
format!("{error}: {external_root}")
}
} else {
format!("{error}")
}
}
} else {
format!("{error}")
}
}
}
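
// Illustrative sketch of the behaviour change above (not part of the diff): for
// Unknown/Internal status codes the user-visible message is reduced to a numeric
// code, while other errors keep the "{error}: {external_root}" style message.
fn mask_if_internal(code: StatusCode, detailed: String) -> String {
    match code {
        StatusCode::Unknown | StatusCode::Internal => {
            format!("Internal error: {}", code as u32)
        }
        _ => detailed,
    }
}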

@@ -14,10 +14,10 @@

use std::fmt;

use strum::EnumString;
use strum::{AsRefStr, EnumString};

/// Common status code for public API.
#[derive(Debug, Clone, Copy, PartialEq, Eq, EnumString)]
#[derive(Debug, Clone, Copy, PartialEq, Eq, EnumString, AsRefStr)]
pub enum StatusCode {
// ====== Begin of common status code ==============
/// Success.

@@ -58,9 +58,15 @@ impl Function for RangeFunction {
"range_fn"
}

// range_fn will never be used; return_type can be an arbitrary value, it is not important
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
Ok(ConcreteDataType::float64_datatype())
// The first argument to range_fn is the expression to be evaluated
fn return_type(&self, input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
input_types
.first()
.cloned()
.ok_or(DataFusionError::Internal(
"No expr found in range_fn".into(),
))
.context(GeneralDataFusionSnafu)
}

/// `range_fn` will never be used. As long as a legal signature is returned, the specific content of the signature does not matter.

@@ -16,7 +16,8 @@ use std::fmt;

use common_query::error::{self, Result};
use common_query::prelude::{Signature, Volatility};
use datatypes::arrow::compute::kernels::{arithmetic, cast};
use datafusion::arrow::compute::kernels::numeric;
use datatypes::arrow::compute::kernels::cast;
use datatypes::arrow::datatypes::DataType;
use datatypes::prelude::*;
use datatypes::vectors::{Helper, VectorRef};

@@ -51,11 +52,11 @@ impl Function for RateFunction {
let val = &columns[0].to_arrow_array();
let val_0 = val.slice(0, val.len() - 1);
let val_1 = val.slice(1, val.len() - 1);
let dv = arithmetic::subtract_dyn(&val_1, &val_0).context(error::ArrowComputeSnafu)?;
let dv = numeric::sub(&val_1, &val_0).context(error::ArrowComputeSnafu)?;
let ts = &columns[1].to_arrow_array();
let ts_0 = ts.slice(0, ts.len() - 1);
let ts_1 = ts.slice(1, ts.len() - 1);
let dt = arithmetic::subtract_dyn(&ts_1, &ts_0).context(error::ArrowComputeSnafu)?;
let dt = numeric::sub(&ts_1, &ts_0).context(error::ArrowComputeSnafu)?;

let dv = cast::cast(&dv, &DataType::Float64).context(error::TypeCastSnafu {
typ: DataType::Float64,

@@ -63,7 +64,7 @@ impl Function for RateFunction {
let dt = cast::cast(&dt, &DataType::Float64).context(error::TypeCastSnafu {
typ: DataType::Float64,
})?;
let rate = arithmetic::divide_dyn(&dv, &dt).context(error::ArrowComputeSnafu)?;
let rate = numeric::div(&dv, &dt).context(error::ArrowComputeSnafu)?;
let v = Helper::try_into_vector(&rate).context(error::FromArrowArraySnafu)?;

Ok(v)
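
// Worked example of what the kernel calls above compute (illustrative numbers only):
// dv = value[i+1] - value[i], dt = ts[i+1] - ts[i], rate = dv / dt, element-wise.
fn example_rate() {
    let values = [1.0_f64, 4.0, 10.0];
    let ts = [10.0_f64, 20.0, 30.0];
    let rate: Vec<f64> = values
        .windows(2)
        .zip(ts.windows(2))
        .map(|(v, t)| (v[1] - v[0]) / (t[1] - t[0]))
        .collect();
    assert_eq!(rate, vec![0.3, 0.6]);
}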

@@ -18,9 +18,9 @@ use common_query::error::{
self, ArrowComputeSnafu, InvalidFuncArgsSnafu, Result, UnsupportedInputDataTypeSnafu,
};
use common_query::prelude::{Signature, Volatility};
use datafusion::arrow::compute::kernels::cmp::gt;
use datatypes::arrow::array::AsArray;
use datatypes::arrow::compute::cast;
use datatypes::arrow::compute::kernels::comparison::gt_dyn;
use datatypes::arrow::compute::kernels::zip;
use datatypes::arrow::datatypes::{DataType as ArrowDataType, Date32Type};
use datatypes::prelude::ConcreteDataType;

@@ -72,7 +72,7 @@ impl Function for GreatestFunction {
let column2 = cast(&columns[1].to_arrow_array(), &ArrowDataType::Date32)
.context(ArrowComputeSnafu)?;
let column2 = column2.as_primitive::<Date32Type>();
let boolean_array = gt_dyn(&column1, &column2).context(ArrowComputeSnafu)?;
let boolean_array = gt(&column1, &column2).context(ArrowComputeSnafu)?;
let result =
zip::zip(&boolean_array, &column1, &column2).context(ArrowComputeSnafu)?;
Ok(Helper::try_into_vector(&result).context(error::FromArrowArraySnafu)?)

@@ -82,7 +82,7 @@ impl Function for GreatestFunction {
let column1 = column1.as_primitive::<Date32Type>();
let column2 = columns[1].to_arrow_array();
let column2 = column2.as_primitive::<Date32Type>();
let boolean_array = gt_dyn(&column1, &column2).context(ArrowComputeSnafu)?;
let boolean_array = gt(&column1, &column2).context(ArrowComputeSnafu)?;
let result =
zip::zip(&boolean_array, &column1, &column2).context(ArrowComputeSnafu)?;
Ok(Helper::try_into_vector(&result).context(error::FromArrowArraySnafu)?)
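
// Illustrative sketch (not part of the diff): `gt` yields a per-row boolean mask and
// `zip` selects from the first column where the mask is true, otherwise from the
// second — an element-wise "greatest". A plain-Rust analogue of that selection:
fn example_elementwise_greatest() {
    let column1 = [5_i32, 1, 9];
    let column2 = [3_i32, 4, 9];
    let mask: Vec<bool> = column1.iter().zip(&column2).map(|(a, b)| a > b).collect();
    let greatest: Vec<i32> = mask
        .iter()
        .zip(column1.iter().zip(&column2))
        .map(|(&m, (a, b))| if m { *a } else { *b })
        .collect();
    assert_eq!(greatest, vec![5, 4, 9]);
}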

@@ -18,36 +18,50 @@ use std::sync::Arc;

use common_query::error::{InvalidFuncArgsSnafu, Result, UnsupportedInputDataTypeSnafu};
use common_query::prelude::{Signature, Volatility};
use common_time::timestamp::TimeUnit;
use common_time::Timestamp;
use common_time::{Date, DateTime, Timestamp};
use datatypes::prelude::ConcreteDataType;
use datatypes::types::TimestampType;
use datatypes::vectors::{
Int64Vector, StringVector, TimestampMicrosecondVector, TimestampMillisecondVector,
TimestampNanosecondVector, TimestampSecondVector, Vector, VectorRef,
};
use datatypes::vectors::{Int64Vector, VectorRef};
use snafu::ensure;

use crate::scalars::function::{Function, FunctionContext};

/// A function to convert the column into the unix timestamp in seconds.
#[derive(Clone, Debug, Default)]
pub struct ToUnixtimeFunction;

const NAME: &str = "to_unixtime";

fn convert_to_seconds(arg: &str) -> Option<i64> {
match Timestamp::from_str(arg) {
Ok(ts) => {
let sec_mul = (TimeUnit::Second.factor() / ts.unit().factor()) as i64;
Some(ts.value().div_euclid(sec_mul))
}
Err(_err) => None,
if let Ok(dt) = DateTime::from_str(arg) {
return Some(dt.val() / 1000);
}

if let Ok(ts) = Timestamp::from_str(arg) {
return Some(ts.split().0);
}

if let Ok(date) = Date::from_str(arg) {
return Some(date.to_secs());
}

None
}

fn process_vector(vector: &dyn Vector) -> Vec<Option<i64>> {
fn convert_timestamps_to_seconds(vector: &VectorRef) -> Vec<Option<i64>> {
(0..vector.len())
.map(|i| paste::expr!((vector.get(i)).as_timestamp().map(|ts| ts.value())))
.map(|i| vector.get(i).as_timestamp().map(|ts| ts.split().0))
.collect::<Vec<Option<i64>>>()
}

fn convert_dates_to_seconds(vector: &VectorRef) -> Vec<Option<i64>> {
(0..vector.len())
.map(|i| vector.get(i).as_date().map(|dt| dt.to_secs()))
.collect::<Vec<Option<i64>>>()
}

fn convert_datetimes_to_seconds(vector: &VectorRef) -> Vec<Option<i64>> {
(0..vector.len())
.map(|i| vector.get(i).as_datetime().map(|dt| dt.val() / 1000))
.collect::<Vec<Option<i64>>>()
}
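
// Unit bookkeeping behind the helpers above, using the numbers from the tests below:
// - Date is stored as days since the Unix epoch:  123 days -> 123 * 86_400 = 10_627_200 s
// - DateTime is stored in milliseconds:           123_000 ms / 1_000 = 123 s
// - Timestamp keeps its own unit; `split().0` is the whole-second part.
fn example_to_unixtime_units() {
    assert_eq!(123_i64 * 86_400, 10_627_200);
    assert_eq!(123_000_i64 / 1_000, 123);
}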

@@ -67,6 +81,8 @@ impl Function for ToUnixtimeFunction {
ConcreteDataType::string_datatype(),
ConcreteDataType::int32_datatype(),
ConcreteDataType::int64_datatype(),
ConcreteDataType::date_datatype(),
ConcreteDataType::datetime_datatype(),
ConcreteDataType::timestamp_second_datatype(),
ConcreteDataType::timestamp_millisecond_datatype(),
ConcreteDataType::timestamp_microsecond_datatype(),

@@ -87,51 +103,29 @@ impl Function for ToUnixtimeFunction {
}
);

let vector = &columns[0];

match columns[0].data_type() {
ConcreteDataType::String(_) => {
let array = columns[0].to_arrow_array();
let vector = StringVector::try_from_arrow_array(&array).unwrap();
Ok(Arc::new(Int64Vector::from(
(0..vector.len())
.map(|i| convert_to_seconds(&vector.get(i).to_string()))
.collect::<Vec<_>>(),
)))
}
ConcreteDataType::String(_) => Ok(Arc::new(Int64Vector::from(
(0..vector.len())
.map(|i| convert_to_seconds(&vector.get(i).to_string()))
.collect::<Vec<_>>(),
))),
ConcreteDataType::Int64(_) | ConcreteDataType::Int32(_) => {
let array = columns[0].to_arrow_array();
Ok(Arc::new(Int64Vector::try_from_arrow_array(&array).unwrap()))
// Safety: the cast always succeeds here
Ok(vector.cast(&ConcreteDataType::int64_datatype()).unwrap())
}
ConcreteDataType::Timestamp(ts) => {
let array = columns[0].to_arrow_array();
let value = match ts {
TimestampType::Second(_) => {
let vector = paste::expr!(TimestampSecondVector::try_from_arrow_array(
array
)
.unwrap());
process_vector(&vector)
}
TimestampType::Millisecond(_) => {
let vector = paste::expr!(
TimestampMillisecondVector::try_from_arrow_array(array).unwrap()
);
process_vector(&vector)
}
TimestampType::Microsecond(_) => {
let vector = paste::expr!(
TimestampMicrosecondVector::try_from_arrow_array(array).unwrap()
);
process_vector(&vector)
}
TimestampType::Nanosecond(_) => {
let vector = paste::expr!(TimestampNanosecondVector::try_from_arrow_array(
array
)
.unwrap());
process_vector(&vector)
}
};
Ok(Arc::new(Int64Vector::from(value)))
ConcreteDataType::Date(_) => {
let seconds = convert_dates_to_seconds(vector);
Ok(Arc::new(Int64Vector::from(seconds)))
}
ConcreteDataType::DateTime(_) => {
let seconds = convert_datetimes_to_seconds(vector);
Ok(Arc::new(Int64Vector::from(seconds)))
}
ConcreteDataType::Timestamp(_) => {
let seconds = convert_timestamps_to_seconds(vector);
Ok(Arc::new(Int64Vector::from(seconds)))
}
_ => UnsupportedInputDataTypeSnafu {
function: NAME,

@@ -151,11 +145,11 @@ impl fmt::Display for ToUnixtimeFunction {
#[cfg(test)]
mod tests {
use common_query::prelude::TypeSignature;
use datatypes::prelude::{ConcreteDataType, ScalarVectorBuilder};
use datatypes::scalars::ScalarVector;
use datatypes::timestamp::TimestampSecond;
use datatypes::prelude::ConcreteDataType;
use datatypes::value::Value;
use datatypes::vectors::{StringVector, TimestampSecondVector};
use datatypes::vectors::{
DateTimeVector, DateVector, StringVector, TimestampMillisecondVector, TimestampSecondVector,
};

use super::{ToUnixtimeFunction, *};
use crate::scalars::Function;

@@ -170,18 +164,20 @@ mod tests {
);

assert!(matches!(f.signature(),
Signature {
type_signature: TypeSignature::Uniform(1, valid_types),
volatility: Volatility::Immutable
} if valid_types == vec![
ConcreteDataType::string_datatype(),
ConcreteDataType::int32_datatype(),
ConcreteDataType::int64_datatype(),
ConcreteDataType::timestamp_second_datatype(),
ConcreteDataType::timestamp_millisecond_datatype(),
ConcreteDataType::timestamp_microsecond_datatype(),
ConcreteDataType::timestamp_nanosecond_datatype(),
]
Signature {
type_signature: TypeSignature::Uniform(1, valid_types),
volatility: Volatility::Immutable
} if valid_types == vec![
ConcreteDataType::string_datatype(),
ConcreteDataType::int32_datatype(),
ConcreteDataType::int64_datatype(),
ConcreteDataType::date_datatype(),
ConcreteDataType::datetime_datatype(),
ConcreteDataType::timestamp_second_datatype(),
ConcreteDataType::timestamp_millisecond_datatype(),
ConcreteDataType::timestamp_microsecond_datatype(),
ConcreteDataType::timestamp_nanosecond_datatype(),
]
));

let times = vec![

@@ -212,26 +208,6 @@ mod tests {
#[test]
fn test_int_to_unixtime() {
let f = ToUnixtimeFunction;
assert_eq!("to_unixtime", f.name());
assert_eq!(
ConcreteDataType::int64_datatype(),
f.return_type(&[]).unwrap()
);

assert!(matches!(f.signature(),
Signature {
type_signature: TypeSignature::Uniform(1, valid_types),
volatility: Volatility::Immutable
} if valid_types == vec![
ConcreteDataType::string_datatype(),
ConcreteDataType::int32_datatype(),
ConcreteDataType::int64_datatype(),
ConcreteDataType::timestamp_second_datatype(),
ConcreteDataType::timestamp_millisecond_datatype(),
ConcreteDataType::timestamp_microsecond_datatype(),
ConcreteDataType::timestamp_nanosecond_datatype(),
]
));

let times = vec![Some(3_i64), None, Some(5_i64), None];
let results = [Some(3), None, Some(5), None];

@@ -254,38 +230,13 @@ mod tests {
}

#[test]
fn test_timestamp_to_unixtime() {
fn test_date_to_unixtime() {
let f = ToUnixtimeFunction;
assert_eq!("to_unixtime", f.name());
assert_eq!(
ConcreteDataType::int64_datatype(),
f.return_type(&[]).unwrap()
);

assert!(matches!(f.signature(),
Signature {
type_signature: TypeSignature::Uniform(1, valid_types),
volatility: Volatility::Immutable
} if valid_types == vec![
ConcreteDataType::string_datatype(),
ConcreteDataType::int32_datatype(),
ConcreteDataType::int64_datatype(),
ConcreteDataType::timestamp_second_datatype(),
ConcreteDataType::timestamp_millisecond_datatype(),
ConcreteDataType::timestamp_microsecond_datatype(),
ConcreteDataType::timestamp_nanosecond_datatype(),
]
));

let times: Vec<Option<TimestampSecond>> = vec![
Some(TimestampSecond::new(123)),
None,
Some(TimestampSecond::new(42)),
None,
];
let results = [Some(123), None, Some(42), None];
let ts_vector: TimestampSecondVector = build_vector_from_slice(&times);
let args: Vec<VectorRef> = vec![Arc::new(ts_vector)];
let times = vec![Some(123), None, Some(42), None];
let results = [Some(10627200), None, Some(3628800), None];
let date_vector = DateVector::from(times.clone());
let args: Vec<VectorRef> = vec![Arc::new(date_vector)];
let vector = f.eval(FunctionContext::default(), &args).unwrap();
assert_eq!(4, vector.len());
for (i, _t) in times.iter().enumerate() {

@@ -303,11 +254,73 @@ mod tests {
}
}

fn build_vector_from_slice<T: ScalarVector>(items: &[Option<T::RefItem<'_>>]) -> T {
let mut builder = T::Builder::with_capacity(items.len());
for item in items {
builder.push(*item);
#[test]
fn test_datetime_to_unixtime() {
let f = ToUnixtimeFunction;

let times = vec![Some(123000), None, Some(42000), None];
let results = [Some(123), None, Some(42), None];
let date_vector = DateTimeVector::from(times.clone());
let args: Vec<VectorRef> = vec![Arc::new(date_vector)];
let vector = f.eval(FunctionContext::default(), &args).unwrap();
assert_eq!(4, vector.len());
for (i, _t) in times.iter().enumerate() {
let v = vector.get(i);
if i == 1 || i == 3 {
assert_eq!(Value::Null, v);
continue;
}
match v {
Value::Int64(ts) => {
assert_eq!(ts, (*results.get(i).unwrap()).unwrap());
}
_ => unreachable!(),
}
}
}

#[test]
fn test_timestamp_to_unixtime() {
let f = ToUnixtimeFunction;

let times = vec![Some(123), None, Some(42), None];
let results = [Some(123), None, Some(42), None];
let ts_vector = TimestampSecondVector::from(times.clone());
let args: Vec<VectorRef> = vec![Arc::new(ts_vector)];
let vector = f.eval(FunctionContext::default(), &args).unwrap();
assert_eq!(4, vector.len());
for (i, _t) in times.iter().enumerate() {
let v = vector.get(i);
if i == 1 || i == 3 {
assert_eq!(Value::Null, v);
continue;
}
match v {
Value::Int64(ts) => {
assert_eq!(ts, (*results.get(i).unwrap()).unwrap());
}
_ => unreachable!(),
}
}

let times = vec![Some(123000), None, Some(42000), None];
let results = [Some(123), None, Some(42), None];
let ts_vector = TimestampMillisecondVector::from(times.clone());
let args: Vec<VectorRef> = vec![Arc::new(ts_vector)];
let vector = f.eval(FunctionContext::default(), &args).unwrap();
assert_eq!(4, vector.len());
for (i, _t) in times.iter().enumerate() {
let v = vector.get(i);
if i == 1 || i == 3 {
assert_eq!(Value::Null, v);
continue;
}
match v {
Value::Int64(ts) => {
assert_eq!(ts, (*results.get(i).unwrap()).unwrap());
}
_ => unreachable!(),
}
}
builder.finish()
}
}

@@ -15,6 +15,8 @@
use std::env;
use std::io::ErrorKind;
use std::path::{Path, PathBuf};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::time::Duration;

use common_runtime::error::{Error, Result};

@@ -24,7 +26,7 @@ use reqwest::{Client, Response};
use serde::{Deserialize, Serialize};

/// The URL to report telemetry data.
pub const TELEMETRY_URL: &str = "https://api.greptime.cloud/db/otel/statistics";
pub const TELEMETRY_URL: &str = "https://telemetry.greptimestats.com/db/otel/statistics";
/// The local installation uuid cache file
const UUID_FILE_NAME: &str = ".greptimedb-telemetry-uuid";

@@ -36,13 +38,29 @@ const GREPTIMEDB_TELEMETRY_CLIENT_CONNECT_TIMEOUT: Duration = Duration::from_sec
const GREPTIMEDB_TELEMETRY_CLIENT_REQUEST_TIMEOUT: Duration = Duration::from_secs(10);

pub enum GreptimeDBTelemetryTask {
Enable(RepeatedTask<Error>),
Enable((RepeatedTask<Error>, Arc<AtomicBool>)),
Disable,
}

impl GreptimeDBTelemetryTask {
pub fn enable(interval: Duration, task_fn: BoxedTaskFunction<Error>) -> Self {
GreptimeDBTelemetryTask::Enable(RepeatedTask::new(interval, task_fn))
pub fn should_report(&self, value: bool) {
match self {
GreptimeDBTelemetryTask::Enable((_, should_report)) => {
should_report.store(value, Ordering::Relaxed);
}
GreptimeDBTelemetryTask::Disable => {}
}
}

pub fn enable(
interval: Duration,
task_fn: BoxedTaskFunction<Error>,
should_report: Arc<AtomicBool>,
) -> Self {
GreptimeDBTelemetryTask::Enable((
RepeatedTask::new(interval, task_fn).with_initial_delay(Some(Duration::ZERO)),
should_report,
))
}

pub fn disable() -> Self {

@@ -51,7 +69,7 @@ impl GreptimeDBTelemetryTask {

pub fn start(&self) -> Result<()> {
match self {
GreptimeDBTelemetryTask::Enable(task) => {
GreptimeDBTelemetryTask::Enable((task, _)) => {
print_anonymous_usage_data_disclaimer();
task.start(common_runtime::bg_runtime())
}

@@ -61,7 +79,7 @@ impl GreptimeDBTelemetryTask {

pub async fn stop(&self) -> Result<()> {
match self {
GreptimeDBTelemetryTask::Enable(task) => task.stop().await,
GreptimeDBTelemetryTask::Enable((task, _)) => task.stop().await,
GreptimeDBTelemetryTask::Disable => Ok(()),
}
}
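
// Illustrative sketch (not part of the diff): the reporting gate added here is just a
// shared atomic flag that the periodic task checks before each report, and that other
// components can flip at runtime without stopping the task.
fn example_reporting_gate() {
    use std::sync::atomic::{AtomicBool, Ordering};
    use std::sync::Arc;

    let should_report = Arc::new(AtomicBool::new(true));
    let seen_by_task = should_report.clone();

    // e.g. after re-reading configuration: disable reporting on the fly.
    should_report.store(false, Ordering::Relaxed);

    // Inside the task's `call()`: skip the HTTP request when the flag is off.
    assert!(!seen_by_task.load(Ordering::Relaxed));
}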

@@ -191,6 +209,8 @@ pub struct GreptimeDBTelemetry {
client: Option<Client>,
working_home: Option<String>,
telemetry_url: &'static str,
should_report: Arc<AtomicBool>,
report_times: usize,
}

#[async_trait::async_trait]

@@ -200,13 +220,19 @@ impl TaskFunction<Error> for GreptimeDBTelemetry {
}

async fn call(&mut self) -> Result<()> {
self.report_telemetry_info().await;
if self.should_report.load(Ordering::Relaxed) {
self.report_telemetry_info().await;
}
Ok(())
}
}

impl GreptimeDBTelemetry {
pub fn new(working_home: Option<String>, statistics: Box<dyn Collector + Send + Sync>) -> Self {
pub fn new(
working_home: Option<String>,
statistics: Box<dyn Collector + Send + Sync>,
should_report: Arc<AtomicBool>,
) -> Self {
let client = Client::builder()
.connect_timeout(GREPTIMEDB_TELEMETRY_CLIENT_CONNECT_TIMEOUT)
.timeout(GREPTIMEDB_TELEMETRY_CLIENT_REQUEST_TIMEOUT)

@@ -216,6 +242,8 @@ impl GreptimeDBTelemetry {
statistics,
client: client.ok(),
telemetry_url: TELEMETRY_URL,
should_report,
report_times: 0,
}
}

@@ -233,8 +261,11 @@ impl GreptimeDBTelemetry {
};

if let Some(client) = self.client.as_ref() {
info!("reporting greptimedb version: {:?}", data);
if self.report_times == 0 {
info!("reporting greptimedb version: {:?}", data);
}
let result = client.post(self.telemetry_url).json(&data).send().await;
self.report_times += 1;
debug!("report version result: {:?}", result);
result.ok()
} else {

@@ -250,7 +281,8 @@ impl GreptimeDBTelemetry {
mod tests {
use std::convert::Infallible;
use std::env;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::{AtomicBool, AtomicUsize};
use std::sync::Arc;
use std::time::Duration;

use common_test_util::ports;

@@ -370,7 +402,11 @@ mod tests {
let working_home = working_home_temp.path().to_str().unwrap().to_string();

let test_statistic = Box::new(TestStatistic);
let mut test_report = GreptimeDBTelemetry::new(Some(working_home.clone()), test_statistic);
let mut test_report = GreptimeDBTelemetry::new(
Some(working_home.clone()),
test_statistic,
Arc::new(AtomicBool::new(true)),
);
let url = Box::leak(format!("{}:{}", "http://localhost", port).into_boxed_str());
test_report.telemetry_url = url;
let response = test_report.report_telemetry_info().await.unwrap();

@@ -384,7 +420,11 @@ mod tests {
assert_eq!(1, body.nodes.unwrap());

let failed_statistic = Box::new(FailedStatistic);
let mut failed_report = GreptimeDBTelemetry::new(Some(working_home), failed_statistic);
let mut failed_report = GreptimeDBTelemetry::new(
Some(working_home),
failed_statistic,
Arc::new(AtomicBool::new(true)),
);
failed_report.telemetry_url = url;
let response = failed_report.report_telemetry_info().await;
assert!(response.is_none());
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use common_base::readable_size::ReadableSize;
|
||||
use common_telemetry::info;
|
||||
use dashmap::mapref::entry::Entry;
|
||||
use dashmap::DashMap;
|
||||
@@ -31,8 +32,8 @@ use crate::error::{CreateChannelSnafu, InvalidConfigFilePathSnafu, InvalidTlsCon
|
||||
const RECYCLE_CHANNEL_INTERVAL_SECS: u64 = 60;
|
||||
pub const DEFAULT_GRPC_REQUEST_TIMEOUT_SECS: u64 = 10;
|
||||
pub const DEFAULT_GRPC_CONNECT_TIMEOUT_SECS: u64 = 1;
|
||||
pub const DEFAULT_MAX_GRPC_RECV_MESSAGE_SIZE: usize = 512 * 1024 * 1024;
|
||||
pub const DEFAULT_MAX_GRPC_SEND_MESSAGE_SIZE: usize = 512 * 1024 * 1024;
|
||||
pub const DEFAULT_MAX_GRPC_RECV_MESSAGE_SIZE: ReadableSize = ReadableSize::mb(512);
|
||||
pub const DEFAULT_MAX_GRPC_SEND_MESSAGE_SIZE: ReadableSize = ReadableSize::mb(512);
|
||||
|
||||
lazy_static! {
|
||||
static ref ID: AtomicU64 = AtomicU64::new(0);
|
||||
@@ -162,7 +163,14 @@ impl ChannelManager {
|
||||
}
|
||||
|
||||
fn build_endpoint(&self, addr: &str) -> Result<Endpoint> {
|
||||
let mut endpoint = Endpoint::new(format!("http://{addr}")).context(CreateChannelSnafu)?;
|
||||
let http_prefix = if self.client_tls_config.is_some() {
|
||||
"https"
|
||||
} else {
|
||||
"http"
|
||||
};
|
||||
|
||||
let mut endpoint =
|
||||
Endpoint::new(format!("{http_prefix}://{addr}")).context(CreateChannelSnafu)?;
|
||||
|
||||
if let Some(dur) = self.config.timeout {
|
||||
endpoint = endpoint.timeout(dur);
|
||||
@@ -250,9 +258,9 @@ pub struct ChannelConfig {
|
||||
pub tcp_nodelay: bool,
|
||||
pub client_tls: Option<ClientTlsOption>,
|
||||
// Max gRPC receiving(decoding) message size
|
||||
pub max_recv_message_size: usize,
|
||||
pub max_recv_message_size: ReadableSize,
|
||||
// Max gRPC sending(encoding) message size
|
||||
pub max_send_message_size: usize,
|
||||
pub max_send_message_size: ReadableSize,
|
||||
}
|
||||
|
||||
impl Default for ChannelConfig {
|
||||
|
||||
@@ -241,7 +241,7 @@ mod test {
|
||||
.unwrap();
|
||||
|
||||
let flight_data = batches_to_flight_data(
|
||||
arrow_schema,
|
||||
&arrow_schema,
|
||||
vec![
|
||||
batch1.clone().into_df_record_batch(),
|
||||
batch2.clone().into_df_record_batch(),
|
||||
|
||||
@@ -10,6 +10,7 @@ testing = []
|
||||
[dependencies]
|
||||
api = { workspace = true }
|
||||
arrow-flight.workspace = true
|
||||
async-recursion = "1.0"
|
||||
async-stream.workspace = true
|
||||
async-trait.workspace = true
|
||||
base64 = "0.21"
|
||||
@@ -28,7 +29,7 @@ etcd-client.workspace = true
|
||||
futures.workspace = true
|
||||
humantime-serde.workspace = true
|
||||
lazy_static.workspace = true
|
||||
metrics.workspace = true
|
||||
prometheus.workspace = true
|
||||
prost.workspace = true
|
||||
regex.workspace = true
|
||||
serde.workspace = true
|
||||
@@ -38,6 +39,7 @@ store-api = { workspace = true }
|
||||
strum.workspace = true
|
||||
table = { workspace = true }
|
||||
tokio.workspace = true
|
||||
tonic.workspace = true
|
||||
|
||||
[dev-dependencies]
|
||||
chrono.workspace = true
|
||||
|
||||

@@ -374,10 +374,11 @@ impl Procedure for AlterTableProcedure {

let state = &self.data.state;

let _timer = common_telemetry::timer!(
metrics::METRIC_META_PROCEDURE_ALTER_TABLE,
&[("step", state.as_ref().to_string())]
);
let step = state.as_ref();

let _timer = metrics::METRIC_META_PROCEDURE_ALTER_TABLE
.with_label_values(&[step])
.start_timer();

match state {
AlterTableState::Prepare => self.on_prepare().await,
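
// Illustrative sketch (not part of the diff): the pattern these hunks migrate to.
// The metric below is hypothetical; the real HistogramVecs are defined in `metrics.rs`.
// `state.as_ref()` yields the variant name as a &str because the state enums derive
// strum's AsRefStr (the same derive added to StatusCode earlier in this change set).
use lazy_static::lazy_static;
use prometheus::{register_histogram_vec, HistogramVec};

lazy_static! {
    static ref EXAMPLE_PROCEDURE_DURATION: HistogramVec = register_histogram_vec!(
        "example_meta_procedure_duration_seconds",
        "per-step procedure execution time (illustrative only)",
        &["step"]
    )
    .unwrap();
}

fn example_timed_step() {
    // The timer observes the elapsed seconds into the labelled histogram when dropped.
    let _timer = EXAMPLE_PROCEDURE_DURATION
        .with_label_values(&["prepare"])
        .start_timer();
    // ... do the step's work here ...
}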

@@ -251,10 +251,9 @@ impl Procedure for CreateTableProcedure {
async fn execute(&mut self, _ctx: &ProcedureContext) -> ProcedureResult<Status> {
let state = &self.creator.data.state;

let _timer = common_telemetry::timer!(
metrics::METRIC_META_PROCEDURE_CREATE_TABLE,
&[("step", state.as_ref().to_string())]
);
let _timer = metrics::METRIC_META_PROCEDURE_CREATE_TABLE
.with_label_values(&[state.as_ref()])
.start_timer();

match state {
CreateTableState::Prepare => self.on_prepare().await,

@@ -197,10 +197,9 @@ impl Procedure for DropTableProcedure {
async fn execute(&mut self, _ctx: &ProcedureContext) -> ProcedureResult<Status> {
let state = &self.data.state;

let _timer = common_telemetry::timer!(
metrics::METRIC_META_PROCEDURE_DROP_TABLE,
&[("step", state.as_ref().to_string())]
);
let _timer = metrics::METRIC_META_PROCEDURE_DROP_TABLE
.with_label_values(&[state.as_ref()])
.start_timer();

match self.data.state {
DropTableState::Prepare => self.on_prepare().await,

@@ -55,10 +55,9 @@ impl Procedure for TruncateTableProcedure {
async fn execute(&mut self, _ctx: &ProcedureContext) -> ProcedureResult<Status> {
let state = &self.data.state;

let _timer = common_telemetry::timer!(
metrics::METRIC_META_PROCEDURE_TRUNCATE_TABLE,
&[("step", state.as_ref().to_string())]
);
let _timer = metrics::METRIC_META_PROCEDURE_TRUNCATE_TABLE
.with_label_values(&[state.as_ref()])
.start_timer();

match self.data.state {
TruncateTableState::Prepare => self.on_prepare().await,

@@ -28,6 +28,26 @@ use crate::peer::Peer;
#[snafu(visibility(pub))]
#[stack_trace_debug]
pub enum Error {
#[snafu(display("Empty key is not allowed"))]
EmptyKey { location: Location },

#[snafu(display("Invalid result with a txn response: {}", err_msg))]
InvalidTxnResult { err_msg: String, location: Location },

#[snafu(display("Failed to connect to Etcd"))]
ConnectEtcd {
#[snafu(source)]
error: etcd_client::Error,
location: Location,
},

#[snafu(display("Failed to execute via Etcd"))]
EtcdFailed {
#[snafu(source)]
error: etcd_client::Error,
location: Location,
},

#[snafu(display("Failed to get sequence: {}", err_msg))]
NextSequence { err_msg: String, location: Location },

@@ -254,7 +274,10 @@ impl ErrorExt for Error {
fn status_code(&self) -> StatusCode {
use Error::*;
match self {
IllegalServerState { .. } | EtcdTxnOpResponse { .. } => StatusCode::Internal,
IllegalServerState { .. }
| EtcdTxnOpResponse { .. }
| EtcdFailed { .. }
| ConnectEtcd { .. } => StatusCode::Internal,

SerdeJson { .. }
| ParseOption { .. }

@@ -267,7 +290,8 @@ impl ErrorExt for Error {
| NextSequence { .. }
| SequenceOutOfRange { .. }
| UnexpectedSequenceValue { .. }
| InvalidHeartbeatResponse { .. } => StatusCode::Unexpected,
| InvalidHeartbeatResponse { .. }
| InvalidTxnResult { .. } => StatusCode::Unexpected,

SendMessage { .. }
| GetKvCache { .. }

@@ -277,7 +301,7 @@ impl ErrorExt for Error {
| RenameTable { .. }
| Unsupported { .. } => StatusCode::Internal,

PrimaryKeyNotFound { .. } => StatusCode::InvalidArguments,
PrimaryKeyNotFound { .. } | &EmptyKey { .. } => StatusCode::InvalidArguments,

TableNotFound { .. } => StatusCode::TableNotFound,
TableAlreadyExists { .. } => StatusCode::TableAlreadyExists,

@@ -319,4 +343,16 @@ impl Error {
pub fn is_retry_later(&self) -> bool {
matches!(self, Error::RetryLater { .. })
}

/// Returns true if the response exceeds the size limit.
pub fn is_exceeded_size_limit(&self) -> bool {
if let Error::EtcdFailed {
error: etcd_client::Error::GRpcStatus(status),
..
} = self
{
return status.code() == tonic::Code::OutOfRange;
}
false
}
}

@@ -54,6 +54,8 @@ pub mod table_region;
// TODO(weny): removes it.
#[allow(deprecated)]
pub mod table_route;
#[cfg(any(test, feature = "testing"))]
pub mod test_utils;

use std::collections::{BTreeMap, HashMap};
use std::fmt::Debug;

@@ -89,12 +91,20 @@ pub const REMOVED_PREFIX: &str = "__removed";
const NAME_PATTERN: &str = r"[a-zA-Z_:-][a-zA-Z0-9_:\-\.]*";

const DATANODE_TABLE_KEY_PREFIX: &str = "__dn_table";
const TABLE_INFO_KEY_PREFIX: &str = "__table_info";
const TABLE_NAME_KEY_PREFIX: &str = "__table_name";
const TABLE_REGION_KEY_PREFIX: &str = "__table_region";
const CATALOG_NAME_KEY_PREFIX: &str = "__catalog_name";
const SCHEMA_NAME_KEY_PREFIX: &str = "__schema_name";
const TABLE_ROUTE_PREFIX: &str = "__table_route";

pub const TABLE_INFO_KEY_PREFIX: &str = "__table_info";
pub const TABLE_NAME_KEY_PREFIX: &str = "__table_name";
pub const CATALOG_NAME_KEY_PREFIX: &str = "__catalog_name";
pub const SCHEMA_NAME_KEY_PREFIX: &str = "__schema_name";
pub const TABLE_ROUTE_PREFIX: &str = "__table_route";

pub const CACHE_KEY_PREFIXES: [&str; 4] = [
TABLE_NAME_KEY_PREFIX,
CATALOG_NAME_KEY_PREFIX,
SCHEMA_NAME_KEY_PREFIX,
TABLE_ROUTE_PREFIX,
];

pub type RegionDistribution = BTreeMap<DatanodeId, Vec<RegionNumber>>;

@@ -258,7 +268,6 @@ impl<T: Serialize + DeserializeOwned> DeserializedValueWithBytes<T> {
self.bytes.to_vec()
}

#[cfg(feature = "testing")]
/// Notes: used for test purpose.
pub fn from_inner(inner: T) -> Self {
let bytes = serde_json::to_vec(&inner).unwrap();

@@ -685,12 +694,11 @@ mod tests {
use std::sync::Arc;

use bytes::Bytes;
use datatypes::prelude::ConcreteDataType;
use datatypes::schema::{ColumnSchema, SchemaBuilder};
use futures::TryStreamExt;
use table::metadata::{RawTableInfo, TableInfo, TableInfoBuilder, TableMetaBuilder};
use table::metadata::{RawTableInfo, TableInfo};

use super::datanode_table::DatanodeTableKey;
use super::test_utils;
use crate::ddl::utils::region_storage_path;
use crate::key::datanode_table::RegionInfo;
use crate::key::table_info::TableInfoValue;

@@ -736,40 +744,6 @@ mod tests {
assert_eq!(removed, to_removed_key(key));
}

fn new_test_table_info(region_numbers: impl Iterator<Item = u32>) -> TableInfo {
let column_schemas = vec![
ColumnSchema::new("col1", ConcreteDataType::int32_datatype(), true),
ColumnSchema::new(
"ts",
ConcreteDataType::timestamp_millisecond_datatype(),
false,
)
.with_time_index(true),
ColumnSchema::new("col2", ConcreteDataType::int32_datatype(), true),
];
let schema = SchemaBuilder::try_from(column_schemas)
.unwrap()
.version(123)
.build()
.unwrap();

let meta = TableMetaBuilder::default()
.schema(Arc::new(schema))
.primary_key_indices(vec![0])
.engine("engine")
.next_column_id(3)
.region_numbers(region_numbers.collect::<Vec<_>>())
.build()
.unwrap();
TableInfoBuilder::default()
.table_id(10)
.table_version(5)
.name("mytable")
.meta(meta)
.build()
.unwrap()
}

fn new_test_region_route() -> RegionRoute {
new_region_route(1, 2)
}

@@ -784,9 +758,14 @@ mod tests {
},
leader_peer: Some(Peer::new(datanode, "a2")),
follower_peers: vec![],
leader_status: None,
}
}

fn new_test_table_info(region_numbers: impl Iterator<Item = u32>) -> TableInfo {
test_utils::new_test_table_info(10, region_numbers)
}

#[tokio::test]
async fn test_create_table_metadata() {
let mem_kv = Arc::new(MemoryKvBackend::default());

@@ -16,10 +16,8 @@ use std::fmt::Display;
use std::sync::Arc;

use common_catalog::consts::DEFAULT_CATALOG_NAME;
use common_telemetry::timer;
use futures::stream::BoxStream;
use futures::StreamExt;
use metrics::increment_counter;
use serde::{Deserialize, Serialize};
use snafu::{OptionExt, ResultExt};

@@ -104,7 +102,7 @@ impl CatalogManager {

/// Creates `CatalogNameKey`.
pub async fn create(&self, catalog: CatalogNameKey<'_>, if_not_exists: bool) -> Result<()> {
let _timer = timer!(crate::metrics::METRIC_META_CREATE_CATALOG);
let _timer = crate::metrics::METRIC_META_CREATE_CATALOG.start_timer();

let raw_key = catalog.as_raw_key();
let raw_value = CatalogNameValue.try_as_raw_value()?;

@@ -113,7 +111,7 @@ impl CatalogManager {
.put_conditionally(raw_key, raw_value, if_not_exists)
.await?
{
increment_counter!(crate::metrics::METRIC_META_CREATE_CATALOG);
crate::metrics::METRIC_META_CREATE_CATALOG_COUNTER.inc();
}

Ok(())

@@ -18,11 +18,9 @@ use std::sync::Arc;
use std::time::Duration;

use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_telemetry::timer;
use futures::stream::BoxStream;
use futures::StreamExt;
use humantime_serde::re::humantime;
use metrics::increment_counter;
use serde::{Deserialize, Serialize};
use snafu::{OptionExt, ResultExt};

@@ -146,7 +144,7 @@ impl SchemaManager {
value: Option<SchemaNameValue>,
if_not_exists: bool,
) -> Result<()> {
let _timer = timer!(crate::metrics::METRIC_META_CREATE_SCHEMA);
let _timer = crate::metrics::METRIC_META_CREATE_SCHEMA.start_timer();

let raw_key = schema.as_raw_key();
let raw_value = value.unwrap_or_default().try_as_raw_value()?;

@@ -155,7 +153,7 @@ impl SchemaManager {
.put_conditionally(raw_key, raw_value, if_not_exists)
.await?
{
increment_counter!(crate::metrics::METRIC_META_CREATE_SCHEMA);
crate::metrics::METRIC_META_CREATE_SCHEMA_COUNTER.inc();
}

Ok(())

@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::collections::HashMap;

use serde::{Deserialize, Serialize};
use table::engine::TableReference;
use table::metadata::{RawTableInfo, TableId};

@@ -21,6 +23,7 @@ use crate::error::Result;
use crate::key::{to_removed_key, TableMetaKey};
use crate::kv_backend::txn::{Compare, CompareOp, Txn, TxnOp, TxnOpResponse};
use crate::kv_backend::KvBackendRef;
use crate::rpc::store::BatchGetRequest;
use crate::table_name::TableName;

pub struct TableInfoKey {

@@ -233,6 +236,37 @@ impl TableInfoManager {
.map(|x| DeserializedValueWithBytes::from_inner_slice(&x.value))
.transpose()
}

pub async fn batch_get(
&self,
table_ids: &[TableId],
) -> Result<HashMap<TableId, TableInfoValue>> {
let lookup_table = table_ids
.iter()
.map(|id| (TableInfoKey::new(*id).as_raw_key(), id))
.collect::<HashMap<_, _>>();

let resp = self
.kv_backend
.batch_get(BatchGetRequest {
keys: lookup_table.keys().cloned().collect::<Vec<_>>(),
})
.await?;

let values = resp
.kvs
.iter()
.map(|kv| {
Ok((
// Safety: must exist.
**lookup_table.get(kv.key()).unwrap(),
TableInfoValue::try_from_raw_value(&kv.value)?,
))
})
.collect::<Result<HashMap<_, _>>>()?;

Ok(values)
}
}

#[cfg(test)]

@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::collections::HashMap;
use std::fmt::Display;

use serde::{Deserialize, Serialize};

@@ -23,6 +24,7 @@ use crate::key::{to_removed_key, RegionDistribution, TableMetaKey, TABLE_ROUTE_P
use crate::kv_backend::txn::{Compare, CompareOp, Txn, TxnOp, TxnOpResponse};
use crate::kv_backend::KvBackendRef;
use crate::rpc::router::{region_distribution, RegionRoute};
use crate::rpc::store::BatchGetRequest;

pub struct TableRouteKey {
pub table_id: TableId,

@@ -197,6 +199,38 @@ impl TableRouteManager {
.transpose()
}

/// It may return a subset of the `table_ids`.
pub async fn batch_get(
&self,
table_ids: &[TableId],
) -> Result<HashMap<TableId, TableRouteValue>> {
let lookup_table = table_ids
.iter()
.map(|id| (TableRouteKey::new(*id).as_raw_key(), id))
.collect::<HashMap<_, _>>();

let resp = self
.kv_backend
.batch_get(BatchGetRequest {
keys: lookup_table.keys().cloned().collect::<Vec<_>>(),
})
.await?;

let values = resp
.kvs
.iter()
.map(|kv| {
Ok((
// Safety: must exist.
**lookup_table.get(kv.key()).unwrap(),
TableRouteValue::try_from_raw_value(&kv.value)?,
))
})
.collect::<Result<HashMap<_, _>>>()?;

Ok(values)
}

#[cfg(test)]
pub async fn get_removed(
&self,
57
src/common/meta/src/key/test_utils.rs
Normal file
57
src/common/meta/src/key/test_utils.rs
Normal file
@@ -0,0 +1,57 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::schema::{ColumnSchema, SchemaBuilder};
|
||||
use store_api::storage::TableId;
|
||||
use table::metadata::{TableInfo, TableInfoBuilder, TableMetaBuilder};
|
||||
|
||||
pub fn new_test_table_info<I: IntoIterator<Item = u32>>(
|
||||
table_id: TableId,
|
||||
region_numbers: I,
|
||||
) -> TableInfo {
|
||||
let column_schemas = vec![
|
||||
ColumnSchema::new("col1", ConcreteDataType::int32_datatype(), true),
|
||||
ColumnSchema::new(
|
||||
"ts",
|
||||
ConcreteDataType::timestamp_millisecond_datatype(),
|
||||
false,
|
||||
)
|
||||
.with_time_index(true),
|
||||
ColumnSchema::new("col2", ConcreteDataType::int32_datatype(), true),
|
||||
];
|
||||
let schema = SchemaBuilder::try_from(column_schemas)
|
||||
.unwrap()
|
||||
.version(123)
|
||||
.build()
|
||||
.unwrap();
|
||||
|
||||
let meta = TableMetaBuilder::default()
|
||||
.schema(Arc::new(schema))
|
||||
.primary_key_indices(vec![0])
|
||||
.engine("engine")
|
||||
.next_column_id(3)
|
||||
.region_numbers(region_numbers.into_iter().collect::<Vec<_>>())
|
||||
.build()
|
||||
.unwrap();
|
||||
TableInfoBuilder::default()
|
||||
.table_id(table_id)
|
||||
.table_version(5)
|
||||
.name("mytable")
|
||||
.meta(meta)
|
||||
.build()
|
||||
.unwrap()
|
||||
}
|
||||
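A hedged example of how this new test helper could be used. The assertions only touch values hard-coded above; the `name` and `meta.region_numbers` field accesses are assumptions about `table::metadata::TableInfo` inferred from the builder calls in the helper.

// Illustrative test for the helper above (not part of this change). Field access on
// `TableInfo` (`name`, `meta.region_numbers`) is assumed from its builder usage above.
#[test]
fn builds_expected_test_table_info() {
    let info = new_test_table_info(42, 1..=3);
    assert_eq!(info.name, "mytable");
    assert_eq!(info.meta.region_numbers, vec![1, 2, 3]);
}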
@@ -12,7 +12,9 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
pub mod etcd;
|
||||
pub mod memory;
|
||||
pub mod test;
|
||||
pub mod txn;
|
||||
|
||||
use std::any::Any;
|
||||
@@ -26,8 +28,7 @@ use crate::error::Error;
|
||||
use crate::rpc::store::{
|
||||
BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse, BatchPutRequest,
|
||||
BatchPutResponse, CompareAndPutRequest, CompareAndPutResponse, DeleteRangeRequest,
|
||||
DeleteRangeResponse, MoveValueRequest, MoveValueResponse, PutRequest, PutResponse,
|
||||
RangeRequest, RangeResponse,
|
||||
DeleteRangeResponse, PutRequest, PutResponse, RangeRequest, RangeResponse,
|
||||
};
|
||||
use crate::rpc::KeyValue;
|
||||
|
||||
@@ -65,9 +66,6 @@ where
|
||||
req: BatchDeleteRequest,
|
||||
) -> Result<BatchDeleteResponse, Self::Error>;
|
||||
|
||||
/// MoveValue atomically renames the key to the given updated key.
|
||||
async fn move_value(&self, req: MoveValueRequest) -> Result<MoveValueResponse, Self::Error>;
|
||||
|
||||
// The following methods are implemented based on the methods above,
// and provide a higher-level interface to simplify usage.
|
||||
|
||||
@@ -130,3 +128,12 @@ where
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub trait ResettableKvBackend: KvBackend
|
||||
where
|
||||
Self::Error: ErrorExt,
|
||||
{
|
||||
fn reset(&self);
|
||||
}
|
||||
|
||||
pub type ResettableKvBackendRef = Arc<dyn ResettableKvBackend<Error = Error> + Send + Sync>;
|
||||
|
||||
@@ -15,27 +15,47 @@
|
||||
use std::any::Any;
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_meta::kv_backend::txn::{Txn as KvTxn, TxnResponse as KvTxnResponse};
|
||||
use common_meta::kv_backend::{KvBackend, TxnService};
|
||||
use common_meta::metrics::METRIC_META_TXN_REQUEST;
|
||||
use common_meta::rpc::store::{
|
||||
BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse, BatchPutRequest,
|
||||
BatchPutResponse, CompareAndPutRequest, CompareAndPutResponse, DeleteRangeRequest,
|
||||
DeleteRangeResponse, MoveValueRequest, MoveValueResponse, PutRequest, PutResponse,
|
||||
RangeRequest, RangeResponse,
|
||||
};
|
||||
use common_meta::rpc::KeyValue;
|
||||
use common_telemetry::{timer, warn};
|
||||
use etcd_client::{
|
||||
Client, Compare, CompareOp, DeleteOptions, GetOptions, PutOptions, Txn, TxnOp, TxnOpResponse,
|
||||
TxnResponse,
|
||||
};
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
|
||||
use crate::error;
|
||||
use crate::error::{ConvertEtcdTxnObjectSnafu, Error, Result};
|
||||
use crate::service::store::etcd_util::KvPair;
|
||||
use crate::service::store::kv::KvStoreRef;
|
||||
use super::KvBackendRef;
|
||||
use crate::error::{self, Error, Result};
|
||||
use crate::kv_backend::txn::{Txn as KvTxn, TxnResponse as KvTxnResponse};
|
||||
use crate::kv_backend::{KvBackend, TxnService};
|
||||
use crate::metrics::METRIC_META_TXN_REQUEST;
|
||||
use crate::rpc::store::{
|
||||
BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse, BatchPutRequest,
|
||||
BatchPutResponse, CompareAndPutRequest, CompareAndPutResponse, DeleteRangeRequest,
|
||||
DeleteRangeResponse, PutRequest, PutResponse, RangeRequest, RangeResponse,
|
||||
};
|
||||
use crate::rpc::KeyValue;
|
||||
|
||||
pub struct KvPair<'a>(&'a etcd_client::KeyValue);
|
||||
|
||||
impl<'a> KvPair<'a> {
|
||||
/// Creates a `KvPair` from etcd KeyValue
|
||||
#[inline]
|
||||
pub fn new(kv: &'a etcd_client::KeyValue) -> Self {
|
||||
Self(kv)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn from_etcd_kv(kv: &etcd_client::KeyValue) -> KeyValue {
|
||||
KeyValue::from(KvPair::new(kv))
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> From<KvPair<'a>> for KeyValue {
|
||||
fn from(kv: KvPair<'a>) -> Self {
|
||||
Self {
|
||||
key: kv.0.key().to_vec(),
|
||||
value: kv.0.value().to_vec(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Maximum number of operations permitted in a transaction.
|
||||
// The etcd default configuration's `--max-txn-ops` is 128.
|
||||
@@ -48,7 +68,7 @@ pub struct EtcdStore {
|
||||
}
|
||||
|
||||
impl EtcdStore {
|
||||
pub async fn with_endpoints<E, S>(endpoints: S) -> Result<KvStoreRef>
|
||||
pub async fn with_endpoints<E, S>(endpoints: S) -> Result<KvBackendRef>
|
||||
where
|
||||
E: AsRef<str>,
|
||||
S: AsRef<[E]>,
|
||||
@@ -60,7 +80,7 @@ impl EtcdStore {
|
||||
Ok(Self::with_etcd_client(client))
|
||||
}
|
||||
|
||||
pub fn with_etcd_client(client: Client) -> KvStoreRef {
|
||||
pub fn with_etcd_client(client: Client) -> KvBackendRef {
|
||||
Arc::new(Self { client })
|
||||
}
|
||||
|
||||
@@ -289,79 +309,6 @@ impl KvBackend for EtcdStore {
|
||||
|
||||
Ok(BatchDeleteResponse { prev_kvs })
|
||||
}
|
||||
|
||||
async fn move_value(&self, req: MoveValueRequest) -> Result<MoveValueResponse> {
|
||||
let MoveValue {
|
||||
from_key,
|
||||
to_key,
|
||||
delete_options,
|
||||
} = req.try_into()?;
|
||||
|
||||
let mut client = self.client.kv_client();
|
||||
|
||||
// TODO(jiachun): Maybe it's better to let the users control it in the request
|
||||
const MAX_RETRIES: usize = 8;
|
||||
for _ in 0..MAX_RETRIES {
|
||||
let from_key = from_key.as_slice();
|
||||
let to_key = to_key.as_slice();
|
||||
|
||||
let res = client
|
||||
.get(from_key, None)
|
||||
.await
|
||||
.context(error::EtcdFailedSnafu)?;
|
||||
|
||||
let txn = match res.kvs().first() {
|
||||
None => {
|
||||
// Get `to_key` if `from_key` is absent.
// A create revision of 0 means the key does not exist.
|
||||
let compare = Compare::create_revision(from_key, CompareOp::Equal, 0);
|
||||
let get = TxnOp::get(to_key, None);
|
||||
Txn::new().when(vec![compare]).and_then(vec![get])
|
||||
}
|
||||
Some(kv) => {
|
||||
// compare `from_key` and move to `to_key`
|
||||
let value = kv.value();
|
||||
let compare = Compare::value(from_key, CompareOp::Equal, value);
|
||||
let delete = TxnOp::delete(from_key, delete_options.clone());
|
||||
let put = TxnOp::put(to_key, value, None);
|
||||
Txn::new().when(vec![compare]).and_then(vec![delete, put])
|
||||
}
|
||||
};
|
||||
|
||||
let txn_res = client.txn(txn).await.context(error::EtcdFailedSnafu)?;
|
||||
|
||||
if !txn_res.succeeded() {
|
||||
warn!(
|
||||
"Failed to atomically move {:?} to {:?}, try again...",
|
||||
String::from_utf8_lossy(from_key),
|
||||
String::from_utf8_lossy(to_key)
|
||||
);
|
||||
continue;
|
||||
}
|
||||
|
||||
// Responses are either [`get_res`] or [`delete_res`, `put_res`]; `put_res` is ignored.
|
||||
for op_res in txn_res.op_responses() {
|
||||
match op_res {
|
||||
TxnOpResponse::Get(res) => {
|
||||
return Ok(MoveValueResponse(
|
||||
res.kvs().first().map(KvPair::from_etcd_kv),
|
||||
));
|
||||
}
|
||||
TxnOpResponse::Delete(res) => {
|
||||
return Ok(MoveValueResponse(
|
||||
res.prev_kvs().first().map(KvPair::from_etcd_kv),
|
||||
));
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
error::MoveValueSnafu {
|
||||
key: String::from_utf8_lossy(&from_key),
|
||||
}
|
||||
.fail()
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
@@ -369,10 +316,9 @@ impl TxnService for EtcdStore {
|
||||
type Error = Error;
|
||||
|
||||
async fn txn(&self, txn: KvTxn) -> Result<KvTxnResponse> {
|
||||
let _timer = timer!(
|
||||
METRIC_META_TXN_REQUEST,
|
||||
&[("target", "etcd".to_string()), ("op", "txn".to_string()),]
|
||||
);
|
||||
let _timer = METRIC_META_TXN_REQUEST
|
||||
.with_label_values(&["etcd", "txn"])
|
||||
.start_timer();
|
||||
|
||||
let etcd_txn: Txn = txn.into();
|
||||
let txn_res = self
|
||||
@@ -381,7 +327,7 @@ impl TxnService for EtcdStore {
|
||||
.txn(etcd_txn)
|
||||
.await
|
||||
.context(error::EtcdFailedSnafu)?;
|
||||
txn_res.try_into().context(ConvertEtcdTxnObjectSnafu)
|
||||
txn_res.try_into()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -570,26 +516,6 @@ impl TryFrom<DeleteRangeRequest> for Delete {
|
||||
}
|
||||
}
|
||||
|
||||
struct MoveValue {
|
||||
from_key: Vec<u8>,
|
||||
to_key: Vec<u8>,
|
||||
delete_options: Option<DeleteOptions>,
|
||||
}
|
||||
|
||||
impl TryFrom<MoveValueRequest> for MoveValue {
|
||||
type Error = Error;
|
||||
|
||||
fn try_from(req: MoveValueRequest) -> Result<Self> {
|
||||
let MoveValueRequest { from_key, to_key } = req;
|
||||
|
||||
Ok(MoveValue {
|
||||
from_key,
|
||||
to_key,
|
||||
delete_options: Some(DeleteOptions::default().with_prev_key()),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
@@ -701,18 +627,4 @@ mod tests {
|
||||
assert_eq!(b"test_key".to_vec(), delete.key);
|
||||
let _ = delete.options.unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_move_value() {
|
||||
let req = MoveValueRequest {
|
||||
from_key: b"test_from_key".to_vec(),
|
||||
to_key: b"test_to_key".to_vec(),
|
||||
};
|
||||
|
||||
let move_value: MoveValue = req.try_into().unwrap();
|
||||
|
||||
assert_eq!(b"test_from_key".to_vec(), move_value.from_key);
|
||||
assert_eq!(b"test_to_key".to_vec(), move_value.to_key);
|
||||
let _ = move_value.delete_options.unwrap();
|
||||
}
|
||||
}
|
||||
@@ -17,22 +17,20 @@ use std::collections::btree_map::Entry;
|
||||
use std::collections::BTreeMap;
|
||||
use std::fmt::{Display, Formatter};
|
||||
use std::marker::PhantomData;
|
||||
use std::ops::Range;
|
||||
use std::sync::RwLock;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use common_error::ext::ErrorExt;
|
||||
use common_telemetry::timer;
|
||||
use serde::Serializer;
|
||||
|
||||
use super::ResettableKvBackend;
|
||||
use crate::kv_backend::txn::{Txn, TxnOp, TxnOpResponse, TxnRequest, TxnResponse};
|
||||
use crate::kv_backend::{KvBackend, TxnService};
|
||||
use crate::metrics::METRIC_META_TXN_REQUEST;
|
||||
use crate::rpc::store::{
|
||||
BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse, BatchPutRequest,
|
||||
BatchPutResponse, CompareAndPutRequest, CompareAndPutResponse, DeleteRangeRequest,
|
||||
DeleteRangeResponse, MoveValueRequest, MoveValueResponse, PutRequest, PutResponse,
|
||||
RangeRequest, RangeResponse,
|
||||
DeleteRangeResponse, PutRequest, PutResponse, RangeRequest, RangeResponse,
|
||||
};
|
||||
use crate::rpc::KeyValue;
|
||||
|
||||
@@ -85,21 +83,25 @@ impl<T: ErrorExt + Send + Sync + 'static> KvBackend for MemoryKvBackend<T> {
|
||||
}
|
||||
|
||||
async fn range(&self, req: RangeRequest) -> Result<RangeResponse, Self::Error> {
|
||||
let range = req.range();
|
||||
let RangeRequest {
|
||||
key,
|
||||
range_end,
|
||||
limit,
|
||||
keys_only,
|
||||
limit, keys_only, ..
|
||||
} = req;
|
||||
|
||||
let kvs = self.kvs.read().unwrap();
|
||||
let values = kvs.range(range);
|
||||
|
||||
let iter: Box<dyn Iterator<Item = (&Vec<u8>, &Vec<u8>)>> = if range_end.is_empty() {
|
||||
Box::new(kvs.get_key_value(&key).into_iter())
|
||||
} else {
|
||||
Box::new(kvs.range(key..range_end))
|
||||
};
|
||||
let mut kvs = iter
|
||||
let mut more = false;
|
||||
let mut iter: i64 = 0;
|
||||
|
||||
let kvs = values
|
||||
.take_while(|_| {
|
||||
let take = limit == 0 || iter != limit;
|
||||
iter += 1;
|
||||
more = limit > 0 && iter > limit;
|
||||
|
||||
take
|
||||
})
|
||||
.map(|(k, v)| {
|
||||
let key = k.clone();
|
||||
let value = if keys_only { vec![] } else { v.clone() };
|
||||
@@ -107,13 +109,6 @@ impl<T: ErrorExt + Send + Sync + 'static> KvBackend for MemoryKvBackend<T> {
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let more = if limit > 0 && kvs.len() > limit as usize {
|
||||
kvs.truncate(limit as usize);
|
||||
true
|
||||
} else {
|
||||
false
|
||||
};
|
||||
|
||||
Ok(RangeResponse { kvs, more })
|
||||
}
|
||||
|
||||
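The rewritten in-memory `range` above pairs `take_while` with a `more` flag instead of truncating after the fact. A minimal standalone sketch of that limit/`more` behaviour (illustrative only, not the backend implementation):

// Standalone sketch of the `limit`/`more` semantics used by the in-memory `range`
// above: take at most `limit` items (0 means unlimited) and report whether more remain.
fn take_limited<T>(items: impl IntoIterator<Item = T>, limit: usize) -> (Vec<T>, bool) {
    let mut iter = items.into_iter();
    if limit == 0 {
        return (iter.collect(), false);
    }
    let taken: Vec<T> = iter.by_ref().take(limit).collect();
    let more = iter.next().is_some();
    (taken, more)
}

fn main() {
    assert_eq!(take_limited(1..=3, 2), (vec![1, 2], true));
    assert_eq!(take_limited(1..=3, 3), (vec![1, 2, 3], false));
    assert_eq!(take_limited(1..=3, 0), (vec![1, 2, 3], false));
}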
@@ -215,36 +210,32 @@ impl<T: ErrorExt + Send + Sync + 'static> KvBackend for MemoryKvBackend<T> {
|
||||
&self,
|
||||
req: DeleteRangeRequest,
|
||||
) -> Result<DeleteRangeResponse, Self::Error> {
|
||||
let DeleteRangeRequest {
|
||||
key,
|
||||
range_end,
|
||||
prev_kv,
|
||||
} = req;
|
||||
let range = req.range();
|
||||
let DeleteRangeRequest { prev_kv, .. } = req;
|
||||
|
||||
let mut kvs = self.kvs.write().unwrap();
|
||||
|
||||
let prev_kvs = if range_end.is_empty() {
|
||||
kvs.remove(&key)
|
||||
.into_iter()
|
||||
.map(|value| KeyValue {
|
||||
key: key.clone(),
|
||||
value,
|
||||
})
|
||||
.collect::<Vec<_>>()
|
||||
} else {
|
||||
let range = Range {
|
||||
start: key,
|
||||
end: range_end,
|
||||
};
|
||||
kvs.extract_if(|key, _| range.contains(key))
|
||||
.map(Into::into)
|
||||
.collect::<Vec<_>>()
|
||||
};
|
||||
let keys = kvs
|
||||
.range(range)
|
||||
.map(|(key, _)| key.clone())
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
Ok(DeleteRangeResponse {
|
||||
deleted: prev_kvs.len() as i64,
|
||||
prev_kvs: if prev_kv { prev_kvs } else { vec![] },
|
||||
})
|
||||
let mut prev_kvs = if prev_kv {
|
||||
Vec::with_capacity(keys.len())
|
||||
} else {
|
||||
vec![]
|
||||
};
|
||||
let deleted = keys.len() as i64;
|
||||
|
||||
for key in keys {
|
||||
if let Some(value) = kvs.remove(&key) {
|
||||
if prev_kv {
|
||||
prev_kvs.push((key.clone(), value).into())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(DeleteRangeResponse { deleted, prev_kvs })
|
||||
}
|
||||
|
||||
async fn batch_delete(
|
||||
@@ -271,27 +262,6 @@ impl<T: ErrorExt + Send + Sync + 'static> KvBackend for MemoryKvBackend<T> {
|
||||
|
||||
Ok(BatchDeleteResponse { prev_kvs })
|
||||
}
|
||||
|
||||
async fn move_value(&self, req: MoveValueRequest) -> Result<MoveValueResponse, Self::Error> {
|
||||
let MoveValueRequest { from_key, to_key } = req;
|
||||
|
||||
let mut kvs = self.kvs.write().unwrap();
|
||||
|
||||
let kv = if let Some(v) = kvs.remove(&from_key) {
|
||||
kvs.insert(to_key, v.clone());
|
||||
Some(KeyValue {
|
||||
key: from_key,
|
||||
value: v,
|
||||
})
|
||||
} else {
|
||||
kvs.get(&to_key).map(|v| KeyValue {
|
||||
key: to_key,
|
||||
value: v.clone(),
|
||||
})
|
||||
};
|
||||
|
||||
Ok(MoveValueResponse(kv))
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
@@ -299,10 +269,9 @@ impl<T: ErrorExt + Send + Sync> TxnService for MemoryKvBackend<T> {
|
||||
type Error = T;
|
||||
|
||||
async fn txn(&self, txn: Txn) -> Result<TxnResponse, Self::Error> {
|
||||
let _timer = timer!(
|
||||
METRIC_META_TXN_REQUEST,
|
||||
&[("target", "memory"), ("op", "txn")]
|
||||
);
|
||||
let _timer = METRIC_META_TXN_REQUEST
|
||||
.with_label_values(&["memory", "txn"])
|
||||
.start_timer();
|
||||
|
||||
let TxnRequest {
|
||||
compare,
|
||||
@@ -356,315 +325,76 @@ impl<T: ErrorExt + Send + Sync> TxnService for MemoryKvBackend<T> {
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: ErrorExt + Send + Sync + 'static> ResettableKvBackend for MemoryKvBackend<T> {
|
||||
fn reset(&self) {
|
||||
self.clear();
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::atomic::{AtomicU8, Ordering};
|
||||
use std::sync::Arc;
|
||||
|
||||
use super::*;
|
||||
use crate::error::Error;
|
||||
use crate::kv_backend::KvBackend;
|
||||
use crate::rpc::store::{BatchGetRequest, BatchPutRequest};
|
||||
use crate::rpc::KeyValue;
|
||||
use crate::util;
|
||||
use crate::kv_backend::test::{
|
||||
prepare_kv, test_kv_batch_delete, test_kv_batch_get, test_kv_compare_and_put,
|
||||
test_kv_delete_range, test_kv_put, test_kv_range, test_kv_range_2,
|
||||
};
|
||||
|
||||
async fn mock_mem_store_with_data() -> MemoryKvBackend<Error> {
|
||||
let kv_store = MemoryKvBackend::<Error>::new();
|
||||
let kvs = mock_kvs();
|
||||
let kv_backend = MemoryKvBackend::<Error>::new();
|
||||
prepare_kv(&kv_backend).await;
|
||||
|
||||
assert!(kv_store
|
||||
.batch_put(BatchPutRequest {
|
||||
kvs,
|
||||
..Default::default()
|
||||
})
|
||||
.await
|
||||
.is_ok());
|
||||
|
||||
assert!(kv_store
|
||||
.put(PutRequest {
|
||||
key: b"key11".to_vec(),
|
||||
value: b"val11".to_vec(),
|
||||
..Default::default()
|
||||
})
|
||||
.await
|
||||
.is_ok());
|
||||
|
||||
kv_store
|
||||
}
|
||||
|
||||
fn mock_kvs() -> Vec<KeyValue> {
|
||||
vec![
|
||||
KeyValue {
|
||||
key: b"key1".to_vec(),
|
||||
value: b"val1".to_vec(),
|
||||
},
|
||||
KeyValue {
|
||||
key: b"key2".to_vec(),
|
||||
value: b"val2".to_vec(),
|
||||
},
|
||||
KeyValue {
|
||||
key: b"key3".to_vec(),
|
||||
value: b"val3".to_vec(),
|
||||
},
|
||||
]
|
||||
kv_backend
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_put() {
|
||||
let kv_store = mock_mem_store_with_data().await;
|
||||
let kv_backend = mock_mem_store_with_data().await;
|
||||
|
||||
let resp = kv_store
|
||||
.put(PutRequest {
|
||||
key: b"key11".to_vec(),
|
||||
value: b"val12".to_vec(),
|
||||
prev_kv: false,
|
||||
})
|
||||
.await
|
||||
.unwrap();
|
||||
assert!(resp.prev_kv.is_none());
|
||||
|
||||
let resp = kv_store
|
||||
.put(PutRequest {
|
||||
key: b"key11".to_vec(),
|
||||
value: b"val13".to_vec(),
|
||||
prev_kv: true,
|
||||
})
|
||||
.await
|
||||
.unwrap();
|
||||
let prev_kv = resp.prev_kv.unwrap();
|
||||
assert_eq!(b"key11", prev_kv.key());
|
||||
assert_eq!(b"val12", prev_kv.value());
|
||||
test_kv_put(kv_backend).await;
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_range() {
|
||||
let kv_store = mock_mem_store_with_data().await;
|
||||
let kv_backend = mock_mem_store_with_data().await;
|
||||
|
||||
let key = b"key1".to_vec();
|
||||
let range_end = util::get_prefix_end_key(b"key1");
|
||||
test_kv_range(kv_backend).await;
|
||||
}
|
||||
|
||||
let resp = kv_store
|
||||
.range(RangeRequest {
|
||||
key: key.clone(),
|
||||
range_end: range_end.clone(),
|
||||
limit: 0,
|
||||
keys_only: false,
|
||||
})
|
||||
.await
|
||||
.unwrap();
|
||||
#[tokio::test]
|
||||
async fn test_range_2() {
|
||||
let kv = MemoryKvBackend::<Error>::new();
|
||||
|
||||
assert_eq!(2, resp.kvs.len());
|
||||
assert_eq!(b"key1", resp.kvs[0].key());
|
||||
assert_eq!(b"val1", resp.kvs[0].value());
|
||||
assert_eq!(b"key11", resp.kvs[1].key());
|
||||
assert_eq!(b"val11", resp.kvs[1].value());
|
||||
|
||||
let resp = kv_store
|
||||
.range(RangeRequest {
|
||||
key: key.clone(),
|
||||
range_end: range_end.clone(),
|
||||
limit: 0,
|
||||
keys_only: true,
|
||||
})
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(2, resp.kvs.len());
|
||||
assert_eq!(b"key1", resp.kvs[0].key());
|
||||
assert_eq!(b"", resp.kvs[0].value());
|
||||
assert_eq!(b"key11", resp.kvs[1].key());
|
||||
assert_eq!(b"", resp.kvs[1].value());
|
||||
|
||||
let resp = kv_store
|
||||
.range(RangeRequest {
|
||||
key: key.clone(),
|
||||
limit: 0,
|
||||
keys_only: false,
|
||||
..Default::default()
|
||||
})
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(1, resp.kvs.len());
|
||||
assert_eq!(b"key1", resp.kvs[0].key());
|
||||
assert_eq!(b"val1", resp.kvs[0].value());
|
||||
|
||||
let resp = kv_store
|
||||
.range(RangeRequest {
|
||||
key,
|
||||
range_end,
|
||||
limit: 1,
|
||||
keys_only: false,
|
||||
})
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(1, resp.kvs.len());
|
||||
assert_eq!(b"key1", resp.kvs[0].key());
|
||||
assert_eq!(b"val1", resp.kvs[0].value());
|
||||
test_kv_range_2(kv).await;
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_batch_get() {
|
||||
let kv_store = mock_mem_store_with_data().await;
|
||||
let kv_backend = mock_mem_store_with_data().await;
|
||||
|
||||
let keys = vec![];
|
||||
let resp = kv_store.batch_get(BatchGetRequest { keys }).await.unwrap();
|
||||
|
||||
assert!(resp.kvs.is_empty());
|
||||
|
||||
let keys = vec![b"key10".to_vec()];
|
||||
let resp = kv_store.batch_get(BatchGetRequest { keys }).await.unwrap();
|
||||
|
||||
assert!(resp.kvs.is_empty());
|
||||
|
||||
let keys = vec![b"key1".to_vec(), b"key3".to_vec(), b"key4".to_vec()];
|
||||
let resp = kv_store.batch_get(BatchGetRequest { keys }).await.unwrap();
|
||||
|
||||
assert_eq!(2, resp.kvs.len());
|
||||
assert_eq!(b"key1", resp.kvs[0].key());
|
||||
assert_eq!(b"val1", resp.kvs[0].value());
|
||||
assert_eq!(b"key3", resp.kvs[1].key());
|
||||
assert_eq!(b"val3", resp.kvs[1].value());
|
||||
test_kv_batch_get(kv_backend).await;
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread")]
|
||||
async fn test_compare_and_put() {
|
||||
let kv_store = Arc::new(MemoryKvBackend::<Error>::new());
|
||||
let success = Arc::new(AtomicU8::new(0));
|
||||
let kv_backend = Arc::new(MemoryKvBackend::<Error>::new());
|
||||
|
||||
let mut joins = vec![];
|
||||
for _ in 0..20 {
|
||||
let kv_store_clone = kv_store.clone();
|
||||
let success_clone = success.clone();
|
||||
let join = tokio::spawn(async move {
|
||||
let req = CompareAndPutRequest {
|
||||
key: b"key".to_vec(),
|
||||
expect: vec![],
|
||||
value: b"val_new".to_vec(),
|
||||
};
|
||||
let resp = kv_store_clone.compare_and_put(req).await.unwrap();
|
||||
if resp.success {
|
||||
success_clone.fetch_add(1, Ordering::SeqCst);
|
||||
}
|
||||
});
|
||||
joins.push(join);
|
||||
}
|
||||
|
||||
for join in joins {
|
||||
join.await.unwrap();
|
||||
}
|
||||
|
||||
assert_eq!(1, success.load(Ordering::SeqCst));
|
||||
test_kv_compare_and_put(kv_backend).await;
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_delete_range() {
|
||||
let kv_store = mock_mem_store_with_data().await;
|
||||
let kv_backend = mock_mem_store_with_data().await;
|
||||
|
||||
let req = DeleteRangeRequest {
|
||||
key: b"key3".to_vec(),
|
||||
range_end: vec![],
|
||||
prev_kv: true,
|
||||
};
|
||||
|
||||
let resp = kv_store.delete_range(req).await.unwrap();
|
||||
assert_eq!(1, resp.prev_kvs.len());
|
||||
assert_eq!(b"key3", resp.prev_kvs[0].key());
|
||||
assert_eq!(b"val3", resp.prev_kvs[0].value());
|
||||
|
||||
let resp = kv_store.get(b"key3").await.unwrap();
|
||||
assert!(resp.is_none());
|
||||
|
||||
let req = DeleteRangeRequest {
|
||||
key: b"key2".to_vec(),
|
||||
range_end: vec![],
|
||||
prev_kv: false,
|
||||
};
|
||||
|
||||
let resp = kv_store.delete_range(req).await.unwrap();
|
||||
assert!(resp.prev_kvs.is_empty());
|
||||
|
||||
let resp = kv_store.get(b"key2").await.unwrap();
|
||||
assert!(resp.is_none());
|
||||
|
||||
let key = b"key1".to_vec();
|
||||
let range_end = util::get_prefix_end_key(b"key1");
|
||||
|
||||
let req = DeleteRangeRequest {
|
||||
key: key.clone(),
|
||||
range_end: range_end.clone(),
|
||||
prev_kv: true,
|
||||
};
|
||||
let resp = kv_store.delete_range(req).await.unwrap();
|
||||
assert_eq!(2, resp.prev_kvs.len());
|
||||
|
||||
let req = RangeRequest {
|
||||
key,
|
||||
range_end,
|
||||
..Default::default()
|
||||
};
|
||||
let resp = kv_store.range(req).await.unwrap();
|
||||
assert!(resp.kvs.is_empty());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_move_value() {
|
||||
let kv_store = mock_mem_store_with_data().await;
|
||||
|
||||
let req = MoveValueRequest {
|
||||
from_key: b"key1".to_vec(),
|
||||
to_key: b"key111".to_vec(),
|
||||
};
|
||||
|
||||
let resp = kv_store.move_value(req).await.unwrap();
|
||||
assert_eq!(b"key1", resp.0.as_ref().unwrap().key());
|
||||
assert_eq!(b"val1", resp.0.as_ref().unwrap().value());
|
||||
|
||||
let kv_store = mock_mem_store_with_data().await;
|
||||
|
||||
let req = MoveValueRequest {
|
||||
from_key: b"notexistkey".to_vec(),
|
||||
to_key: b"key222".to_vec(),
|
||||
};
|
||||
|
||||
let resp = kv_store.move_value(req).await.unwrap();
|
||||
assert!(resp.0.is_none());
|
||||
test_kv_delete_range(kv_backend).await;
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_batch_delete() {
|
||||
let kv_store = mock_mem_store_with_data().await;
|
||||
let kv_backend = mock_mem_store_with_data().await;
|
||||
|
||||
assert!(kv_store.get(b"key1").await.unwrap().is_some());
|
||||
assert!(kv_store.get(b"key100").await.unwrap().is_none());
|
||||
|
||||
let req = BatchDeleteRequest {
|
||||
keys: vec![b"key1".to_vec(), b"key100".to_vec()],
|
||||
prev_kv: true,
|
||||
};
|
||||
let resp = kv_store.batch_delete(req).await.unwrap();
|
||||
assert_eq!(1, resp.prev_kvs.len());
|
||||
assert_eq!(
|
||||
vec![KeyValue {
|
||||
key: b"key1".to_vec(),
|
||||
value: b"val1".to_vec()
|
||||
}],
|
||||
resp.prev_kvs
|
||||
);
|
||||
assert!(kv_store.get(b"key1").await.unwrap().is_none());
|
||||
|
||||
assert!(kv_store.get(b"key2").await.unwrap().is_some());
|
||||
assert!(kv_store.get(b"key3").await.unwrap().is_some());
|
||||
|
||||
let req = BatchDeleteRequest {
|
||||
keys: vec![b"key2".to_vec(), b"key3".to_vec()],
|
||||
prev_kv: false,
|
||||
};
|
||||
let resp = kv_store.batch_delete(req).await.unwrap();
|
||||
assert!(resp.prev_kvs.is_empty());
|
||||
|
||||
assert!(kv_store.get(b"key2").await.unwrap().is_none());
|
||||
assert!(kv_store.get(b"key3").await.unwrap().is_none());
|
||||
test_kv_batch_delete(kv_backend).await;
|
||||
}
|
||||
}
|
||||
|
||||
361
src/common/meta/src/kv_backend/test.rs
Normal file
@@ -0,0 +1,361 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::atomic::{AtomicU8, Ordering};
|
||||
use std::sync::Arc;
|
||||
|
||||
use super::{KvBackend, *};
|
||||
use crate::error::Error;
|
||||
use crate::rpc::store::{BatchGetRequest, PutRequest};
|
||||
use crate::rpc::KeyValue;
|
||||
use crate::util;
|
||||
|
||||
pub fn mock_kvs() -> Vec<KeyValue> {
|
||||
vec![
|
||||
KeyValue {
|
||||
key: b"key1".to_vec(),
|
||||
value: b"val1".to_vec(),
|
||||
},
|
||||
KeyValue {
|
||||
key: b"key2".to_vec(),
|
||||
value: b"val2".to_vec(),
|
||||
},
|
||||
KeyValue {
|
||||
key: b"key3".to_vec(),
|
||||
value: b"val3".to_vec(),
|
||||
},
|
||||
]
|
||||
}
|
||||
|
||||
pub async fn prepare_kv(kv_backend: &impl KvBackend) {
|
||||
let kvs = mock_kvs();
|
||||
assert!(kv_backend
|
||||
.batch_put(BatchPutRequest {
|
||||
kvs,
|
||||
..Default::default()
|
||||
})
|
||||
.await
|
||||
.is_ok());
|
||||
|
||||
assert!(kv_backend
|
||||
.put(PutRequest {
|
||||
key: b"key11".to_vec(),
|
||||
value: b"val11".to_vec(),
|
||||
..Default::default()
|
||||
})
|
||||
.await
|
||||
.is_ok());
|
||||
}
|
||||
|
||||
pub async fn test_kv_put(kv_backend: impl KvBackend) {
|
||||
let resp = kv_backend
|
||||
.put(PutRequest {
|
||||
key: b"key11".to_vec(),
|
||||
value: b"val12".to_vec(),
|
||||
prev_kv: false,
|
||||
})
|
||||
.await
|
||||
.unwrap();
|
||||
assert!(resp.prev_kv.is_none());
|
||||
|
||||
let resp = kv_backend
|
||||
.put(PutRequest {
|
||||
key: b"key11".to_vec(),
|
||||
value: b"val13".to_vec(),
|
||||
prev_kv: true,
|
||||
})
|
||||
.await
|
||||
.unwrap();
|
||||
let prev_kv = resp.prev_kv.unwrap();
|
||||
assert_eq!(b"key11", prev_kv.key());
|
||||
assert_eq!(b"val12", prev_kv.value());
|
||||
}
|
||||
|
||||
pub async fn test_kv_range(kv_backend: impl KvBackend) {
|
||||
let key = b"key1".to_vec();
|
||||
let range_end = util::get_prefix_end_key(b"key1");
|
||||
|
||||
let resp = kv_backend
|
||||
.range(RangeRequest {
|
||||
key: key.clone(),
|
||||
range_end: range_end.clone(),
|
||||
limit: 0,
|
||||
keys_only: false,
|
||||
})
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(2, resp.kvs.len());
|
||||
assert_eq!(b"key1", resp.kvs[0].key());
|
||||
assert_eq!(b"val1", resp.kvs[0].value());
|
||||
assert_eq!(b"key11", resp.kvs[1].key());
|
||||
assert_eq!(b"val11", resp.kvs[1].value());
|
||||
|
||||
let resp = kv_backend
|
||||
.range(RangeRequest {
|
||||
key: key.clone(),
|
||||
range_end: range_end.clone(),
|
||||
limit: 0,
|
||||
keys_only: true,
|
||||
})
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(2, resp.kvs.len());
|
||||
assert_eq!(b"key1", resp.kvs[0].key());
|
||||
assert_eq!(b"", resp.kvs[0].value());
|
||||
assert_eq!(b"key11", resp.kvs[1].key());
|
||||
assert_eq!(b"", resp.kvs[1].value());
|
||||
|
||||
let resp = kv_backend
|
||||
.range(RangeRequest {
|
||||
key: key.clone(),
|
||||
limit: 0,
|
||||
keys_only: false,
|
||||
..Default::default()
|
||||
})
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(1, resp.kvs.len());
|
||||
assert_eq!(b"key1", resp.kvs[0].key());
|
||||
assert_eq!(b"val1", resp.kvs[0].value());
|
||||
|
||||
let resp = kv_backend
|
||||
.range(RangeRequest {
|
||||
key,
|
||||
range_end,
|
||||
limit: 1,
|
||||
keys_only: false,
|
||||
})
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(1, resp.kvs.len());
|
||||
assert_eq!(b"key1", resp.kvs[0].key());
|
||||
assert_eq!(b"val1", resp.kvs[0].value());
|
||||
}
|
||||
|
||||
pub async fn test_kv_range_2(kv_backend: impl KvBackend) {
|
||||
kv_backend
|
||||
.put(PutRequest::new().with_key("atest").with_value("value"))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
kv_backend
|
||||
.put(PutRequest::new().with_key("test").with_value("value"))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
// If both key and range_end are '\0', the range represents all keys.
|
||||
let result = kv_backend
|
||||
.range(RangeRequest::new().with_range(b"\0".to_vec(), b"\0".to_vec()))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(result.kvs.len(), 2);
|
||||
assert!(!result.more);
|
||||
|
||||
// If range_end is '\0', the range is all keys greater than or equal to the key argument.
|
||||
let result = kv_backend
|
||||
.range(RangeRequest::new().with_range(b"a".to_vec(), b"\0".to_vec()))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(result.kvs.len(), 2);
|
||||
|
||||
let result = kv_backend
|
||||
.range(RangeRequest::new().with_range(b"b".to_vec(), b"\0".to_vec()))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(result.kvs.len(), 1);
|
||||
assert_eq!(result.kvs[0].key, b"test");
|
||||
|
||||
// Fetches the keys >= "a", set limit to 1, the `more` should be true.
|
||||
let result = kv_backend
|
||||
.range(
|
||||
RangeRequest::new()
|
||||
.with_range(b"a".to_vec(), b"\0".to_vec())
|
||||
.with_limit(1),
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!(result.kvs.len(), 1);
|
||||
assert!(result.more);
|
||||
|
||||
// Fetches the keys >= "a", set limit to 2, the `more` should be false.
|
||||
let result = kv_backend
|
||||
.range(
|
||||
RangeRequest::new()
|
||||
.with_range(b"a".to_vec(), b"\0".to_vec())
|
||||
.with_limit(2),
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!(result.kvs.len(), 2);
|
||||
assert!(!result.more);
|
||||
|
||||
// Fetches the keys >= "a", set limit to 3, the `more` should be false.
|
||||
let result = kv_backend
|
||||
.range(
|
||||
RangeRequest::new()
|
||||
.with_range(b"a".to_vec(), b"\0".to_vec())
|
||||
.with_limit(3),
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!(result.kvs.len(), 2);
|
||||
assert!(!result.more);
|
||||
}
|
||||
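The comments in `test_kv_range_2` describe the etcd-style range convention. A hedged sketch of that convention in isolation; the `range_contains` helper is illustrative and is not part of the backend or this change.

// Illustrative helper capturing the range convention from the comments above:
// "\0"/"\0" means all keys, a "\0" range_end means all keys >= key, and otherwise
// the range is the half-open interval [key, range_end).
fn range_contains(key: &[u8], range_end: &[u8], candidate: &[u8]) -> bool {
    if key == b"\0" && range_end == b"\0" {
        true
    } else if range_end == b"\0" {
        candidate >= key
    } else {
        candidate >= key && candidate < range_end
    }
}

fn main() {
    assert!(range_contains(b"\0", b"\0", b"atest"));
    assert!(range_contains(b"a", b"\0", b"test"));
    assert!(!range_contains(b"b", b"\0", b"atest"));
}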
|
||||
pub async fn test_kv_batch_get(kv_backend: impl KvBackend) {
|
||||
let keys = vec![];
|
||||
let resp = kv_backend
|
||||
.batch_get(BatchGetRequest { keys })
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
assert!(resp.kvs.is_empty());
|
||||
|
||||
let keys = vec![b"key10".to_vec()];
|
||||
let resp = kv_backend
|
||||
.batch_get(BatchGetRequest { keys })
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
assert!(resp.kvs.is_empty());
|
||||
|
||||
let keys = vec![b"key1".to_vec(), b"key3".to_vec(), b"key4".to_vec()];
|
||||
let resp = kv_backend
|
||||
.batch_get(BatchGetRequest { keys })
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(2, resp.kvs.len());
|
||||
assert_eq!(b"key1", resp.kvs[0].key());
|
||||
assert_eq!(b"val1", resp.kvs[0].value());
|
||||
assert_eq!(b"key3", resp.kvs[1].key());
|
||||
assert_eq!(b"val3", resp.kvs[1].value());
|
||||
}
|
||||
|
||||
pub async fn test_kv_compare_and_put(kv_backend: Arc<dyn KvBackend<Error = Error>>) {
|
||||
let success = Arc::new(AtomicU8::new(0));
|
||||
|
||||
let mut joins = vec![];
|
||||
for _ in 0..20 {
|
||||
let kv_backend_clone = kv_backend.clone();
|
||||
let success_clone = success.clone();
|
||||
let join = tokio::spawn(async move {
|
||||
let req = CompareAndPutRequest {
|
||||
key: b"key".to_vec(),
|
||||
expect: vec![],
|
||||
value: b"val_new".to_vec(),
|
||||
};
|
||||
let resp = kv_backend_clone.compare_and_put(req).await.unwrap();
|
||||
if resp.success {
|
||||
success_clone.fetch_add(1, Ordering::SeqCst);
|
||||
}
|
||||
});
|
||||
joins.push(join);
|
||||
}
|
||||
|
||||
for join in joins {
|
||||
join.await.unwrap();
|
||||
}
|
||||
|
||||
assert_eq!(1, success.load(Ordering::SeqCst));
|
||||
}
|
||||
|
||||
pub async fn test_kv_delete_range(kv_backend: impl KvBackend) {
|
||||
let req = DeleteRangeRequest {
|
||||
key: b"key3".to_vec(),
|
||||
range_end: vec![],
|
||||
prev_kv: true,
|
||||
};
|
||||
|
||||
let resp = kv_backend.delete_range(req).await.unwrap();
|
||||
assert_eq!(1, resp.prev_kvs.len());
|
||||
assert_eq!(1, resp.deleted);
|
||||
assert_eq!(b"key3", resp.prev_kvs[0].key());
|
||||
assert_eq!(b"val3", resp.prev_kvs[0].value());
|
||||
|
||||
let resp = kv_backend.get(b"key3").await.unwrap();
|
||||
assert!(resp.is_none());
|
||||
|
||||
let req = DeleteRangeRequest {
|
||||
key: b"key2".to_vec(),
|
||||
range_end: vec![],
|
||||
prev_kv: false,
|
||||
};
|
||||
|
||||
let resp = kv_backend.delete_range(req).await.unwrap();
|
||||
assert_eq!(1, resp.deleted);
|
||||
assert!(resp.prev_kvs.is_empty());
|
||||
|
||||
let resp = kv_backend.get(b"key2").await.unwrap();
|
||||
assert!(resp.is_none());
|
||||
|
||||
let key = b"key1".to_vec();
|
||||
let range_end = util::get_prefix_end_key(b"key1");
|
||||
|
||||
let req = DeleteRangeRequest {
|
||||
key: key.clone(),
|
||||
range_end: range_end.clone(),
|
||||
prev_kv: true,
|
||||
};
|
||||
let resp = kv_backend.delete_range(req).await.unwrap();
|
||||
assert_eq!(2, resp.prev_kvs.len());
|
||||
|
||||
let req = RangeRequest {
|
||||
key,
|
||||
range_end,
|
||||
..Default::default()
|
||||
};
|
||||
let resp = kv_backend.range(req).await.unwrap();
|
||||
assert!(resp.kvs.is_empty());
|
||||
}
|
||||
|
||||
pub async fn test_kv_batch_delete(kv_backend: impl KvBackend) {
|
||||
assert!(kv_backend.get(b"key1").await.unwrap().is_some());
|
||||
assert!(kv_backend.get(b"key100").await.unwrap().is_none());
|
||||
|
||||
let req = BatchDeleteRequest {
|
||||
keys: vec![b"key1".to_vec(), b"key100".to_vec()],
|
||||
prev_kv: true,
|
||||
};
|
||||
let resp = kv_backend.batch_delete(req).await.unwrap();
|
||||
assert_eq!(1, resp.prev_kvs.len());
|
||||
assert_eq!(
|
||||
vec![KeyValue {
|
||||
key: b"key1".to_vec(),
|
||||
value: b"val1".to_vec()
|
||||
}],
|
||||
resp.prev_kvs
|
||||
);
|
||||
assert!(kv_backend.get(b"key1").await.unwrap().is_none());
|
||||
|
||||
assert!(kv_backend.get(b"key2").await.unwrap().is_some());
|
||||
assert!(kv_backend.get(b"key3").await.unwrap().is_some());
|
||||
|
||||
let req = BatchDeleteRequest {
|
||||
keys: vec![b"key2".to_vec(), b"key3".to_vec()],
|
||||
prev_kv: false,
|
||||
};
|
||||
let resp = kv_backend.batch_delete(req).await.unwrap();
|
||||
assert!(resp.prev_kvs.is_empty());
|
||||
|
||||
assert!(kv_backend.get(b"key2").await.unwrap().is_none());
|
||||
assert!(kv_backend.get(b"key3").await.unwrap().is_none());
|
||||
}
|
||||
@@ -268,9 +268,9 @@ mod tests {
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_txn_one_compare_op() {
|
||||
let kv_store = create_kv_store().await;
|
||||
let kv_backend = create_kv_backend().await;
|
||||
|
||||
let _ = kv_store
|
||||
let _ = kv_backend
|
||||
.put(PutRequest {
|
||||
key: vec![11],
|
||||
value: vec![3],
|
||||
@@ -288,7 +288,7 @@ mod tests {
|
||||
.and_then(vec![TxnOp::Put(vec![11], vec![1])])
|
||||
.or_else(vec![TxnOp::Put(vec![11], vec![2])]);
|
||||
|
||||
let txn_response = kv_store.txn(txn).await.unwrap();
|
||||
let txn_response = kv_backend.txn(txn).await.unwrap();
|
||||
|
||||
assert!(txn_response.succeeded);
|
||||
assert_eq!(txn_response.responses.len(), 1);
|
||||
@@ -296,10 +296,10 @@ mod tests {
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_txn_multi_compare_op() {
|
||||
let kv_store = create_kv_store().await;
|
||||
let kv_backend = create_kv_backend().await;
|
||||
|
||||
for i in 1..3 {
|
||||
let _ = kv_store
|
||||
let _ = kv_backend
|
||||
.put(PutRequest {
|
||||
key: vec![i],
|
||||
value: vec![i],
|
||||
@@ -321,7 +321,7 @@ mod tests {
|
||||
])
|
||||
.or_else(vec![TxnOp::Put(vec![1], vec![11])]);
|
||||
|
||||
let txn_response = kv_store.txn(txn).await.unwrap();
|
||||
let txn_response = kv_backend.txn(txn).await.unwrap();
|
||||
|
||||
assert!(txn_response.succeeded);
|
||||
assert_eq!(txn_response.responses.len(), 2);
|
||||
@@ -329,9 +329,9 @@ mod tests {
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_txn_compare_equal() {
|
||||
let kv_store = create_kv_store().await;
|
||||
let kv_backend = create_kv_backend().await;
|
||||
let key = vec![101u8];
|
||||
kv_store.delete(&key, false).await.unwrap();
|
||||
kv_backend.delete(&key, false).await.unwrap();
|
||||
|
||||
let txn = Txn::new()
|
||||
.when(vec![Compare::with_not_exist_value(
|
||||
@@ -340,10 +340,10 @@ mod tests {
|
||||
)])
|
||||
.and_then(vec![TxnOp::Put(key.clone(), vec![1])])
|
||||
.or_else(vec![TxnOp::Put(key.clone(), vec![2])]);
|
||||
let txn_response = kv_store.txn(txn.clone()).await.unwrap();
|
||||
let txn_response = kv_backend.txn(txn.clone()).await.unwrap();
|
||||
assert!(txn_response.succeeded);
|
||||
|
||||
let txn_response = kv_store.txn(txn).await.unwrap();
|
||||
let txn_response = kv_backend.txn(txn).await.unwrap();
|
||||
assert!(!txn_response.succeeded);
|
||||
|
||||
let txn = Txn::new()
|
||||
@@ -354,15 +354,15 @@ mod tests {
|
||||
)])
|
||||
.and_then(vec![TxnOp::Put(key.clone(), vec![3])])
|
||||
.or_else(vec![TxnOp::Put(key, vec![4])]);
|
||||
let txn_response = kv_store.txn(txn).await.unwrap();
|
||||
let txn_response = kv_backend.txn(txn).await.unwrap();
|
||||
assert!(txn_response.succeeded);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_txn_compare_greater() {
|
||||
let kv_store = create_kv_store().await;
|
||||
let kv_backend = create_kv_backend().await;
|
||||
let key = vec![102u8];
|
||||
kv_store.delete(&key, false).await.unwrap();
|
||||
kv_backend.delete(&key, false).await.unwrap();
|
||||
|
||||
let txn = Txn::new()
|
||||
.when(vec![Compare::with_not_exist_value(
|
||||
@@ -371,10 +371,10 @@ mod tests {
|
||||
)])
|
||||
.and_then(vec![TxnOp::Put(key.clone(), vec![1])])
|
||||
.or_else(vec![TxnOp::Put(key.clone(), vec![2])]);
|
||||
let txn_response = kv_store.txn(txn.clone()).await.unwrap();
|
||||
let txn_response = kv_backend.txn(txn.clone()).await.unwrap();
|
||||
assert!(!txn_response.succeeded);
|
||||
|
||||
let txn_response = kv_store.txn(txn).await.unwrap();
|
||||
let txn_response = kv_backend.txn(txn).await.unwrap();
|
||||
assert!(txn_response.succeeded);
|
||||
|
||||
let txn = Txn::new()
|
||||
@@ -385,7 +385,7 @@ mod tests {
|
||||
)])
|
||||
.and_then(vec![TxnOp::Put(key.clone(), vec![3])])
|
||||
.or_else(vec![TxnOp::Get(key.clone())]);
|
||||
let mut txn_response = kv_store.txn(txn).await.unwrap();
|
||||
let mut txn_response = kv_backend.txn(txn).await.unwrap();
|
||||
assert!(!txn_response.succeeded);
|
||||
let res = txn_response.responses.pop().unwrap();
|
||||
assert_eq!(
|
||||
@@ -402,9 +402,9 @@ mod tests {
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_txn_compare_less() {
|
||||
let kv_store = create_kv_store().await;
|
||||
let kv_backend = create_kv_backend().await;
|
||||
let key = vec![103u8];
|
||||
kv_store.delete(&[3], false).await.unwrap();
|
||||
kv_backend.delete(&[3], false).await.unwrap();
|
||||
|
||||
let txn = Txn::new()
|
||||
.when(vec![Compare::with_not_exist_value(
|
||||
@@ -413,10 +413,10 @@ mod tests {
|
||||
)])
|
||||
.and_then(vec![TxnOp::Put(key.clone(), vec![1])])
|
||||
.or_else(vec![TxnOp::Put(key.clone(), vec![2])]);
|
||||
let txn_response = kv_store.txn(txn.clone()).await.unwrap();
|
||||
let txn_response = kv_backend.txn(txn.clone()).await.unwrap();
|
||||
assert!(!txn_response.succeeded);
|
||||
|
||||
let txn_response = kv_store.txn(txn).await.unwrap();
|
||||
let txn_response = kv_backend.txn(txn).await.unwrap();
|
||||
assert!(!txn_response.succeeded);
|
||||
|
||||
let txn = Txn::new()
|
||||
@@ -427,7 +427,7 @@ mod tests {
|
||||
)])
|
||||
.and_then(vec![TxnOp::Put(key.clone(), vec![3])])
|
||||
.or_else(vec![TxnOp::Get(key.clone())]);
|
||||
let mut txn_response = kv_store.txn(txn).await.unwrap();
|
||||
let mut txn_response = kv_backend.txn(txn).await.unwrap();
|
||||
assert!(!txn_response.succeeded);
|
||||
let res = txn_response.responses.pop().unwrap();
|
||||
assert_eq!(
|
||||
@@ -444,9 +444,9 @@ mod tests {
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_txn_compare_not_equal() {
|
||||
let kv_store = create_kv_store().await;
|
||||
let kv_backend = create_kv_backend().await;
|
||||
let key = vec![104u8];
|
||||
kv_store.delete(&key, false).await.unwrap();
|
||||
kv_backend.delete(&key, false).await.unwrap();
|
||||
|
||||
let txn = Txn::new()
|
||||
.when(vec![Compare::with_not_exist_value(
|
||||
@@ -455,10 +455,10 @@ mod tests {
|
||||
)])
|
||||
.and_then(vec![TxnOp::Put(key.clone(), vec![1])])
|
||||
.or_else(vec![TxnOp::Put(key.clone(), vec![2])]);
|
||||
let txn_response = kv_store.txn(txn.clone()).await.unwrap();
|
||||
let txn_response = kv_backend.txn(txn.clone()).await.unwrap();
|
||||
assert!(!txn_response.succeeded);
|
||||
|
||||
let txn_response = kv_store.txn(txn).await.unwrap();
|
||||
let txn_response = kv_backend.txn(txn).await.unwrap();
|
||||
assert!(txn_response.succeeded);
|
||||
|
||||
let txn = Txn::new()
|
||||
@@ -469,7 +469,7 @@ mod tests {
|
||||
)])
|
||||
.and_then(vec![TxnOp::Put(key.clone(), vec![3])])
|
||||
.or_else(vec![TxnOp::Get(key.clone())]);
|
||||
let mut txn_response = kv_store.txn(txn).await.unwrap();
|
||||
let mut txn_response = kv_backend.txn(txn).await.unwrap();
|
||||
assert!(!txn_response.succeeded);
|
||||
let res = txn_response.responses.pop().unwrap();
|
||||
assert_eq!(
|
||||
@@ -484,7 +484,7 @@ mod tests {
|
||||
);
|
||||
}
|
||||
|
||||
async fn create_kv_store() -> KvBackendRef {
|
||||
async fn create_kv_backend() -> KvBackendRef {
|
||||
Arc::new(MemoryKvBackend::<Error>::new())
|
||||
// TODO(jiachun): Add a feature to test against etcd in github CI
|
||||
//
|
||||
|
||||
@@ -12,6 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#![feature(assert_matches)]
|
||||
#![feature(btree_extract_if)]
|
||||
#![feature(async_closure)]
|
||||
|
||||
|
||||
@@ -12,11 +12,42 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
pub const METRIC_META_TXN_REQUEST: &str = "meta.txn_request";
|
||||
use lazy_static::lazy_static;
|
||||
use prometheus::*;
|
||||
|
||||
pub(crate) const METRIC_META_CREATE_CATALOG: &str = "meta.create_catalog";
|
||||
pub(crate) const METRIC_META_CREATE_SCHEMA: &str = "meta.create_schema";
|
||||
pub(crate) const METRIC_META_PROCEDURE_CREATE_TABLE: &str = "meta.procedure.create_table";
|
||||
pub(crate) const METRIC_META_PROCEDURE_DROP_TABLE: &str = "meta.procedure.drop_table";
|
||||
pub(crate) const METRIC_META_PROCEDURE_ALTER_TABLE: &str = "meta.procedure.alter_table";
|
||||
pub(crate) const METRIC_META_PROCEDURE_TRUNCATE_TABLE: &str = "meta.procedure.truncate_table";
|
||||
lazy_static! {
|
||||
pub static ref METRIC_META_TXN_REQUEST: HistogramVec =
|
||||
register_histogram_vec!("meta_txn_request", "meta txn request", &["target", "op"]).unwrap();
|
||||
pub static ref METRIC_META_CREATE_CATALOG: Histogram =
|
||||
register_histogram!("meta_create_catalog", "meta create catalog").unwrap();
|
||||
pub static ref METRIC_META_CREATE_CATALOG_COUNTER: IntCounter =
|
||||
register_int_counter!("meta_create_catalog_counter", "meta create catalog").unwrap();
|
||||
pub static ref METRIC_META_CREATE_SCHEMA: Histogram =
|
||||
register_histogram!("meta_create_schema", "meta create schema").unwrap();
|
||||
pub static ref METRIC_META_CREATE_SCHEMA_COUNTER: IntCounter =
|
||||
register_int_counter!("meta_create_schema_counter", "meta create schema").unwrap();
|
||||
pub static ref METRIC_META_PROCEDURE_CREATE_TABLE: HistogramVec = register_histogram_vec!(
|
||||
"meta_procedure_create_table",
|
||||
"meta procedure create table",
|
||||
&["step"]
|
||||
)
|
||||
.unwrap();
|
||||
pub static ref METRIC_META_PROCEDURE_DROP_TABLE: HistogramVec = register_histogram_vec!(
|
||||
"meta_procedure_drop_table",
|
||||
"meta procedure drop table",
|
||||
&["step"]
|
||||
)
|
||||
.unwrap();
|
||||
pub static ref METRIC_META_PROCEDURE_ALTER_TABLE: HistogramVec = register_histogram_vec!(
|
||||
"meta_procedure_alter_table",
|
||||
"meta procedure alter table",
|
||||
&["step"]
|
||||
)
|
||||
.unwrap();
|
||||
pub static ref METRIC_META_PROCEDURE_TRUNCATE_TABLE: HistogramVec = register_histogram_vec!(
|
||||
"meta_procedure_truncate_table",
|
||||
"meta procedure truncate table",
|
||||
&["step"]
|
||||
)
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
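The lazy_static block above replaces string metric names with prometheus collectors. A short hedged usage sketch, mirroring how the etcd and memory backends in this change record txn latency; the `record_txn` wrapper is illustrative and assumes the statics above are in scope.

// Illustrative wrapper (not part of this change) showing the timer pattern used in
// this diff: the HistogramTimer observes the elapsed time when it is dropped after
// the closure returns.
fn record_txn<T>(target: &str, op: &str, f: impl FnOnce() -> T) -> T {
    let _timer = METRIC_META_TXN_REQUEST
        .with_label_values(&[target, op])
        .start_timer();
    f()
}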
@@ -49,6 +49,14 @@ impl Peer {
|
||||
addr: addr.into(),
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(any(test, feature = "testing"))]
|
||||
pub fn empty(id: u64) -> Self {
|
||||
Self {
|
||||
id,
|
||||
addr: String::new(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Display for Peer {
|
||||
|
||||
@@ -17,10 +17,12 @@ use std::pin::Pin;
|
||||
use std::sync::Arc;
|
||||
use std::task::{Context, Poll};
|
||||
|
||||
use common_telemetry::debug;
|
||||
use futures::future::BoxFuture;
|
||||
use futures::{ready, FutureExt, Stream};
|
||||
use snafu::ensure;
|
||||
|
||||
use crate::error::Result;
|
||||
use crate::error::{self, Result};
|
||||
use crate::kv_backend::KvBackendRef;
|
||||
use crate::rpc::store::{RangeRequest, RangeResponse};
|
||||
use crate::rpc::KeyValue;
|
||||
@@ -39,7 +41,16 @@ enum PaginationStreamState<K, V> {
|
||||
Error,
|
||||
}
|
||||
|
||||
pub const DEFAULT_PAGE_SIZE: usize = 512;
|
||||
/// The default page size for range requests.
///
/// It depends on the upstream KvStore server's gRPC message size limit
/// (e.g., etcd's default gRPC message size limit is 4 MiB).
///
/// Generally, almost all metadata entries are smaller than 2700 bytes,
/// so we can statically set [DEFAULT_PAGE_SIZE] to 1536.
///
/// TODO(weny): Consider updating the default page size dynamically.
pub const DEFAULT_PAGE_SIZE: usize = 1536;
|
||||
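As a back-of-the-envelope check of the sizing argument above (using only the ~2700-byte entry size and 4 MiB etcd limit quoted in the comment, both of which are assumptions documented there), a full page stays under the gRPC message limit:

// Sanity check of the page size choice, using only the numbers quoted in the
// doc comment above.
const ASSUMED_MAX_ENTRY_BYTES: usize = 2700;
const ASSUMED_GRPC_LIMIT_BYTES: usize = 4 * 1024 * 1024; // 4 MiB

fn main() {
    let worst_case_page_bytes = 1536 * ASSUMED_MAX_ENTRY_BYTES; // 4_147_200 bytes
    assert!(worst_case_page_bytes < ASSUMED_GRPC_LIMIT_BYTES);  // ~3.96 MiB < 4 MiB
}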
|
||||
struct PaginationStreamFactory {
|
||||
kv: KvBackendRef,
|
||||
@@ -55,10 +66,13 @@ struct PaginationStreamFactory {
|
||||
pub range_end: Vec<u8>,
|
||||
|
||||
/// page_size is the pagination page size.
|
||||
pub page_size: usize,
|
||||
page_size: usize,
|
||||
/// keys_only when set returns only the keys and not the values.
|
||||
pub keys_only: bool,
|
||||
|
||||
/// It reduces the page size if the response size exceeds the limit.
|
||||
pub adaptive_page_size: usize,
|
||||
|
||||
pub more: bool,
|
||||
}
|
||||
|
||||
@@ -78,19 +92,58 @@ impl PaginationStreamFactory {
|
||||
page_size,
|
||||
keys_only,
|
||||
more,
|
||||
adaptive_page_size: if page_size == 0 {
|
||||
DEFAULT_ADAPTIVE_PAGE_SIZE
|
||||
} else {
|
||||
page_size
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const DEFAULT_ADAPTIVE_PAGE_SIZE: usize = 1024;
|
||||
|
||||
impl PaginationStreamFactory {
|
||||
pub async fn read_next(self) -> Result<(Self, Option<RangeResponse>)> {
|
||||
fn try_reduce_adaptive_page_size(&mut self) -> Result<()> {
|
||||
self.adaptive_page_size /= 2;
|
||||
|
||||
ensure!(
|
||||
self.adaptive_page_size != 0,
|
||||
error::UnexpectedSnafu {
|
||||
err_msg: "Exceeded maximum number of adaptive range retries"
|
||||
}
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Decreases the page size if the response message size exceeds the limit.
/// TODO(weny): Consider adding an E2E test.
|
||||
#[async_recursion::async_recursion]
|
||||
async fn adaptive_range(&mut self, req: RangeRequest) -> Result<RangeResponse> {
|
||||
match self.kv.range(req.clone()).await {
|
||||
Ok(resp) => Ok(resp),
|
||||
Err(err) => {
|
||||
if err.is_exceeded_size_limit() {
|
||||
self.try_reduce_adaptive_page_size()?;
|
||||
debug!("Reset page_size to {}", self.adaptive_page_size);
|
||||
|
||||
self.adaptive_range(req.with_limit(self.adaptive_page_size as i64))
|
||||
.await
|
||||
} else {
|
||||
Err(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn read_next(mut self) -> Result<(Self, Option<RangeResponse>)> {
|
||||
if self.more {
|
||||
let resp = self
|
||||
.kv
|
||||
.range(RangeRequest {
|
||||
.adaptive_range(RangeRequest {
|
||||
key: self.key.clone(),
|
||||
range_end: self.range_end.clone(),
|
||||
limit: self.page_size as i64,
|
||||
limit: self.adaptive_page_size as i64,
|
||||
keys_only: self.keys_only,
|
||||
})
|
||||
.await?;
|
||||
@@ -111,6 +164,7 @@ impl PaginationStreamFactory {
|
||||
page_size: self.page_size,
|
||||
keys_only: self.keys_only,
|
||||
more: resp.more,
|
||||
adaptive_page_size: self.adaptive_page_size,
|
||||
},
|
||||
Some(resp),
|
||||
))
|
||||
@@ -214,6 +268,7 @@ impl<K, V> Stream for PaginationStream<K, V> {
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
use std::assert_matches::assert_matches;
|
||||
use std::collections::BTreeMap;
|
||||
|
||||
use futures::TryStreamExt;
|
||||
@@ -228,12 +283,47 @@ mod tests {
|
||||
Ok((kv.key.clone(), kv.value))
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_try_reduce_page_size() {
|
||||
let kv_backend = Arc::new(MemoryKvBackend::<Error>::new()) as _;
|
||||
|
||||
let mut factory =
|
||||
PaginationStreamFactory::new(&kv_backend, vec![], vec![], 2, false, false);
|
||||
|
||||
// new adaptive page size: 1
|
||||
factory.try_reduce_adaptive_page_size().unwrap();
|
||||
|
||||
// new adaptive page size: 0
|
||||
assert_matches!(
|
||||
factory.try_reduce_adaptive_page_size().unwrap_err(),
|
||||
error::Error::Unexpected { .. }
|
||||
);
|
||||
|
||||
let mut factory =
|
||||
PaginationStreamFactory::new(&kv_backend, vec![], vec![], 1024, false, false);
|
||||
|
||||
factory.try_reduce_adaptive_page_size().unwrap();
|
||||
|
||||
assert_eq!(factory.adaptive_page_size, 512);
|
||||
|
||||
factory.try_reduce_adaptive_page_size().unwrap();
|
||||
|
||||
assert_eq!(factory.adaptive_page_size, 256);
|
||||
|
||||
let mut factory =
|
||||
PaginationStreamFactory::new(&kv_backend, vec![], vec![], 0, false, false);
|
||||
|
||||
factory.try_reduce_adaptive_page_size().unwrap();
|
||||
|
||||
assert_eq!(factory.adaptive_page_size, DEFAULT_ADAPTIVE_PAGE_SIZE / 2);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_range_empty() {
|
||||
let kv_store = Arc::new(MemoryKvBackend::<Error>::new());
|
||||
let kv_backend = Arc::new(MemoryKvBackend::<Error>::new());
|
||||
|
||||
let stream = PaginationStream::new(
|
||||
kv_store.clone(),
|
||||
kv_backend.clone(),
|
||||
RangeRequest {
|
||||
key: b"a".to_vec(),
|
||||
..Default::default()
|
||||
@@ -248,14 +338,14 @@ mod tests {
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_range() {
|
||||
let kv_store = Arc::new(MemoryKvBackend::<Error>::new());
|
||||
let kv_backend = Arc::new(MemoryKvBackend::<Error>::new());
|
||||
let total = 26;
|
||||
|
||||
let mut expected = BTreeMap::<Vec<u8>, ()>::new();
|
||||
for i in 0..total {
|
||||
let key = vec![97 + i];
|
||||
|
||||
assert!(kv_store
|
||||
assert!(kv_backend
|
||||
.put(PutRequest {
|
||||
key: key.clone(),
|
||||
value: key.clone(),
|
||||
@@ -271,7 +361,7 @@ mod tests {
|
||||
let range_end = b"f".to_vec();
|
||||
|
||||
let stream = PaginationStream::new(
|
||||
kv_store.clone(),
|
||||
kv_backend.clone(),
|
||||
RangeRequest {
|
||||
key,
|
||||
range_end,
|
||||
|
||||
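The adaptive paging added above halves the page size whenever a response exceeds the server's size limit and errors out once the size would reach zero. A standalone sketch of that back-off sequence (illustrative only, not the `PaginationStreamFactory` code):

// Illustrative back-off sequence for the adaptive page size logic in this change:
// halve on each size-limit error, stop once the page size would reach zero.
fn next_page_size(current: usize) -> Option<usize> {
    let halved = current / 2;
    (halved != 0).then_some(halved)
}

fn main() {
    // Starting from the default of 1536, retries shrink the page to 768, 384, ..., 1.
    let mut size = 1536usize;
    while let Some(next) = next_page_size(size) {
        size = next;
    }
    assert_eq!(size, 1);
}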
@@ -15,65 +15,19 @@
|
||||
use std::collections::{BTreeMap, HashMap, HashSet};
|
||||
|
||||
use api::v1::meta::{
|
||||
Partition as PbPartition, Peer as PbPeer, Region as PbRegion, RegionRoute as PbRegionRoute,
|
||||
RouteRequest as PbRouteRequest, RouteResponse as PbRouteResponse, Table as PbTable,
|
||||
TableId as PbTableId, TableRoute as PbTableRoute, TableRouteValue as PbTableRouteValue,
|
||||
Partition as PbPartition, Peer as PbPeer, Region as PbRegion, Table as PbTable,
|
||||
TableRoute as PbTableRoute,
|
||||
};
|
||||
use serde::ser::SerializeSeq;
|
||||
use serde::{Deserialize, Deserializer, Serialize, Serializer};
|
||||
use snafu::OptionExt;
|
||||
use store_api::storage::{RegionId, RegionNumber};
|
||||
use table::metadata::TableId;
|
||||
|
||||
use crate::error::{self, Result};
|
||||
use crate::key::RegionDistribution;
|
||||
use crate::peer::Peer;
|
||||
use crate::rpc::util;
|
||||
use crate::table_name::TableName;
|
||||
|
||||
#[derive(Debug, Clone, Default)]
|
||||
pub struct RouteRequest {
|
||||
pub table_ids: Vec<TableId>,
|
||||
}
|
||||
|
||||
impl From<RouteRequest> for PbRouteRequest {
|
||||
fn from(mut req: RouteRequest) -> Self {
|
||||
Self {
|
||||
header: None,
|
||||
table_ids: req.table_ids.drain(..).map(|id| PbTableId { id }).collect(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl RouteRequest {
|
||||
#[inline]
|
||||
pub fn new(table_id: TableId) -> Self {
|
||||
Self {
|
||||
table_ids: vec![table_id],
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct RouteResponse {
|
||||
pub table_routes: Vec<TableRoute>,
|
||||
}
|
||||
|
||||
impl TryFrom<PbRouteResponse> for RouteResponse {
|
||||
type Error = error::Error;
|
||||
|
||||
fn try_from(pb: PbRouteResponse) -> Result<Self> {
|
||||
util::check_response_header(pb.header.as_ref())?;
|
||||
|
||||
let table_routes = pb
|
||||
.table_routes
|
||||
.into_iter()
|
||||
.map(|x| TableRoute::try_from_raw(&pb.peers, x))
|
||||
.collect::<Result<Vec<_>>>()?;
|
||||
Ok(Self { table_routes })
|
||||
}
|
||||
}
|
||||
|
||||
pub fn region_distribution(region_routes: &[RegionRoute]) -> Result<RegionDistribution> {
|
||||
let mut regions_id_map = RegionDistribution::new();
|
||||
for route in region_routes.iter() {
|
||||
@@ -104,7 +58,10 @@ pub fn find_leaders(region_routes: &[RegionRoute]) -> HashSet<Peer> {
|
||||
.collect()
|
||||
}
|
||||
|
||||
pub fn convert_to_region_map(region_routes: &[RegionRoute]) -> HashMap<u32, &Peer> {
|
||||
/// Returns a `HashMap` from [RegionNumber] to the leader [Peer] of each region.
///
/// If a region doesn't have a leader peer, the [Region] is omitted.
|
||||
pub fn convert_to_region_leader_map(region_routes: &[RegionRoute]) -> HashMap<RegionNumber, &Peer> {
|
||||
region_routes
|
||||
.iter()
|
||||
.filter_map(|x| {
|
||||
@@ -115,7 +72,10 @@ pub fn convert_to_region_map(region_routes: &[RegionRoute]) -> HashMap<u32, &Pee
|
||||
.collect::<HashMap<_, _>>()
|
||||
}
|
||||
|
||||
pub fn find_region_leader(region_routes: &[RegionRoute], region_number: u32) -> Option<&Peer> {
|
||||
pub fn find_region_leader(
|
||||
region_routes: &[RegionRoute],
|
||||
region_number: RegionNumber,
|
||||
) -> Option<&Peer> {
|
||||
region_routes
|
||||
.iter()
|
||||
.find(|x| x.region.id.region_number() == region_number)
|
||||
@@ -194,112 +154,12 @@ impl TableRoute {
                region,
                leader_peer,
                follower_peers,
                leader_status: None,
            });
        }

        Ok(Self::new(table, region_routes))
    }

    pub fn try_into_raw(self) -> Result<(Vec<PbPeer>, PbTableRoute)> {
        let mut peers = HashSet::new();
        self.region_routes
            .iter()
            .filter_map(|x| x.leader_peer.as_ref())
            .for_each(|p| {
                let _ = peers.insert(p.clone());
            });
        self.region_routes
            .iter()
            .flat_map(|x| x.follower_peers.iter())
            .for_each(|p| {
                let _ = peers.insert(p.clone());
            });
        let mut peers = peers.into_iter().map(Into::into).collect::<Vec<PbPeer>>();
        peers.sort_by_key(|x| x.id);

        let find_peer = |peer_id: u64| -> u64 {
            peers
                .iter()
                .enumerate()
                .find_map(|(i, x)| {
                    if x.id == peer_id {
                        Some(i as u64)
                    } else {
                        None
                    }
                })
                .unwrap_or_else(|| {
                    panic!("Peer {peer_id} must be present when collecting all peers.")
                })
        };

        let mut region_routes = Vec::with_capacity(self.region_routes.len());
        for region_route in self.region_routes.into_iter() {
            let leader_peer_index = region_route.leader_peer.map(|x| find_peer(x.id)).context(
                error::RouteInfoCorruptedSnafu {
                    err_msg: "'leader_peer' is empty in region route",
                },
            )?;

            let follower_peer_indexes = region_route
                .follower_peers
                .iter()
                .map(|x| find_peer(x.id))
                .collect::<Vec<_>>();

            region_routes.push(PbRegionRoute {
                region: Some(region_route.region.into()),
                leader_peer_index,
                follower_peer_indexes,
            });
        }

        let table_route = PbTableRoute {
            table: Some(self.table.into()),
            region_routes,
        };
        Ok((peers, table_route))
    }

    pub fn find_leaders(&self) -> HashSet<Peer> {
        find_leaders(&self.region_routes)
    }

    pub fn find_leader_regions(&self, datanode: &Peer) -> Vec<RegionNumber> {
        find_leader_regions(&self.region_routes, datanode)
    }

    pub fn find_region_leader(&self, region_number: RegionNumber) -> Option<&Peer> {
        self.region_leaders
            .get(&region_number)
            .and_then(|x| x.as_ref())
    }
}

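A small sketch, not part of the diff, of the index-based encoding that `try_into_raw` emits: peers are deduplicated, sorted by id, and each `PbRegionRoute` refers to them by position through `leader_peer_index` and `follower_peer_indexes` (the same layout exercised by `test_table_route_raw_conversion` below). The test name and values are invented.

#[test]
fn example_peer_index_encoding() {
    // Peers as try_into_raw would collect them: deduplicated and sorted by id.
    let peers = vec![
        PbPeer { id: 1, addr: "a1".to_string() },
        PbPeer { id: 2, addr: "a2".to_string() },
        PbPeer { id: 3, addr: "a3".to_string() },
    ];

    // A region whose leader is peer 2 and whose followers are peers 1 and 3
    // is encoded by index into that list.
    let index_of = |peer_id: u64| peers.iter().position(|p| p.id == peer_id).unwrap() as u64;
    assert_eq!(index_of(2), 1); // leader_peer_index
    assert_eq!(vec![index_of(1), index_of(3)], vec![0, 2]); // follower_peer_indexes
}
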
impl TryFrom<PbTableRouteValue> for TableRoute {
    type Error = error::Error;

    fn try_from(pb: PbTableRouteValue) -> Result<Self> {
        TableRoute::try_from_raw(
            &pb.peers,
            pb.table_route.context(error::InvalidProtoMsgSnafu {
                err_msg: "expected table_route",
            })?,
        )
    }
}

impl TryFrom<TableRoute> for PbTableRouteValue {
    type Error = error::Error;

    fn try_from(table_route: TableRoute) -> Result<Self> {
        let (peers, table_route) = table_route.try_into_raw()?;

        Ok(PbTableRouteValue {
            peers,
            table_route: Some(table_route),
        })
    }
}

#[derive(Debug, Clone, Deserialize, Serialize, PartialEq)]
@@ -343,17 +203,56 @@ pub struct RegionRoute {
    pub region: Region,
    pub leader_peer: Option<Peer>,
    pub follower_peers: Vec<Peer>,
    /// `None` by default.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub leader_status: Option<RegionStatus>,
}

/// The status of the [Region].
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq)]
pub enum RegionStatus {
    /// The cases in which the [Region] will be downgraded:
    ///
    /// - The [Region] is unavailable (e.g., crashed, network disconnected).
    /// - The [Region] was planned to migrate to another [Peer].
    Downgraded,
}

impl RegionRoute {
    /// Returns true if the Leader [Region] is downgraded.
    ///
    /// The cases in which the [Region] will be downgraded:
    ///
    /// - The [Region] is unavailable (e.g., crashed, network disconnected).
    /// - The [Region] was planned to migrate to another [Peer].
    ///
    pub fn is_leader_downgraded(&self) -> bool {
        matches!(self.leader_status, Some(RegionStatus::Downgraded))
    }

    /// Marks the Leader [Region] as downgraded.
    ///
    /// We should downgrade a [Region] before deactivating it:
    ///
    /// - During the [Region] Failover Procedure.
    /// - Migrating a [Region].
    ///
    /// **Notes:** Meta Server will stop renewing the lease for the downgraded [Region].
    ///
    pub fn downgrade_leader(&mut self) {
        self.leader_status = Some(RegionStatus::Downgraded)
    }
}
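
A sketch, not part of the diff, showing what the `#[serde(default, skip_serializing_if = "Option::is_none")]` attribute on `leader_status` buys: an undowngraded route serializes exactly as before the field existed, while a downgraded one carries the status. The test name and values are invented; `serde_json` is assumed to be available as in the tests below.

#[test]
fn example_leader_status_serde() {
    let mut region_route = RegionRoute {
        region: Region {
            id: 2.into(),
            name: "r2".to_string(),
            partition: None,
            attrs: BTreeMap::new(),
        },
        leader_peer: Some(Peer::new(1, "a1")),
        follower_peers: vec![],
        leader_status: None,
    };

    // `None` is skipped entirely, keeping the JSON compatible with values
    // written before `leader_status` was introduced.
    let json = serde_json::to_string(&region_route).unwrap();
    assert!(!json.contains("leader_status"));

    region_route.downgrade_leader();
    let json = serde_json::to_string(&region_route).unwrap();
    assert!(json.contains("Downgraded"));
}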

pub struct RegionRoutes(pub Vec<RegionRoute>);

impl RegionRoutes {
    pub fn region_map(&self) -> HashMap<u32, &Peer> {
        convert_to_region_map(&self.0)
    pub fn region_leader_map(&self) -> HashMap<RegionNumber, &Peer> {
        convert_to_region_leader_map(&self.0)
    }

    pub fn find_region_leader(&self, region_number: u32) -> Option<&Peer> {
        self.region_map().get(&region_number).copied()
    pub fn find_region_leader(&self, region_number: RegionNumber) -> Option<&Peer> {
        self.region_leader_map().get(&region_number).copied()
    }
}

@@ -365,6 +264,16 @@ pub struct Region {
    pub attrs: BTreeMap<String, String>,
}

impl Region {
    #[cfg(any(test, feature = "testing"))]
    pub fn new_test(id: RegionId) -> Self {
        Self {
            id,
            ..Default::default()
        }
    }
}

impl From<PbRegion> for Region {
    fn from(r: PbRegion) -> Self {
        Self {
@@ -456,14 +365,50 @@ impl From<PbPartition> for Partition {

#[cfg(test)]
mod tests {
    use api::v1::meta::{
        Partition as PbPartition, Peer as PbPeer, Region as PbRegion, RegionRoute as PbRegionRoute,
        RouteRequest as PbRouteRequest, RouteResponse as PbRouteResponse, Table as PbTable,
        TableName as PbTableName, TableRoute as PbTableRoute,
    };

    use super::*;

    #[test]
    fn test_leader_is_downgraded() {
        let mut region_route = RegionRoute {
            region: Region {
                id: 2.into(),
                name: "r2".to_string(),
                partition: None,
                attrs: BTreeMap::new(),
            },
            leader_peer: Some(Peer::new(1, "a1")),
            follower_peers: vec![Peer::new(2, "a2"), Peer::new(3, "a3")],
            leader_status: None,
        };

        assert!(!region_route.is_leader_downgraded());

        region_route.downgrade_leader();

        assert!(region_route.is_leader_downgraded());
    }

    #[test]
    fn test_region_route_decode() {
        let region_route = RegionRoute {
            region: Region {
                id: 2.into(),
                name: "r2".to_string(),
                partition: None,
                attrs: BTreeMap::new(),
            },
            leader_peer: Some(Peer::new(1, "a1")),
            follower_peers: vec![Peer::new(2, "a2"), Peer::new(3, "a3")],
            leader_status: None,
        };

        let input = r#"{"region":{"id":2,"name":"r2","partition":null,"attrs":{}},"leader_peer":{"id":1,"addr":"a1"},"follower_peers":[{"id":2,"addr":"a2"},{"id":3,"addr":"a3"}]}"#;

        let decoded: RegionRoute = serde_json::from_str(input).unwrap();

        assert_eq!(decoded, region_route);
    }

    #[test]
    fn test_de_serialize_partition() {
        let p = Partition {
@@ -476,182 +421,4 @@ mod tests {

        assert_eq!(got, p);
    }

    #[test]
    fn test_route_request_trans() {
        let req = RouteRequest {
            table_ids: vec![1, 2],
        };

        let into_req: PbRouteRequest = req.into();

        assert!(into_req.header.is_none());
        assert_eq!(1, into_req.table_ids.get(0).unwrap().id);
        assert_eq!(2, into_req.table_ids.get(1).unwrap().id);
    }

    #[test]
    fn test_route_response_trans() {
        let res = PbRouteResponse {
            header: None,
            peers: vec![
                PbPeer {
                    id: 1,
                    addr: "peer1".to_string(),
                },
                PbPeer {
                    id: 2,
                    addr: "peer2".to_string(),
                },
            ],
            table_routes: vec![PbTableRoute {
                table: Some(PbTable {
                    id: 1,
                    table_name: Some(PbTableName {
                        catalog_name: "c1".to_string(),
                        schema_name: "s1".to_string(),
                        table_name: "t1".to_string(),
                    }),
                    table_schema: b"schema".to_vec(),
                }),
                region_routes: vec![PbRegionRoute {
                    region: Some(PbRegion {
                        id: 1,
                        name: "region1".to_string(),
                        partition: Some(PbPartition {
                            column_list: vec![b"c1".to_vec(), b"c2".to_vec()],
                            value_list: vec![b"v1".to_vec(), b"v2".to_vec()],
                        }),
                        attrs: Default::default(),
                    }),
                    leader_peer_index: 0,
                    follower_peer_indexes: vec![1],
                }],
            }],
        };

        let res: RouteResponse = res.try_into().unwrap();
        let mut table_routes = res.table_routes;
        assert_eq!(1, table_routes.len());
        let table_route = table_routes.remove(0);
        let table = table_route.table;
        assert_eq!(1, table.id);
        assert_eq!("c1", table.table_name.catalog_name);
        assert_eq!("s1", table.table_name.schema_name);
        assert_eq!("t1", table.table_name.table_name);

        let mut region_routes = table_route.region_routes;
        assert_eq!(1, region_routes.len());
        let region_route = region_routes.remove(0);
        let region = region_route.region;
        assert_eq!(1, region.id);
        assert_eq!("region1", region.name);
        let partition = region.partition.unwrap();
        assert_eq!(vec![b"c1".to_vec(), b"c2".to_vec()], partition.column_list);
        assert_eq!(vec![b"v1".to_vec(), b"v2".to_vec()], partition.value_list);

        assert_eq!(1, region_route.leader_peer.as_ref().unwrap().id);
        assert_eq!("peer1", region_route.leader_peer.as_ref().unwrap().addr);

        assert_eq!(1, region_route.follower_peers.len());
        assert_eq!(2, region_route.follower_peers.get(0).unwrap().id);
        assert_eq!("peer2", region_route.follower_peers.get(0).unwrap().addr);
    }

    #[test]
    fn test_table_route_raw_conversion() {
        let raw_peers = vec![
            PbPeer {
                id: 1,
                addr: "a1".to_string(),
            },
            PbPeer {
                id: 2,
                addr: "a2".to_string(),
            },
            PbPeer {
                id: 3,
                addr: "a3".to_string(),
            },
        ];

        // region distribution:
        // region id => leader peer id + [follower peer id]
        // 1 => 2 + [1, 3]
        // 2 => 1 + [2, 3]

        let raw_table_route = PbTableRoute {
            table: Some(PbTable {
                id: 1,
                table_name: Some(PbTableName {
                    catalog_name: "c1".to_string(),
                    schema_name: "s1".to_string(),
                    table_name: "t1".to_string(),
                }),
                table_schema: vec![],
            }),
            region_routes: vec![
                PbRegionRoute {
                    region: Some(PbRegion {
                        id: 1,
                        name: "r1".to_string(),
                        partition: None,
                        attrs: HashMap::new(),
                    }),
                    leader_peer_index: 1,
                    follower_peer_indexes: vec![0, 2],
                },
                PbRegionRoute {
                    region: Some(PbRegion {
                        id: 2,
                        name: "r2".to_string(),
                        partition: None,
                        attrs: HashMap::new(),
                    }),
                    leader_peer_index: 0,
                    follower_peer_indexes: vec![1, 2],
                },
            ],
        };
        let table_route = TableRoute {
            table: Table {
                id: 1,
                table_name: TableName::new("c1", "s1", "t1"),
                table_schema: vec![],
            },
            region_routes: vec![
                RegionRoute {
                    region: Region {
                        id: 1.into(),
                        name: "r1".to_string(),
                        partition: None,
                        attrs: BTreeMap::new(),
                    },
                    leader_peer: Some(Peer::new(2, "a2")),
                    follower_peers: vec![Peer::new(1, "a1"), Peer::new(3, "a3")],
                },
                RegionRoute {
                    region: Region {
                        id: 2.into(),
                        name: "r2".to_string(),
                        partition: None,
                        attrs: BTreeMap::new(),
                    },
                    leader_peer: Some(Peer::new(1, "a1")),
                    follower_peers: vec![Peer::new(2, "a2"), Peer::new(3, "a3")],
                },
            ],
            region_leaders: HashMap::from([
                (2, Some(Peer::new(1, "a1"))),
                (1, Some(Peer::new(2, "a2"))),
            ]),
        };

        let from_raw = TableRoute::try_from_raw(&raw_peers, raw_table_route.clone()).unwrap();
        assert_eq!(from_raw, table_route);

        let into_raw = table_route.try_into_raw().unwrap();
        assert_eq!(into_raw.0, raw_peers);
        assert_eq!(into_raw.1, raw_table_route);
    }
}

@@ -13,6 +13,7 @@
// limitations under the License.

use std::fmt::{Display, Formatter};
use std::ops::Bound;

use api::v1::meta::{
    BatchDeleteRequest as PbBatchDeleteRequest, BatchDeleteResponse as PbBatchDeleteResponse,
@@ -20,8 +21,7 @@ use api::v1::meta::{
    BatchPutRequest as PbBatchPutRequest, BatchPutResponse as PbBatchPutResponse,
    CompareAndPutRequest as PbCompareAndPutRequest,
    CompareAndPutResponse as PbCompareAndPutResponse, DeleteRangeRequest as PbDeleteRangeRequest,
    DeleteRangeResponse as PbDeleteRangeResponse, MoveValueRequest as PbMoveValueRequest,
    MoveValueResponse as PbMoveValueResponse, PutRequest as PbPutRequest,
    DeleteRangeResponse as PbDeleteRangeResponse, PutRequest as PbPutRequest,
    PutResponse as PbPutResponse, RangeRequest as PbRangeRequest, RangeResponse as PbRangeResponse,
    ResponseHeader as PbResponseHeader,
};
@@ -30,6 +30,17 @@ use crate::error;
use crate::error::Result;
use crate::rpc::{util, KeyValue};

pub fn to_range(key: Vec<u8>, range_end: Vec<u8>) -> (Bound<Vec<u8>>, Bound<Vec<u8>>) {
    match (&key[..], &range_end[..]) {
        (_, []) => (Bound::Included(key.clone()), Bound::Included(key)),
        // If both key and range_end are '\0', then range represents all keys.
        ([0], [0]) => (Bound::Unbounded, Bound::Unbounded),
        // If range_end is '\0', the range is all keys greater than or equal to the key argument.
        (_, [0]) => (Bound::Included(key), Bound::Unbounded),
        (_, _) => (Bound::Included(key), Bound::Excluded(range_end)),
    }
}

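A sketch, not part of the diff, of how the three cases above behave when the returned bounds drive a `BTreeMap` range scan; the keys and the test name are invented.

#[test]
fn example_to_range_bounds() {
    use std::collections::BTreeMap;

    let kvs: BTreeMap<Vec<u8>, ()> =
        [b"a".to_vec(), b"b".to_vec(), b"c".to_vec()].map(|k| (k, ())).into();

    // Empty range_end: the range contains only `key` itself.
    assert_eq!(kvs.range(to_range(b"b".to_vec(), vec![])).count(), 1);

    // range_end == "\0": all keys greater than or equal to `key`.
    assert_eq!(kvs.range(to_range(b"b".to_vec(), vec![0])).count(), 2);

    // key == range_end == "\0": all keys.
    assert_eq!(kvs.range(to_range(vec![0], vec![0])).count(), 3);

    // Otherwise: the half-open range [key, range_end).
    assert_eq!(kvs.range(to_range(b"a".to_vec(), b"c".to_vec())).count(), 2);
}
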
#[derive(Debug, Clone, Default)]
pub struct RangeRequest {
    /// key is the first key for the range. If range_end is not given, the
@@ -96,6 +107,11 @@ impl RangeRequest {
        }
    }

    /// Returns the `RangeBounds`.
    pub fn range(&self) -> (Bound<Vec<u8>>, Bound<Vec<u8>>) {
        to_range(self.key.clone(), self.range_end.clone())
    }

    /// key is the first key for the range. If range_end is not given, the
    /// request only looks up key.
    #[inline]
@@ -690,6 +706,11 @@ impl DeleteRangeRequest {
        }
    }

    /// Returns the `RangeBounds`.
    pub fn range(&self) -> (Bound<Vec<u8>>, Bound<Vec<u8>>) {
        to_range(self.key.clone(), self.range_end.clone())
    }

    /// key is the first key to delete in the range. If range_end is not given,
    /// the range is defined to contain only the key argument.
    #[inline]
@@ -772,71 +793,6 @@ impl DeleteRangeResponse {
    }
}

#[derive(Debug, Clone, Default)]
pub struct MoveValueRequest {
    /// If from_key does not exist, return the value of to_key (if it exists).
    /// If from_key exists, move the value of from_key to to_key (i.e. rename),
    /// and return the value.
    pub from_key: Vec<u8>,
    pub to_key: Vec<u8>,
}

impl From<MoveValueRequest> for PbMoveValueRequest {
    fn from(req: MoveValueRequest) -> Self {
        Self {
            header: None,
            from_key: req.from_key,
            to_key: req.to_key,
        }
    }
}

impl From<PbMoveValueRequest> for MoveValueRequest {
    fn from(value: PbMoveValueRequest) -> Self {
        Self {
            from_key: value.from_key,
            to_key: value.to_key,
        }
    }
}

impl MoveValueRequest {
    #[inline]
    pub fn new(from_key: impl Into<Vec<u8>>, to_key: impl Into<Vec<u8>>) -> Self {
        Self {
            from_key: from_key.into(),
            to_key: to_key.into(),
        }
    }
}

#[derive(Debug, Clone)]
pub struct MoveValueResponse(pub Option<KeyValue>);

impl TryFrom<PbMoveValueResponse> for MoveValueResponse {
    type Error = error::Error;

    fn try_from(pb: PbMoveValueResponse) -> Result<Self> {
        util::check_response_header(pb.header.as_ref())?;

        Ok(Self(pb.kv.map(KeyValue::new)))
    }
}

impl MoveValueResponse {
    pub fn to_proto_resp(self, header: PbResponseHeader) -> PbMoveValueResponse {
        PbMoveValueResponse {
            header: Some(header),
            kv: self.0.map(Into::into),
        }
    }

    #[inline]
    pub fn take_kv(&mut self) -> Option<KeyValue> {
        self.0.take()
    }
}

#[cfg(test)]
mod tests {
    use api::v1::meta::{
@@ -844,10 +800,8 @@ mod tests {
        CompareAndPutRequest as PbCompareAndPutRequest,
        CompareAndPutResponse as PbCompareAndPutResponse,
        DeleteRangeRequest as PbDeleteRangeRequest, DeleteRangeResponse as PbDeleteRangeResponse,
        KeyValue as PbKeyValue, MoveValueRequest as PbMoveValueRequest,
        MoveValueResponse as PbMoveValueResponse, PutRequest as PbPutRequest,
        PutResponse as PbPutResponse, RangeRequest as PbRangeRequest,
        RangeResponse as PbRangeResponse,
        KeyValue as PbKeyValue, PutRequest as PbPutRequest, PutResponse as PbPutResponse,
        RangeRequest as PbRangeRequest, RangeResponse as PbRangeResponse,
    };

    use super::*;
@@ -1150,34 +1104,4 @@ mod tests {
        assert_eq!(b"v2".to_vec(), kv1.value().to_vec());
        assert_eq!(b"v2".to_vec(), kv1.take_value());
    }

    #[test]
    fn test_move_value_request_trans() {
        let (from_key, to_key) = (b"test_key1".to_vec(), b"test_key2".to_vec());

        let req = MoveValueRequest::new(from_key.clone(), to_key.clone());

        let into_req: PbMoveValueRequest = req.into();
        assert!(into_req.header.is_none());
        assert_eq!(from_key, into_req.from_key);
        assert_eq!(to_key, into_req.to_key);
    }

    #[test]
    fn test_move_value_response_trans() {
        let pb_res = PbMoveValueResponse {
            header: None,
            kv: Some(PbKeyValue {
                key: b"k1".to_vec(),
                value: b"v1".to_vec(),
            }),
        };

        let mut res: MoveValueResponse = pb_res.try_into().unwrap();
        let mut kv = res.take_kv().unwrap();
        assert_eq!(b"k1".to_vec(), kv.key().to_vec());
        assert_eq!(b"k1".to_vec(), kv.take_key());
        assert_eq!(b"v1".to_vec(), kv.value().to_vec());
        assert_eq!(b"v1".to_vec(), kv.take_value());
    }
}

@@ -166,15 +166,14 @@ mod tests {
    use crate::rpc::store::{
        BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse,
        BatchPutRequest, BatchPutResponse, CompareAndPutResponse, DeleteRangeRequest,
        DeleteRangeResponse, MoveValueRequest, MoveValueResponse, PutRequest, PutResponse,
        RangeRequest, RangeResponse,
        DeleteRangeResponse, PutRequest, PutResponse, RangeRequest, RangeResponse,
    };

    #[tokio::test]
    async fn test_sequence() {
        let kv_store = Arc::new(MemoryKvBackend::default());
        let kv_backend = Arc::new(MemoryKvBackend::default());
        let initial = 1024;
        let seq = Sequence::new("test_seq", initial, 10, kv_store);
        let seq = Sequence::new("test_seq", initial, 10, kv_backend);

        for i in initial..initial + 100 {
            assert_eq!(i, seq.next().await.unwrap());
@@ -183,9 +182,9 @@ mod tests {

    #[tokio::test]
    async fn test_sequence_out_of_rage() {
        let kv_store = Arc::new(MemoryKvBackend::default());
        let kv_backend = Arc::new(MemoryKvBackend::default());
        let initial = u64::MAX - 10;
        let seq = Sequence::new("test_seq", initial, 10, kv_store);
        let seq = Sequence::new("test_seq", initial, 10, kv_backend);

        for _ in 0..10 {
            let _ = seq.next().await.unwrap();
@@ -247,14 +246,10 @@ mod tests {
            async fn batch_delete(&self, _: BatchDeleteRequest) -> Result<BatchDeleteResponse> {
                unreachable!()
            }

            async fn move_value(&self, _: MoveValueRequest) -> Result<MoveValueResponse> {
                unreachable!()
            }
        }

        let kv_store = Arc::new(Noop {});
        let seq = Sequence::new("test_seq", 0, 10, kv_store);
        let kv_backend = Arc::new(Noop {});
        let seq = Sequence::new("test_seq", 0, 10, kv_backend);

        let next = seq.next().await;
        assert!(next.is_err());

@@ -34,6 +34,9 @@ pub enum Error {
    #[snafu(display("Loader {} is already registered", name))]
    LoaderConflict { name: String, location: Location },

    #[snafu(display("Procedure Manager is stopped"))]
    ManagerNotStart { location: Location },

    #[snafu(display("Failed to serialize to json"))]
    ToJson {
        #[snafu(source)]
@@ -148,7 +151,8 @@ impl ErrorExt for Error {
            | Error::FromJson { .. }
            | Error::RetryTimesExceeded { .. }
            | Error::RetryLater { .. }
            | Error::WaitWatcher { .. } => StatusCode::Internal,
            | Error::WaitWatcher { .. }
            | Error::ManagerNotStart { .. } => StatusCode::Internal,
            Error::LoaderConflict { .. } | Error::DuplicateProcedure { .. } => {
                StatusCode::InvalidArguments
            }