Mirror of https://github.com/GreptimeTeam/greptimedb.git (synced 2026-01-03 20:02:54 +00:00)

Compare commits: 99 commits, v0.4.0-nig ... v0.5.0-nig
Commit SHA1s: 6e87ac0a0e, d89cfd0d4d, 8a0054aa89, f859932745, 9a8fc08e6a, 825e4beead, 0a23b40321, cf6ef0a30d, 65a659d136, 62bcb45787, 94f3542a4f, fc3bc5327d, 9e33ddceea, c9bdf4ff9f, 0a9972aa9a, 76d5b710c8, fe02366ce6, d7aeb369a6, 9284bb7a2b, e23dd5a44f, c60b59adc8, c9c2b3c91f, 7f75190fce, 0a394c73a2, ae95f23e05, 6b39f5923d, ed725d030f, 4fe7e162af, 8a5ef826b9, 07be50403e, 8bdef9a348, d4577e7372, 88f26673f0, 19f300fc5a, cc83764331, 81aa7a4caf, d68dd1f3eb, 9b3470b049, 8cc862ff8a, 81ccb58fb4, ce3c10a86e, 007f7ba03c, dfe68a7e0b, d5e4fcaaff, 17b385a985, 067917845f, a680133acc, 0593c3bde3, 0292445476, ff15bc41d6, 657542c0b8, 0ad3fb6040, b44e39f897, f50f2a84a9, fe783c7c1f, 00fe7d104e, 201acd152d, 04dbd835a1, e3d333258b, 10ecc30817, 52ac093110, 1f1d72bdb8, 7edafc3407, ccd6de8d6b, ee8d472aae, 9282e59a3b, fbe2f2df46, db6ceda5f0, e352fb4495, a6116bb866, 515ce825bd, 7fc9604735, a4282415f7, 0bf26642a4, 230a3026ad, 54e506a494, 7ecfaa240f, c0f080df26, f9351e4fb5, 00272d53cc, 7310ec0bb3, 73842f10e7, 32d1d68441, ffa729cdf5, 9d0de25bff, aef9e7bfc3, c6e95ffe63, c9f8b9c7c3, 688e64632d, 8e5eaf5472, 621c6f371b, 4c7ad44605, 6306aeabf0, 40781ec754, c7b490e1a0, e3f53a8060, 580d11b1e1, 20f4f7971a, 9863e501f1

@@ -1,93 +0,0 @@
name: Build and push dev-builder image
description: Build and push dev-builder image to DockerHub and ACR
inputs:
  dockerhub-image-registry:
    description: The dockerhub image registry to store the images
    required: false
    default: docker.io
  dockerhub-image-registry-username:
    description: The dockerhub username to login to the image registry
    required: true
  dockerhub-image-registry-token:
    description: The dockerhub token to login to the image registry
    required: true
  dockerhub-image-namespace:
    description: The dockerhub namespace of the image registry to store the images
    required: false
    default: greptime
  acr-image-registry:
    description: The ACR image registry to store the images
    required: true
  acr-image-registry-username:
    description: The ACR username to login to the image registry
    required: true
  acr-image-registry-password:
    description: The ACR password to login to the image registry
    required: true
  acr-image-namespace:
    description: The ACR namespace of the image registry to store the images
    required: false
    default: greptime
  version:
    description: Version of the dev-builder
    required: false
    default: latest
runs:
  using: composite
  steps:
    - name: Login to Dockerhub
      uses: docker/login-action@v2
      with:
        registry: ${{ inputs.dockerhub-image-registry }}
        username: ${{ inputs.dockerhub-image-registry-username }}
        password: ${{ inputs.dockerhub-image-registry-token }}

    - name: Build and push ubuntu dev builder image to dockerhub
      shell: bash
      run:
        make dev-builder \
          BASE_IMAGE=ubuntu \
          BUILDX_MULTI_PLATFORM_BUILD=true \
          IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
          IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
          IMAGE_TAG=${{ inputs.version }}

    - name: Build and push centos dev builder image to dockerhub
      shell: bash
      run:
        make dev-builder \
          BASE_IMAGE=centos \
          BUILDX_MULTI_PLATFORM_BUILD=true \
          IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
          IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
          IMAGE_TAG=${{ inputs.version }}

    - name: Login to ACR
      uses: docker/login-action@v2
      continue-on-error: true
      with:
        registry: ${{ inputs.acr-image-registry }}
        username: ${{ inputs.acr-image-registry-username }}
        password: ${{ inputs.acr-image-registry-password }}

    - name: Build and push ubuntu dev builder image to ACR
      shell: bash
      continue-on-error: true
      run: # buildx will cache the images that already built, so it will not take long time to build the images again.
        make dev-builder \
          BASE_IMAGE=ubuntu \
          BUILDX_MULTI_PLATFORM_BUILD=true \
          IMAGE_REGISTRY=${{ inputs.acr-image-registry }} \
          IMAGE_NAMESPACE=${{ inputs.acr-image-namespace }} \
          IMAGE_TAG=${{ inputs.version }}

    - name: Build and push centos dev builder image to ACR
      shell: bash
      continue-on-error: true
      run: # buildx will cache the images that already built, so it will not take long time to build the images again.
        make dev-builder \
          BASE_IMAGE=centos \
          BUILDX_MULTI_PLATFORM_BUILD=true \
          IMAGE_REGISTRY=${{ inputs.acr-image-registry }} \
          IMAGE_NAMESPACE=${{ inputs.acr-image-namespace }} \
          IMAGE_TAG=${{ inputs.version }}

.github/actions/build-dev-builder-images/action.yml (vendored, Normal file, 76 lines)
@@ -0,0 +1,76 @@
name: Build and push dev-builder images
description: Build and push dev-builder images to DockerHub and ACR
inputs:
  dockerhub-image-registry:
    description: The dockerhub image registry to store the images
    required: false
    default: docker.io
  dockerhub-image-registry-username:
    description: The dockerhub username to login to the image registry
    required: true
  dockerhub-image-registry-token:
    description: The dockerhub token to login to the image registry
    required: true
  dockerhub-image-namespace:
    description: The dockerhub namespace of the image registry to store the images
    required: false
    default: greptime
  version:
    description: Version of the dev-builder
    required: false
    default: latest
  build-dev-builder-ubuntu:
    description: Build dev-builder-ubuntu image
    required: false
    default: 'true'
  build-dev-builder-centos:
    description: Build dev-builder-centos image
    required: false
    default: 'true'
  build-dev-builder-android:
    description: Build dev-builder-android image
    required: false
    default: 'true'
runs:
  using: composite
  steps:
    - name: Login to Dockerhub
      uses: docker/login-action@v2
      with:
        registry: ${{ inputs.dockerhub-image-registry }}
        username: ${{ inputs.dockerhub-image-registry-username }}
        password: ${{ inputs.dockerhub-image-registry-token }}

    - name: Build and push dev-builder-ubuntu image
      shell: bash
      if: ${{ inputs.build-dev-builder-ubuntu == 'true' }}
      run: |
        make dev-builder \
          BASE_IMAGE=ubuntu \
          BUILDX_MULTI_PLATFORM_BUILD=true \
          IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
          IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
          IMAGE_TAG=${{ inputs.version }}

    - name: Build and push dev-builder-centos image
      shell: bash
      if: ${{ inputs.build-dev-builder-centos == 'true' }}
      run: |
        make dev-builder \
          BASE_IMAGE=centos \
          BUILDX_MULTI_PLATFORM_BUILD=true \
          IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
          IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
          IMAGE_TAG=${{ inputs.version }}

    - name: Build and push dev-builder-android image # Only build image for amd64 platform.
      shell: bash
      if: ${{ inputs.build-dev-builder-android == 'true' }}
      run: |
        make dev-builder \
          BASE_IMAGE=android \
          IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
          IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
          IMAGE_TAG=${{ inputs.version }} && \

        docker push ${{ inputs.dockerhub-image-registry }}/${{ inputs.dockerhub-image-namespace }}/dev-builder-android:${{ inputs.version }}

.github/actions/build-greptime-binary/action.yml (vendored, 48 lines changed)
@@ -16,35 +16,20 @@ inputs:
  version:
    description: Version of the artifact
    required: true
  release-to-s3-bucket:
    description: S3 bucket to store released artifacts
    required: true
  aws-access-key-id:
    description: AWS access key id
    required: true
  aws-secret-access-key:
    description: AWS secret access key
    required: true
  aws-region:
    description: AWS region
    required: true
  upload-to-s3:
    description: Upload to S3
    required: false
    default: 'true'
  upload-latest-artifacts:
    description: Upload the latest artifacts to S3
    required: false
    default: 'true'
  working-dir:
    description: Working directory to build the artifacts
    required: false
    default: .
  build-android-artifacts:
    description: Build android artifacts
    required: false
    default: 'false'
runs:
  using: composite
  steps:
    - name: Build greptime binary
      shell: bash
      if: ${{ inputs.build-android-artifacts == 'false' }}
      run: |
        cd ${{ inputs.working-dir }} && \
        make build-by-dev-builder \
@@ -54,14 +39,25 @@ runs:

    - name: Upload artifacts
      uses: ./.github/actions/upload-artifacts
      if: ${{ inputs.build-android-artifacts == 'false' }}
      with:
        artifacts-dir: ${{ inputs.artifacts-dir }}
        target-file: ./target/${{ inputs.cargo-profile }}/greptime
        version: ${{ inputs.version }}
        release-to-s3-bucket: ${{ inputs.release-to-s3-bucket }}
        aws-access-key-id: ${{ inputs.aws-access-key-id }}
        aws-secret-access-key: ${{ inputs.aws-secret-access-key }}
        aws-region: ${{ inputs.aws-region }}
        upload-to-s3: ${{ inputs.upload-to-s3 }}
        upload-latest-artifacts: ${{ inputs.upload-latest-artifacts }}
        working-dir: ${{ inputs.working-dir }}

    # TODO(zyy17): We can remove build-android-artifacts flag in the future.
    - name: Build greptime binary
      shell: bash
      if: ${{ inputs.build-android-artifacts == 'true' }}
      run: |
        cd ${{ inputs.working-dir }} && make strip-android-bin

    - name: Upload android artifacts
      uses: ./.github/actions/upload-artifacts
      if: ${{ inputs.build-android-artifacts == 'true' }}
      with:
        artifacts-dir: ${{ inputs.artifacts-dir }}
        target-file: ./target/aarch64-linux-android/release/greptime
        version: ${{ inputs.version }}
        working-dir: ${{ inputs.working-dir }}

.github/actions/build-linux-artifacts/action.yml (vendored, 48 lines changed)
@@ -13,30 +13,10 @@ inputs:
  disable-run-tests:
    description: Disable running integration tests
    required: true
  release-to-s3-bucket:
    description: S3 bucket to store released artifacts
    required: true
  aws-access-key-id:
    description: AWS access key id
    required: true
  aws-secret-access-key:
    description: AWS secret access key
    required: true
  aws-region:
    description: AWS region
    required: true
  dev-mode:
    description: Enable dev mode, only build standard greptime
    required: false
    default: 'false'
  upload-to-s3:
    description: Upload to S3
    required: false
    default: 'true'
  upload-latest-artifacts:
    description: Upload the latest artifacts to S3
    required: false
    default: 'true'
  working-dir:
    description: Working directory to build the artifacts
    required: false
@@ -68,12 +48,6 @@ runs:
        cargo-profile: ${{ inputs.cargo-profile }}
        artifacts-dir: greptime-linux-${{ inputs.arch }}-pyo3-${{ inputs.version }}
        version: ${{ inputs.version }}
        release-to-s3-bucket: ${{ inputs.release-to-s3-bucket }}
        aws-access-key-id: ${{ inputs.aws-access-key-id }}
        aws-secret-access-key: ${{ inputs.aws-secret-access-key }}
        aws-region: ${{ inputs.aws-region }}
        upload-to-s3: ${{ inputs.upload-to-s3 }}
        upload-latest-artifacts: ${{ inputs.upload-latest-artifacts }}
        working-dir: ${{ inputs.working-dir }}

    - name: Build greptime without pyo3
@@ -85,12 +59,6 @@ runs:
        cargo-profile: ${{ inputs.cargo-profile }}
        artifacts-dir: greptime-linux-${{ inputs.arch }}-${{ inputs.version }}
        version: ${{ inputs.version }}
        release-to-s3-bucket: ${{ inputs.release-to-s3-bucket }}
        aws-access-key-id: ${{ inputs.aws-access-key-id }}
        aws-secret-access-key: ${{ inputs.aws-secret-access-key }}
        aws-region: ${{ inputs.aws-region }}
        upload-to-s3: ${{ inputs.upload-to-s3 }}
        upload-latest-artifacts: ${{ inputs.upload-latest-artifacts }}
        working-dir: ${{ inputs.working-dir }}

    - name: Clean up the target directory # Clean up the target directory for the centos7 base image, or it will still use the objects of last build.
@@ -107,10 +75,14 @@ runs:
        cargo-profile: ${{ inputs.cargo-profile }}
        artifacts-dir: greptime-linux-${{ inputs.arch }}-centos-${{ inputs.version }}
        version: ${{ inputs.version }}
        release-to-s3-bucket: ${{ inputs.release-to-s3-bucket }}
        aws-access-key-id: ${{ inputs.aws-access-key-id }}
        aws-secret-access-key: ${{ inputs.aws-secret-access-key }}
        aws-region: ${{ inputs.aws-region }}
        upload-to-s3: ${{ inputs.upload-to-s3 }}
        upload-latest-artifacts: ${{ inputs.upload-latest-artifacts }}
        working-dir: ${{ inputs.working-dir }}

    - name: Build greptime on android base image
      uses: ./.github/actions/build-greptime-binary
      if: ${{ inputs.arch == 'amd64' && inputs.dev-mode == 'false' }} # Only build android base image on amd64.
      with:
        base-image: android
        artifacts-dir: greptime-android-arm64-${{ inputs.version }}
        version: ${{ inputs.version }}
        working-dir: ${{ inputs.working-dir }}
        build-android-artifacts: true

.github/actions/build-macos-artifacts/action.yml (vendored, 21 lines changed)
@@ -19,25 +19,9 @@ inputs:
  disable-run-tests:
    description: Disable running integration tests
    required: true
  release-to-s3-bucket:
    description: S3 bucket to store released artifacts
    required: true
  artifacts-dir:
    description: Directory to store artifacts
    required: true
  aws-access-key-id:
    description: AWS access key id
    required: true
  aws-secret-access-key:
    description: AWS secret access key
    required: true
  aws-region:
    description: AWS region
    required: true
  upload-to-s3:
    description: Upload to S3
    required: false
    default: 'true'
runs:
  using: composite
  steps:
@@ -103,8 +87,3 @@ runs:
        artifacts-dir: ${{ inputs.artifacts-dir }}
        target-file: target/${{ inputs.arch }}/${{ inputs.cargo-profile }}/greptime
        version: ${{ inputs.version }}
        release-to-s3-bucket: ${{ inputs.release-to-s3-bucket }}
        aws-access-key-id: ${{ inputs.aws-access-key-id }}
        aws-secret-access-key: ${{ inputs.aws-secret-access-key }}
        aws-region: ${{ inputs.aws-region }}
        upload-to-s3: ${{ inputs.upload-to-s3 }}

.github/actions/build-windows-artifacts/action.yml (vendored, Normal file, 80 lines)
@@ -0,0 +1,80 @@
name: Build Windows artifacts
description: Build Windows artifacts
inputs:
  arch:
    description: Architecture to build
    required: true
  rust-toolchain:
    description: Rust toolchain to use
    required: true
  cargo-profile:
    description: Cargo profile to build
    required: true
  features:
    description: Cargo features to build
    required: true
  version:
    description: Version of the artifact
    required: true
  disable-run-tests:
    description: Disable running integration tests
    required: true
  artifacts-dir:
    description: Directory to store artifacts
    required: true
runs:
  using: composite
  steps:
    - uses: arduino/setup-protoc@v1

    - name: Install rust toolchain
      uses: dtolnay/rust-toolchain@master
      with:
        toolchain: ${{ inputs.rust-toolchain }}
        targets: ${{ inputs.arch }}
        components: llvm-tools-preview

    - name: Rust Cache
      uses: Swatinem/rust-cache@v2

    - name: Install Python
      uses: actions/setup-python@v4
      with:
        python-version: '3.10'

    - name: Install PyArrow Package
      shell: pwsh
      run: pip install pyarrow

    - name: Install WSL distribution
      uses: Vampire/setup-wsl@v2
      with:
        distribution: Ubuntu-22.04

    - name: Install latest nextest release # For integration tests.
      if: ${{ inputs.disable-run-tests == 'false' }}
      uses: taiki-e/install-action@nextest

    - name: Run integration tests
      if: ${{ inputs.disable-run-tests == 'false' }}
      shell: pwsh
      run: make test sqlness-test

    - name: Upload sqlness logs
      if: ${{ failure() }} # Only upload logs when the integration tests failed.
      uses: actions/upload-artifact@v3
      with:
        name: sqlness-logs
        path: ${{ runner.temp }}/greptime-*.log
        retention-days: 3

    - name: Build greptime binary
      shell: pwsh
      run: cargo build --profile ${{ inputs.cargo-profile }} --features ${{ inputs.features }} --target ${{ inputs.arch }}

    - name: Upload artifacts
      uses: ./.github/actions/upload-artifacts
      with:
        artifacts-dir: ${{ inputs.artifacts-dir }}
        target-file: target/${{ inputs.arch }}/${{ inputs.cargo-profile }}/greptime
        version: ${{ inputs.version }}

@@ -1,5 +1,5 @@
name: Release artifacts
description: Release artifacts
name: Publish GitHub release
description: Publish GitHub release
inputs:
  version:
    description: Version to release

.github/actions/release-cn-artifacts/action.yaml (vendored, Normal file, 138 lines)
@@ -0,0 +1,138 @@
name: Release CN artifacts
description: Release artifacts to CN region
inputs:
  src-image-registry:
    description: The source image registry to store the images
    required: true
    default: docker.io
  src-image-namespace:
    description: The namespace of the source image registry to store the images
    required: true
    default: greptime
  src-image-name:
    description: The name of the source image
    required: false
    default: greptimedb
  dst-image-registry:
    description: The destination image registry to store the images
    required: true
  dst-image-namespace:
    description: The namespace of the destination image registry to store the images
    required: true
    default: greptime
  dst-image-registry-username:
    description: The username to login to the image registry
    required: true
  dst-image-registry-password:
    description: The password to login to the image registry
    required: true
  version:
    description: Version of the artifact
    required: true
  dev-mode:
    description: Enable dev mode, only push standard greptime
    required: false
    default: 'false'
  push-latest-tag:
    description: Whether to push the latest tag of the image
    required: false
    default: 'true'
  aws-cn-s3-bucket:
    description: S3 bucket to store released artifacts in CN region
    required: true
  aws-cn-access-key-id:
    description: AWS access key id in CN region
    required: true
  aws-cn-secret-access-key:
    description: AWS secret access key in CN region
    required: true
  aws-cn-region:
    description: AWS region in CN
    required: true
  upload-to-s3:
    description: Upload to S3
    required: false
    default: 'true'
  artifacts-dir:
    description: Directory to store artifacts
    required: false
    default: 'artifacts'
  update-version-info:
    description: Update the version info in S3
    required: false
    default: 'true'
  upload-max-retry-times:
    description: Max retry times for uploading artifacts to S3
    required: false
    default: "20"
  upload-retry-timeout:
    description: Timeout for uploading artifacts to S3
    required: false
    default: "30" # minutes
runs:
  using: composite
  steps:
    - name: Download artifacts
      uses: actions/download-artifact@v3
      with:
        path: ${{ inputs.artifacts-dir }}

    - name: Release artifacts to cn region
      uses: nick-invision/retry@v2
      if: ${{ inputs.upload-to-s3 == 'true' }}
      env:
        AWS_ACCESS_KEY_ID: ${{ inputs.aws-cn-access-key-id }}
        AWS_SECRET_ACCESS_KEY: ${{ inputs.aws-cn-secret-access-key }}
        AWS_DEFAULT_REGION: ${{ inputs.aws-cn-region }}
        UPDATE_VERSION_INFO: ${{ inputs.update-version-info }}
      with:
        max_attempts: ${{ inputs.upload-max-retry-times }}
        timeout_minutes: ${{ inputs.upload-retry-timeout }}
        command: |
          ./.github/scripts/upload-artifacts-to-s3.sh \
            ${{ inputs.artifacts-dir }} \
            ${{ inputs.version }} \
            ${{ inputs.aws-cn-s3-bucket }}

    - name: Push greptimedb image from Dockerhub to ACR
      shell: bash
      env:
        DST_REGISTRY_USERNAME: ${{ inputs.dst-image-registry-username }}
        DST_REGISTRY_PASSWORD: ${{ inputs.dst-image-registry-password }}
      run: |
        ./.github/scripts/copy-image.sh \
          ${{ inputs.src-image-registry }}/${{ inputs.src-image-namespace }}/${{ inputs.src-image-name }}:${{ inputs.version }} \
          ${{ inputs.dst-image-registry }}/${{ inputs.dst-image-namespace }}

    - name: Push latest greptimedb image from Dockerhub to ACR
      shell: bash
      if: ${{ inputs.push-latest-tag == 'true' }}
      env:
        DST_REGISTRY_USERNAME: ${{ inputs.dst-image-registry-username }}
        DST_REGISTRY_PASSWORD: ${{ inputs.dst-image-registry-password }}
      run: |
        ./.github/scripts/copy-image.sh \
          ${{ inputs.src-image-registry }}/${{ inputs.src-image-namespace }}/${{ inputs.src-image-name }}:latest \
          ${{ inputs.dst-image-registry }}/${{ inputs.dst-image-namespace }}

    - name: Push greptimedb-centos image from DockerHub to ACR
      shell: bash
      if: ${{ inputs.dev-mode == 'false' }}
      env:
        DST_REGISTRY_USERNAME: ${{ inputs.dst-image-registry-username }}
        DST_REGISTRY_PASSWORD: ${{ inputs.dst-image-registry-password }}
      run: |
        ./.github/scripts/copy-image.sh \
          ${{ inputs.src-image-registry }}/${{ inputs.src-image-namespace }}/${{ inputs.src-image-name }}-centos:latest \
          ${{ inputs.dst-image-registry }}/${{ inputs.dst-image-namespace }}

    - name: Push greptimedb-centos image from DockerHub to ACR
      shell: bash
      if: ${{ inputs.dev-mode == 'false' && inputs.push-latest-tag == 'true' }}
      env:
        DST_REGISTRY_USERNAME: ${{ inputs.dst-image-registry-username }}
        DST_REGISTRY_PASSWORD: ${{ inputs.dst-image-registry-password }}
      run: |
        ./.github/scripts/copy-image.sh \
          ${{ inputs.src-image-registry }}/${{ inputs.src-image-namespace }}/${{ inputs.src-image-name }}-centos:latest \
          ${{ inputs.dst-image-registry }}/${{ inputs.dst-image-namespace }}

.github/actions/upload-artifacts/action.yml (vendored, 88 lines changed)
@@ -10,34 +10,6 @@ inputs:
  version:
    description: Version of the artifact
    required: true
  release-to-s3-bucket:
    description: S3 bucket to store released artifacts
    required: true
  aws-access-key-id:
    description: AWS access key id
    required: true
  aws-secret-access-key:
    description: AWS secret access key
    required: true
  aws-region:
    description: AWS region
    required: true
  upload-to-s3:
    description: Upload to S3
    required: false
    default: 'true'
  upload-latest-artifacts:
    description: Upload the latest artifacts to S3
    required: false
    default: 'true'
  upload-max-retry-times:
    description: Max retry times for uploading artifacts to S3
    required: false
    default: "20"
  upload-retry-timeout:
    description: Timeout for uploading artifacts to S3
    required: false
    default: "10" # minutes
  working-dir:
    description: Working directory to upload the artifacts
    required: false
@@ -61,9 +33,21 @@ runs:
      working-directory: ${{ inputs.working-dir }}
      shell: bash
      run: |
        tar -zcvf ${{ inputs.artifacts-dir }}.tar.gz ${{ inputs.artifacts-dir }} && \
        tar -zcvf ${{ inputs.artifacts-dir }}.tar.gz ${{ inputs.artifacts-dir }}

    - name: Calculate checksum
      if: runner.os != 'Windows'
      working-directory: ${{ inputs.working-dir }}
      shell: bash
      run: |
        echo $(shasum -a 256 ${{ inputs.artifacts-dir }}.tar.gz | cut -f1 -d' ') > ${{ inputs.artifacts-dir }}.sha256sum

    - name: Calculate checksum on Windows
      if: runner.os == 'Windows'
      working-directory: ${{ inputs.working-dir }}
      shell: pwsh
      run: Get-FileHash ${{ inputs.artifacts-dir }}.tar.gz -Algorithm SHA256 | select -ExpandProperty Hash > ${{ inputs.artifacts-dir }}.sha256sum

    # Note: The artifacts will be double zip compressed(related issue: https://github.com/actions/upload-artifact/issues/39).
    # However, when we use 'actions/download-artifact@v3' to download the artifacts, it will be automatically unzipped.
    - name: Upload artifacts
@@ -77,49 +61,3 @@ runs:
      with:
        name: ${{ inputs.artifacts-dir }}.sha256sum
        path: ${{ inputs.working-dir }}/${{ inputs.artifacts-dir }}.sha256sum

    - name: Upload artifacts to S3
      if: ${{ inputs.upload-to-s3 == 'true' }}
      uses: nick-invision/retry@v2
      env:
        AWS_ACCESS_KEY_ID: ${{ inputs.aws-access-key-id }}
        AWS_SECRET_ACCESS_KEY: ${{ inputs.aws-secret-access-key }}
        AWS_DEFAULT_REGION: ${{ inputs.aws-region }}
      with:
        max_attempts: ${{ inputs.upload-max-retry-times }}
        timeout_minutes: ${{ inputs.upload-retry-timeout }}
        # The bucket layout will be:
        # releases/greptimedb
        # ├── v0.1.0
        # │   ├── greptime-darwin-amd64-pyo3-v0.1.0.sha256sum
        # │   └── greptime-darwin-amd64-pyo3-v0.1.0.tar.gz
        # └── v0.2.0
        #     ├── greptime-darwin-amd64-pyo3-v0.2.0.sha256sum
        #     └── greptime-darwin-amd64-pyo3-v0.2.0.tar.gz
        command: |
          cd ${{ inputs.working-dir }} && \
          aws s3 cp \
            ${{ inputs.artifacts-dir }}.tar.gz \
            s3://${{ inputs.release-to-s3-bucket }}/releases/greptimedb/${{ inputs.version }}/${{ inputs.artifacts-dir }}.tar.gz && \
          aws s3 cp \
            ${{ inputs.artifacts-dir }}.sha256sum \
            s3://${{ inputs.release-to-s3-bucket }}/releases/greptimedb/${{ inputs.version }}/${{ inputs.artifacts-dir }}.sha256sum

    - name: Upload latest artifacts to S3
      if: ${{ inputs.upload-to-s3 == 'true' && inputs.upload-latest-artifacts == 'true' }} # We'll also upload the latest artifacts to S3 in the scheduled and formal release.
      uses: nick-invision/retry@v2
      env:
        AWS_ACCESS_KEY_ID: ${{ inputs.aws-access-key-id }}
        AWS_SECRET_ACCESS_KEY: ${{ inputs.aws-secret-access-key }}
        AWS_DEFAULT_REGION: ${{ inputs.aws-region }}
      with:
        max_attempts: ${{ inputs.upload-max-retry-times }}
        timeout_minutes: ${{ inputs.upload-retry-timeout }}
        command: |
          cd ${{ inputs.working-dir }} && \
          aws s3 cp \
            ${{ inputs.artifacts-dir }}.tar.gz \
            s3://${{ inputs.release-to-s3-bucket }}/releases/greptimedb/latest/${{ inputs.artifacts-dir }}.tar.gz && \
          aws s3 cp \
            ${{ inputs.artifacts-dir }}.sha256sum \
            s3://${{ inputs.release-to-s3-bucket }}/releases/greptimedb/latest/${{ inputs.artifacts-dir }}.sha256sum

.github/scripts/copy-image.sh (vendored, Executable file, 47 lines)
@@ -0,0 +1,47 @@
#!/usr/bin/env bash

set -e
set -o pipefail

SRC_IMAGE=$1
DST_REGISTRY=$2
SKOPEO_STABLE_IMAGE="quay.io/skopeo/stable:latest"

# Check if necessary variables are set.
function check_vars() {
    for var in DST_REGISTRY_USERNAME DST_REGISTRY_PASSWORD DST_REGISTRY SRC_IMAGE; do
        if [ -z "${!var}" ]; then
            echo "$var is not set or empty."
            echo "Usage: DST_REGISTRY_USERNAME=<your-dst-registry-username> DST_REGISTRY_PASSWORD=<your-dst-registry-password> $0 <dst-registry> <src-image>"
            exit 1
        fi
    done
}

# Copies images from DockerHub to the destination registry.
function copy_images_from_dockerhub() {
    # Check if docker is installed.
    if ! command -v docker &> /dev/null; then
        echo "docker is not installed. Please install docker to continue."
        exit 1
    fi

    # Extract the name and tag of the source image.
    IMAGE_NAME=$(echo "$SRC_IMAGE" | sed "s/.*\///")

    echo "Copying $SRC_IMAGE to $DST_REGISTRY/$IMAGE_NAME"

    docker run "$SKOPEO_STABLE_IMAGE" copy -a docker://"$SRC_IMAGE" \
        --dest-creds "$DST_REGISTRY_USERNAME":"$DST_REGISTRY_PASSWORD" \
        docker://"$DST_REGISTRY/$IMAGE_NAME"
}

function main() {
    check_vars
    copy_images_from_dockerhub
}

# Usage example:
# DST_REGISTRY_USERNAME=123 DST_REGISTRY_PASSWORD=456 \
# ./copy-image.sh greptime/greptimedb:v0.4.0 greptime-registry.cn-hangzhou.cr.aliyuncs.com
main

.github/scripts/upload-artifacts-to-s3.sh (vendored, Executable file, 102 lines)
@@ -0,0 +1,102 @@
#!/usr/bin/env bash

set -e
set -o pipefail

ARTIFACTS_DIR=$1
VERSION=$2
AWS_S3_BUCKET=$3
RELEASE_DIRS="releases/greptimedb"
GREPTIMEDB_REPO="GreptimeTeam/greptimedb"

# Check if necessary variables are set.
function check_vars() {
    for var in AWS_S3_BUCKET VERSION ARTIFACTS_DIR; do
        if [ -z "${!var}" ]; then
            echo "$var is not set or empty."
            echo "Usage: $0 <artifacts-dir> <version> <aws-s3-bucket>"
            exit 1
        fi
    done
}

# Uploads artifacts to AWS S3 bucket.
function upload_artifacts() {
    # The bucket layout will be:
    # releases/greptimedb
    # ├── latest-version.txt
    # ├── latest-nightly-version.txt
    # ├── v0.1.0
    # │   ├── greptime-darwin-amd64-pyo3-v0.1.0.sha256sum
    # │   └── greptime-darwin-amd64-pyo3-v0.1.0.tar.gz
    # └── v0.2.0
    #     ├── greptime-darwin-amd64-pyo3-v0.2.0.sha256sum
    #     └── greptime-darwin-amd64-pyo3-v0.2.0.tar.gz
    find "$ARTIFACTS_DIR" -type f \( -name "*.tar.gz" -o -name "*.sha256sum" \) | while IFS= read -r file; do
        aws s3 cp \
            "$file" "s3://$AWS_S3_BUCKET/$RELEASE_DIRS/$VERSION/$(basename "$file")"
    done
}

# Updates the latest version information in AWS S3 if UPDATE_VERSION_INFO is true.
function update_version_info() {
    if [ "$UPDATE_VERSION_INFO" == "true" ]; then
        # If it's the official release (like v1.0.0, v1.0.1, v1.0.2, etc.), update latest-version.txt.
        if [[ "$VERSION" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
            echo "Updating latest-version.txt"
            echo "$VERSION" > latest-version.txt
            aws s3 cp \
                latest-version.txt "s3://$AWS_S3_BUCKET/$RELEASE_DIRS/latest-version.txt"
        fi

        # If it's the nightly release, update latest-nightly-version.txt.
        if [[ "$VERSION" == *"nightly"* ]]; then
            echo "Updating latest-nightly-version.txt"
            echo "$VERSION" > latest-nightly-version.txt
            aws s3 cp \
                latest-nightly-version.txt "s3://$AWS_S3_BUCKET/$RELEASE_DIRS/latest-nightly-version.txt"
        fi
    fi
}

# Downloads artifacts from Github if DOWNLOAD_ARTIFACTS_FROM_GITHUB is true.
function download_artifacts_from_github() {
    if [ "$DOWNLOAD_ARTIFACTS_FROM_GITHUB" == "true" ]; then
        # Check if jq is installed.
        if ! command -v jq &> /dev/null; then
            echo "jq is not installed. Please install jq to continue."
            exit 1
        fi

        # Get the latest release API response.
        RELEASES_API_RESPONSE=$(curl -s -H "Accept: application/vnd.github.v3+json" "https://api.github.com/repos/$GREPTIMEDB_REPO/releases/latest")

        # Extract download URLs for the artifacts.
        # Exclude source code archives which are typically named as 'greptimedb-<version>.zip' or 'greptimedb-<version>.tar.gz'.
        ASSET_URLS=$(echo "$RELEASES_API_RESPONSE" | jq -r '.assets[] | select(.name | test("greptimedb-.*\\.(zip|tar\\.gz)$") | not) | .browser_download_url')

        # Download each asset.
        while IFS= read -r url; do
            if [ -n "$url" ]; then
                curl -LJO "$url"
                echo "Downloaded: $url"
            fi
        done <<< "$ASSET_URLS"
    fi
}

function main() {
    check_vars
    download_artifacts_from_github
    upload_artifacts
    update_version_info
}

# Usage example:
# AWS_ACCESS_KEY_ID=<your_access_key_id> \
# AWS_SECRET_ACCESS_KEY=<your_secret_access_key> \
# AWS_DEFAULT_REGION=<your_region> \
# UPDATE_VERSION_INFO=true \
# DOWNLOAD_ARTIFACTS_FROM_GITHUB=false \
# ./upload-artifacts-to-s3.sh <artifacts-dir> <version> <aws-s3-bucket>
main

.github/workflows/apidoc.yml (vendored, 2 lines changed)
@@ -17,7 +17,7 @@ env:

jobs:
  apidoc:
    runs-on: ubuntu-latest
    runs-on: ubuntu-20.04
    steps:
      - uses: actions/checkout@v3
      - uses: arduino/setup-protoc@v1

.github/workflows/dev-build.yml (vendored, 67 lines changed)
@@ -16,11 +16,11 @@ on:
        description: The runner uses to build linux-amd64 artifacts
        default: ec2-c6i.4xlarge-amd64
        options:
          - ubuntu-latest
          - ubuntu-latest-8-cores
          - ubuntu-latest-16-cores
          - ubuntu-latest-32-cores
          - ubuntu-latest-64-cores
          - ubuntu-20.04
          - ubuntu-20.04-8-cores
          - ubuntu-20.04-16-cores
          - ubuntu-20.04-32-cores
          - ubuntu-20.04-64-cores
          - ec2-c6i.xlarge-amd64 # 4C8G
          - ec2-c6i.2xlarge-amd64 # 8C16G
          - ec2-c6i.4xlarge-amd64 # 16C32G
@@ -78,7 +78,7 @@ jobs:
  allocate-runners:
    name: Allocate runners
    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
    runs-on: ubuntu-latest
    runs-on: ubuntu-20.04
    outputs:
      linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }}
      linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }}
@@ -164,12 +164,7 @@ jobs:
          cargo-profile: ${{ env.CARGO_PROFILE }}
          version: ${{ needs.allocate-runners.outputs.version }}
          disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
          release-to-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
          aws-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
          aws-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
          dev-mode: true # Only build the standard greptime binary.
          upload-to-s3: false # No need to upload to S3.
          working-dir: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}

  build-linux-arm64-artifacts:
@@ -198,12 +193,7 @@ jobs:
          cargo-profile: ${{ env.CARGO_PROFILE }}
          version: ${{ needs.allocate-runners.outputs.version }}
          disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
          release-to-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
          aws-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
          aws-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
          dev-mode: true # Only build the standard greptime binary.
          upload-to-s3: false # No need to upload to S3.
          working-dir: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}

  release-images-to-dockerhub:
@@ -214,7 +204,7 @@ jobs:
      build-linux-amd64-artifacts,
      build-linux-arm64-artifacts,
    ]
    runs-on: ubuntu-latest
    runs-on: ubuntu-20.04
    outputs:
      build-result: ${{ steps.set-build-result.outputs.build-result }}
    steps:
@@ -239,41 +229,44 @@ jobs:
        run: |
          echo "build-result=success" >> $GITHUB_OUTPUT

  release-images-to-acr:
    name: Build and push images to ACR
  release-cn-artifacts:
    name: Release artifacts to CN region
    if: ${{ inputs.release_images || github.event_name == 'schedule' }}
    needs: [
      allocate-runners,
      build-linux-amd64-artifacts,
      build-linux-arm64-artifacts,
      release-images-to-dockerhub,
    ]
    runs-on: ubuntu-latest
    # When we push to ACR, it's easy to fail due to some unknown network issues.
    # However, we don't want to fail the whole workflow because of this.
    # The ACR have daily sync with DockerHub, so don't worry about the image not being updated.
    runs-on: ubuntu-20.04
    continue-on-error: true
    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0

      - name: Build and push images to ACR
        uses: ./.github/actions/build-images
      - name: Release artifacts to CN region
        uses: ./.github/actions/release-cn-artifacts
        with:
          image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
          image-namespace: ${{ vars.IMAGE_NAMESPACE }}
          image-name: ${{ env.IMAGE_NAME }}
          image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
          image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
          src-image-registry: docker.io
          src-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
          src-image-name: greptimedb
          dst-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
          dst-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
          dst-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
          dst-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
          version: ${{ needs.allocate-runners.outputs.version }}
          push-latest-tag: false # Don't push the latest tag to registry.
          dev-mode: true # Only build the standard images.
          aws-cn-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
          aws-cn-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
          aws-cn-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
          aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
          dev-mode: true # Only build the standard images(exclude centos images).
          push-latest-tag: false # Don't push the latest tag to registry.
          update-version-info: false # Don't update the version info in S3.

  stop-linux-amd64-runner: # It's always run as the last job in the workflow to make sure that the runner is released.
    name: Stop linux-amd64 runner
    # Only run this job when the runner is allocated.
    if: ${{ always() }}
    runs-on: ubuntu-latest
    runs-on: ubuntu-20.04
    needs: [
      allocate-runners,
      build-linux-amd64-artifacts,
@@ -298,7 +291,7 @@ jobs:
    name: Stop linux-arm64 runner
    # Only run this job when the runner is allocated.
    if: ${{ always() }}
    runs-on: ubuntu-latest
    runs-on: ubuntu-20.04
    needs: [
      allocate-runners,
      build-linux-arm64-artifacts,
@@ -325,7 +318,7 @@ jobs:
    needs: [
      release-images-to-dockerhub
    ]
    runs-on: ubuntu-latest
    runs-on: ubuntu-20.04
    env:
      SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
    steps:

.github/workflows/develop.yml (vendored, 55 lines changed)
@@ -34,7 +34,7 @@ env:
jobs:
  typos:
    name: Spell Check with Typos
    runs-on: ubuntu-latest
    runs-on: ubuntu-20.04
    steps:
      - uses: actions/checkout@v3
      - uses: crate-ci/typos@v1.13.10
@@ -42,7 +42,7 @@ jobs:
  check:
    name: Check
    if: github.event.pull_request.draft == false
    runs-on: ubuntu-latest
    runs-on: ubuntu-20.04
    timeout-minutes: 60
    steps:
      - uses: actions/checkout@v3
@@ -60,7 +60,7 @@ jobs:
  toml:
    name: Toml Check
    if: github.event.pull_request.draft == false
    runs-on: ubuntu-latest
    runs-on: ubuntu-20.04
    timeout-minutes: 60
    steps:
      - uses: actions/checkout@v3
@@ -80,7 +80,7 @@ jobs:
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [ ubuntu-latest-8-cores, windows-latest-8-cores ]
        os: [ ubuntu-20.04-8-cores ]
    timeout-minutes: 60
    steps:
      - uses: actions/checkout@v3
@@ -105,7 +105,7 @@ jobs:
  fmt:
    name: Rustfmt
    if: github.event.pull_request.draft == false
    runs-on: ubuntu-latest
    runs-on: ubuntu-20.04
    timeout-minutes: 60
    steps:
      - uses: actions/checkout@v3
@@ -124,7 +124,7 @@ jobs:
  clippy:
    name: Clippy
    if: github.event.pull_request.draft == false
    runs-on: ubuntu-latest
    runs-on: ubuntu-20.04
    timeout-minutes: 60
    steps:
      - uses: actions/checkout@v3
@@ -142,7 +142,7 @@ jobs:

  coverage:
    if: github.event.pull_request.draft == false
    runs-on: ubuntu-latest-8-cores
    runs-on: ubuntu-20.04-8-cores
    timeout-minutes: 60
    steps:
      - uses: actions/checkout@v3
@@ -188,44 +188,3 @@ jobs:
          flags: rust
          fail_ci_if_error: false
          verbose: true

  test-on-windows:
    if: github.event.pull_request.draft == false
    runs-on: windows-latest-8-cores
    timeout-minutes: 60
    steps:
      - run: 'echo "temporary disabled"'
      # - run: git config --global core.autocrlf false
      # - uses: actions/checkout@v3
      # - uses: arduino/setup-protoc@v1
      # with:
      # repo-token: ${{ secrets.GITHUB_TOKEN }}
      # - name: Install Rust toolchain
      # uses: dtolnay/rust-toolchain@master
      # with:
      # toolchain: ${{ env.RUST_TOOLCHAIN }}
      # components: llvm-tools-preview
      # - name: Rust Cache
      # uses: Swatinem/rust-cache@v2
      # - name: Install Cargo Nextest
      # uses: taiki-e/install-action@nextest
      # - name: Install Python
      # uses: actions/setup-python@v4
      # with:
      # python-version: '3.10'
      # - name: Install PyArrow Package
      # run: pip install pyarrow
      # - name: Install WSL distribution
      # uses: Vampire/setup-wsl@v2
      # with:
      # distribution: Ubuntu-22.04
      # - name: Running tests
      # run: cargo nextest run -F pyo3_backend,dashboard
      # env:
      # RUST_BACKTRACE: 1
      # CARGO_INCREMENTAL: 0
      # GT_S3_BUCKET: ${{ secrets.S3_BUCKET }}
      # GT_S3_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
      # GT_S3_ACCESS_KEY: ${{ secrets.S3_ACCESS_KEY }}
      # GT_S3_REGION: ${{ secrets.S3_REGION }}
      # UNITTEST_LOG_DIR: "__unittest_logs"

.github/workflows/doc-issue.yml (vendored, 4 lines changed)
@@ -11,7 +11,7 @@ on:
jobs:
  doc_issue:
    if: github.event.label.name == 'doc update required'
    runs-on: ubuntu-latest
    runs-on: ubuntu-20.04
    steps:
      - name: create an issue in doc repo
        uses: dacbd/create-issue-action@main
@@ -25,7 +25,7 @@ jobs:
            ${{ github.event.issue.html_url || github.event.pull_request.html_url }}
  cloud_issue:
    if: github.event.label.name == 'cloud followup required'
    runs-on: ubuntu-latest
    runs-on: ubuntu-20.04
    steps:
      - name: create an issue in cloud repo
        uses: dacbd/create-issue-action@main

.github/workflows/docs.yml (vendored, 12 lines changed)
@@ -30,7 +30,7 @@ name: CI
jobs:
  typos:
    name: Spell Check with Typos
    runs-on: ubuntu-latest
    runs-on: ubuntu-20.04
    steps:
      - uses: actions/checkout@v3
      - uses: crate-ci/typos@v1.13.10
@@ -38,33 +38,33 @@ jobs:
  check:
    name: Check
    if: github.event.pull_request.draft == false
    runs-on: ubuntu-latest
    runs-on: ubuntu-20.04
    steps:
      - run: 'echo "No action required"'

  fmt:
    name: Rustfmt
    if: github.event.pull_request.draft == false
    runs-on: ubuntu-latest
    runs-on: ubuntu-20.04
    steps:
      - run: 'echo "No action required"'

  clippy:
    name: Clippy
    if: github.event.pull_request.draft == false
    runs-on: ubuntu-latest
    runs-on: ubuntu-20.04
    steps:
      - run: 'echo "No action required"'

  coverage:
    if: github.event.pull_request.draft == false
    runs-on: ubuntu-latest
    runs-on: ubuntu-20.04
    steps:
      - run: 'echo "No action required"'

  sqlness:
    name: Sqlness Test
    if: github.event.pull_request.draft == false
    runs-on: ubuntu-latest
    runs-on: ubuntu-20.04
    steps:
      - run: 'echo "No action required"'

.github/workflows/license.yaml (vendored, 2 lines changed)
@@ -8,7 +8,7 @@ on:
    types: [opened, synchronize, reopened, ready_for_review]
jobs:
  license-header-check:
    runs-on: ubuntu-latest
    runs-on: ubuntu-20.04
    name: license-header-check
    steps:
      - uses: actions/checkout@v2

.github/workflows/nightly-build.yml (vendored, 62 lines changed)
@@ -14,11 +14,11 @@ on:
        description: The runner uses to build linux-amd64 artifacts
        default: ec2-c6i.2xlarge-amd64
        options:
          - ubuntu-latest
          - ubuntu-latest-8-cores
          - ubuntu-latest-16-cores
          - ubuntu-latest-32-cores
          - ubuntu-latest-64-cores
          - ubuntu-20.04
          - ubuntu-20.04-8-cores
          - ubuntu-20.04-16-cores
          - ubuntu-20.04-32-cores
          - ubuntu-20.04-64-cores
          - ec2-c6i.xlarge-amd64 # 4C8G
          - ec2-c6i.2xlarge-amd64 # 8C16G
          - ec2-c6i.4xlarge-amd64 # 16C32G
@@ -70,7 +70,7 @@ jobs:
  allocate-runners:
    name: Allocate runners
    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
    runs-on: ubuntu-latest
    runs-on: ubuntu-20.04
    outputs:
      linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }}
      linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }}
@@ -147,11 +147,6 @@ jobs:
          cargo-profile: ${{ env.CARGO_PROFILE }}
          version: ${{ needs.allocate-runners.outputs.version }}
          disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
          release-to-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
          aws-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
          aws-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
          upload-latest-artifacts: false

  build-linux-arm64-artifacts:
    name: Build linux-arm64 artifacts
@@ -171,11 +166,6 @@ jobs:
          cargo-profile: ${{ env.CARGO_PROFILE }}
          version: ${{ needs.allocate-runners.outputs.version }}
          disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
          release-to-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
          aws-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
          aws-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
          upload-latest-artifacts: false

  release-images-to-dockerhub:
    name: Build and push images to DockerHub
@@ -185,7 +175,7 @@ jobs:
      build-linux-amd64-artifacts,
      build-linux-arm64-artifacts,
    ]
    runs-on: ubuntu-latest
    runs-on: ubuntu-20.04
    outputs:
      nightly-build-result: ${{ steps.set-nightly-build-result.outputs.nightly-build-result }}
    steps:
@@ -208,15 +198,14 @@ jobs:
        run: |
          echo "nightly-build-result=success" >> $GITHUB_OUTPUT

  release-images-to-acr:
    name: Build and push images to ACR
  release-cn-artifacts:
    name: Release artifacts to CN region
    if: ${{ inputs.release_images || github.event_name == 'schedule' }}
    needs: [
      allocate-runners,
      build-linux-amd64-artifacts,
      build-linux-arm64-artifacts,
      release-images-to-dockerhub,
    ]
    runs-on: ubuntu-latest
    runs-on: ubuntu-20.04
    # When we push to ACR, it's easy to fail due to some unknown network issues.
    # However, we don't want to fail the whole workflow because of this.
    # The ACR have daily sync with DockerHub, so don't worry about the image not being updated.
@@ -226,21 +215,30 @@ jobs:
        with:
          fetch-depth: 0

      - name: Build and push images to ACR
        uses: ./.github/actions/build-images
      - name: Release artifacts to CN region
        uses: ./.github/actions/release-cn-artifacts
        with:
          image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
          image-namespace: ${{ vars.IMAGE_NAMESPACE }}
          image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
          image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
          src-image-registry: docker.io
          src-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
          src-image-name: greptimedb
          dst-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
          dst-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
          dst-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
          dst-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
          version: ${{ needs.allocate-runners.outputs.version }}
          push-latest-tag: false # Don't push the latest tag to registry.
          aws-cn-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
          aws-cn-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
          aws-cn-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
          aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
          dev-mode: false
          update-version-info: false # Don't update version info in S3.
          push-latest-tag: false # Don't push the latest tag to registry.

  stop-linux-amd64-runner: # It's always run as the last job in the workflow to make sure that the runner is released.
    name: Stop linux-amd64 runner
    # Only run this job when the runner is allocated.
    if: ${{ always() }}
    runs-on: ubuntu-latest
    runs-on: ubuntu-20.04
    needs: [
      allocate-runners,
      build-linux-amd64-artifacts,
@@ -265,7 +263,7 @@ jobs:
    name: Stop linux-arm64 runner
    # Only run this job when the runner is allocated.
    if: ${{ always() }}
    runs-on: ubuntu-latest
    runs-on: ubuntu-20.04
    needs: [
      allocate-runners,
      build-linux-arm64-artifacts,
@@ -292,7 +290,7 @@ jobs:
    needs: [
      release-images-to-dockerhub
    ]
    runs-on: ubuntu-latest
    runs-on: ubuntu-20.04
    env:
      SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
    steps:

.github/workflows/nightly-ci.yml (vendored, Normal file, 82 lines)
@@ -0,0 +1,82 @@
# Nightly CI: runs tests every night for our second-tier platforms (Windows)

on:
  schedule:
    - cron: '0 23 * * 1-5'
  workflow_dispatch:

name: Nightly CI

concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

env:
  RUST_TOOLCHAIN: nightly-2023-08-07

jobs:
  sqlness:
    name: Sqlness Test
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [ windows-latest-8-cores ]
    timeout-minutes: 60
    steps:
      - uses: actions/checkout@v4.1.0
      - uses: arduino/setup-protoc@v1
        with:
          repo-token: ${{ secrets.GITHUB_TOKEN }}
      - uses: dtolnay/rust-toolchain@master
        with:
          toolchain: ${{ env.RUST_TOOLCHAIN }}
      - name: Rust Cache
        uses: Swatinem/rust-cache@v2
      - name: Run sqlness
        run: cargo sqlness
      - name: Upload sqlness logs
        if: always()
        uses: actions/upload-artifact@v3
        with:
          name: sqlness-logs
          path: ${{ runner.temp }}/greptime-*.log
          retention-days: 3

  test-on-windows:
    runs-on: windows-latest-8-cores
    timeout-minutes: 60
    steps:
      - run: git config --global core.autocrlf false
      - uses: actions/checkout@v4.1.0
      - uses: arduino/setup-protoc@v1
        with:
          repo-token: ${{ secrets.GITHUB_TOKEN }}
      - name: Install Rust toolchain
        uses: dtolnay/rust-toolchain@master
        with:
          toolchain: ${{ env.RUST_TOOLCHAIN }}
          components: llvm-tools-preview
      - name: Rust Cache
        uses: Swatinem/rust-cache@v2
      - name: Install Cargo Nextest
        uses: taiki-e/install-action@nextest
      - name: Install Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.10'
      - name: Install PyArrow Package
        run: pip install pyarrow
      - name: Install WSL distribution
        uses: Vampire/setup-wsl@v2
        with:
          distribution: Ubuntu-22.04
      - name: Running tests
        run: cargo nextest run -F pyo3_backend,dashboard
        env:
          RUST_BACKTRACE: 1
          CARGO_INCREMENTAL: 0
          GT_S3_BUCKET: ${{ secrets.S3_BUCKET }}
          GT_S3_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
          GT_S3_ACCESS_KEY: ${{ secrets.S3_ACCESS_KEY }}
          GT_S3_REGION: ${{ secrets.S3_REGION }}
          UNITTEST_LOG_DIR: "__unittest_logs"

.github/workflows/pr-title-checker.yml (vendored, 4 lines changed)
@@ -10,7 +10,7 @@ on:

jobs:
  check:
    runs-on: ubuntu-latest
    runs-on: ubuntu-20.04
    timeout-minutes: 10
    steps:
      - uses: thehanimo/pr-title-checker@v1.3.4
@@ -19,7 +19,7 @@ jobs:
          pass_on_octokit_error: false
          configuration_path: ".github/pr-title-checker-config.json"
  breaking:
    runs-on: ubuntu-latest
    runs-on: ubuntu-20.04
    timeout-minutes: 10
    steps:
      - uses: thehanimo/pr-title-checker@v1.3.4
.github/workflows/release-dev-builder-images.yaml (new file, 85 lines)
@@ -0,0 +1,85 @@
|
||||
name: Release dev-builder images
|
||||
|
||||
on:
|
||||
workflow_dispatch: # Allows you to run this workflow manually.
|
||||
inputs:
|
||||
version:
|
||||
description: Version of the dev-builder
|
||||
required: false
|
||||
default: latest
|
||||
release_dev_builder_ubuntu_image:
|
||||
type: boolean
|
||||
description: Release dev-builder-ubuntu image
|
||||
required: false
|
||||
default: false
|
||||
release_dev_builder_centos_image:
|
||||
type: boolean
|
||||
description: Release dev-builder-centos image
|
||||
required: false
|
||||
default: false
|
||||
release_dev_builder_android_image:
|
||||
type: boolean
|
||||
description: Release dev-builder-android image
|
||||
required: false
|
||||
default: false
|
||||
|
||||
jobs:
|
||||
release-dev-builder-images:
|
||||
name: Release dev builder images
|
||||
if: ${{ inputs.release_dev_builder_ubuntu_image || inputs.release_dev_builder_centos_image || inputs.release_dev_builder_android_image }} # Only manually trigger this job.
|
||||
runs-on: ubuntu-20.04-16-cores
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Build and push dev builder images
|
||||
uses: ./.github/actions/build-dev-builder-images
|
||||
with:
|
||||
version: ${{ inputs.version }}
|
||||
dockerhub-image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
dockerhub-image-registry-token: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
build-dev-builder-ubuntu: ${{ inputs.release_dev_builder_ubuntu_image }}
|
||||
build-dev-builder-centos: ${{ inputs.release_dev_builder_centos_image }}
|
||||
build-dev-builder-android: ${{ inputs.release_dev_builder_android_image }}
|
||||
|
||||
release-dev-builder-images-cn: # Note: be careful of https://github.com/containers/skopeo/issues/1874; we decided to use the latest stable skopeo container.
|
||||
name: Release dev builder images to CN region
|
||||
runs-on: ubuntu-20.04
|
||||
needs: [
|
||||
release-dev-builder-images
|
||||
]
|
||||
steps:
|
||||
- name: Push dev-builder-ubuntu image
|
||||
shell: bash
|
||||
if: ${{ inputs.release_dev_builder_ubuntu_image }}
|
||||
env:
|
||||
DST_REGISTRY_USERNAME: ${{ secrets.ALICLOUD_USERNAME }}
|
||||
DST_REGISTRY_PASSWORD: ${{ secrets.ALICLOUD_PASSWORD }}
|
||||
run: |
|
||||
docker run quay.io/skopeo/stable:latest copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ inputs.version }} \
|
||||
--dest-creds "$DST_REGISTRY_USERNAME":"$DST_REGISTRY_PASSWORD" \
|
||||
docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ inputs.version }}
|
||||
|
||||
- name: Push dev-builder-centos image
|
||||
shell: bash
|
||||
if: ${{ inputs.release_dev_builder_centos_image }}
|
||||
env:
|
||||
DST_REGISTRY_USERNAME: ${{ secrets.ALICLOUD_USERNAME }}
|
||||
DST_REGISTRY_PASSWORD: ${{ secrets.ALICLOUD_PASSWORD }}
|
||||
run: |
|
||||
docker run quay.io/skopeo/stable:latest copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:${{ inputs.version }} \
|
||||
--dest-creds "$DST_REGISTRY_USERNAME":"$DST_REGISTRY_PASSWORD" \
|
||||
docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:${{ inputs.version }}
|
||||
|
||||
- name: Push dev-builder-android image
|
||||
shell: bash
|
||||
if: ${{ inputs.release_dev_builder_android_image }}
|
||||
env:
|
||||
DST_REGISTRY_USERNAME: ${{ secrets.ALICLOUD_USERNAME }}
|
||||
DST_REGISTRY_PASSWORD: ${{ secrets.ALICLOUD_PASSWORD }}
|
||||
run: |
|
||||
docker run quay.io/skopeo/stable:latest copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:${{ inputs.version }} \
|
||||
--dest-creds "$DST_REGISTRY_USERNAME":"$DST_REGISTRY_PASSWORD" \
|
||||
docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:${{ inputs.version }}
|
||||
.github/workflows/release.yml (142 lines changed)
@@ -18,11 +18,11 @@ on:
|
||||
description: The runner used to build linux-amd64 artifacts
|
||||
default: ec2-c6i.4xlarge-amd64
|
||||
options:
|
||||
- ubuntu-latest
|
||||
- ubuntu-latest-8-cores
|
||||
- ubuntu-latest-16-cores
|
||||
- ubuntu-latest-32-cores
|
||||
- ubuntu-latest-64-cores
|
||||
- ubuntu-20.04
|
||||
- ubuntu-20.04-8-cores
|
||||
- ubuntu-20.04-16-cores
|
||||
- ubuntu-20.04-32-cores
|
||||
- ubuntu-20.04-64-cores
|
||||
- ec2-c6i.xlarge-amd64 # 4C8G
|
||||
- ec2-c6i.2xlarge-amd64 # 8C16G
|
||||
- ec2-c6i.4xlarge-amd64 # 16C32G
|
||||
@@ -63,7 +63,12 @@ on:
|
||||
description: Build macos artifacts
|
||||
required: false
|
||||
default: false
|
||||
release_artifacts:
|
||||
build_windows_artifacts:
|
||||
type: boolean
|
||||
description: Build Windows artifacts
|
||||
required: false
|
||||
default: false
|
||||
publish_github_release:
|
||||
type: boolean
|
||||
description: Create GitHub release and upload artifacts
|
||||
required: false
|
||||
@@ -73,11 +78,6 @@ on:
|
||||
description: Build and push images to DockerHub and ACR
|
||||
required: false
|
||||
default: false
|
||||
release_dev_builder_image:
|
||||
type: boolean
|
||||
description: Release dev-builder image
|
||||
required: false
|
||||
default: false
|
||||
|
||||
# Use env variables to control all the release process.
|
||||
env:
|
||||
@@ -91,17 +91,18 @@ env:
|
||||
# The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nightly-20230313;
|
||||
NIGHTLY_RELEASE_PREFIX: nightly
|
||||
# Note: The NEXT_RELEASE_VERSION should be modified manually by every formal release.
|
||||
NEXT_RELEASE_VERSION: v0.4.0
|
||||
NEXT_RELEASE_VERSION: v0.5.0
|
||||
|
||||
jobs:
|
||||
allocate-runners:
|
||||
name: Allocate runners
|
||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-20.04
|
||||
outputs:
|
||||
linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }}
|
||||
linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }}
|
||||
macos-runner: ${{ inputs.macos_runner || vars.DEFAULT_MACOS_RUNNER }}
|
||||
windows-runner: windows-latest-8-cores
|
||||
|
||||
# The following EC2 resource id will be used for resource releasing.
|
||||
linux-amd64-ec2-runner-label: ${{ steps.start-linux-amd64-runner.outputs.label }}
|
||||
@@ -177,11 +178,6 @@ jobs:
|
||||
cargo-profile: ${{ env.CARGO_PROFILE }}
|
||||
version: ${{ needs.allocate-runners.outputs.version }}
|
||||
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
|
||||
release-to-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
|
||||
aws-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
|
||||
aws-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
|
||||
aws-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
|
||||
upload-to-s3: ${{ vars.UPLOAD_TO_S3 }}
|
||||
|
||||
build-linux-arm64-artifacts:
|
||||
name: Build linux-arm64 artifacts
|
||||
@@ -201,11 +197,6 @@ jobs:
|
||||
cargo-profile: ${{ env.CARGO_PROFILE }}
|
||||
version: ${{ needs.allocate-runners.outputs.version }}
|
||||
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
|
||||
release-to-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
|
||||
aws-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
|
||||
aws-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
|
||||
aws-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
|
||||
upload-to-s3: ${{ vars.UPLOAD_TO_S3 }}
|
||||
|
||||
build-macos-artifacts:
|
||||
name: Build macOS artifacts
|
||||
@@ -247,12 +238,43 @@ jobs:
|
||||
features: ${{ matrix.features }}
|
||||
version: ${{ needs.allocate-runners.outputs.version }}
|
||||
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
|
||||
release-to-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
|
||||
artifacts-dir: ${{ matrix.artifacts-dir-prefix }}-${{ needs.allocate-runners.outputs.version }}
|
||||
aws-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
|
||||
aws-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
|
||||
aws-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
|
||||
upload-to-s3: ${{ vars.UPLOAD_TO_S3 }}
|
||||
|
||||
build-windows-artifacts:
|
||||
name: Build Windows artifacts
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
include:
|
||||
- os: ${{ needs.allocate-runners.outputs.windows-runner }}
|
||||
arch: x86_64-pc-windows-msvc
|
||||
features: servers/dashboard
|
||||
artifacts-dir-prefix: greptime-windows-amd64
|
||||
- os: ${{ needs.allocate-runners.outputs.windows-runner }}
|
||||
arch: x86_64-pc-windows-msvc
|
||||
features: pyo3_backend,servers/dashboard
|
||||
artifacts-dir-prefix: greptime-windows-amd64-pyo3
|
||||
runs-on: ${{ matrix.os }}
|
||||
needs: [
|
||||
allocate-runners,
|
||||
]
|
||||
if: ${{ inputs.build_windows_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
|
||||
steps:
|
||||
- run: git config --global core.autocrlf false
|
||||
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- uses: ./.github/actions/build-windows-artifacts
|
||||
with:
|
||||
arch: ${{ matrix.arch }}
|
||||
rust-toolchain: ${{ env.RUST_TOOLCHAIN }}
|
||||
cargo-profile: ${{ env.CARGO_PROFILE }}
|
||||
features: ${{ matrix.features }}
|
||||
version: ${{ needs.allocate-runners.outputs.version }}
|
||||
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
|
||||
artifacts-dir: ${{ matrix.artifacts-dir-prefix }}-${{ needs.allocate-runners.outputs.version }}
|
||||
|
||||
release-images-to-dockerhub:
|
||||
name: Build and push images to DockerHub
|
||||
@@ -277,15 +299,14 @@ jobs:
|
||||
image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
version: ${{ needs.allocate-runners.outputs.version }}
|
||||
|
||||
release-images-to-acr:
|
||||
name: Build and push images to ACR
|
||||
release-cn-artifacts:
|
||||
name: Release artifacts to CN region
|
||||
if: ${{ inputs.release_images || github.event_name == 'push' || github.event_name == 'schedule' }}
|
||||
needs: [
|
||||
allocate-runners,
|
||||
build-linux-amd64-artifacts,
|
||||
build-linux-arm64-artifacts,
|
||||
release-images-to-dockerhub,
|
||||
]
|
||||
runs-on: ubuntu-2004-16-cores
|
||||
runs-on: ubuntu-20.04
|
||||
# Pushes to ACR can easily fail due to transient network issues.
# However, we don't want to fail the whole workflow because of this.
# The ACR has a daily sync with DockerHub, so don't worry about the image not being updated.
|
||||
@@ -295,18 +316,28 @@ jobs:
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Build and push images to ACR
|
||||
uses: ./.github/actions/build-images
|
||||
- name: Release artifacts to CN region
|
||||
uses: ./.github/actions/release-cn-artifacts
|
||||
with:
|
||||
image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
|
||||
image-namespace: ${{ vars.IMAGE_NAMESPACE }}
|
||||
image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
|
||||
image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
|
||||
src-image-registry: docker.io
|
||||
src-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
|
||||
src-image-name: greptimedb
|
||||
dst-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
|
||||
dst-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
|
||||
dst-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
|
||||
dst-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
|
||||
version: ${{ needs.allocate-runners.outputs.version }}
|
||||
aws-cn-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
|
||||
aws-cn-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
|
||||
aws-cn-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
|
||||
aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
|
||||
dev-mode: false
|
||||
update-version-info: true
|
||||
push-latest-tag: true
|
||||
|
||||
release-artifacts:
|
||||
publish-github-release:
|
||||
name: Create GitHub release and upload artifacts
|
||||
if: ${{ inputs.release_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
|
||||
if: ${{ inputs.publish_github_release || github.event_name == 'push' || github.event_name == 'schedule' }}
|
||||
needs: [
|
||||
allocate-runners,
|
||||
build-linux-amd64-artifacts,
|
||||
@@ -314,36 +345,17 @@ jobs:
|
||||
build-macos-artifacts,
|
||||
release-images-to-dockerhub,
|
||||
]
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Release artifacts
|
||||
uses: ./.github/actions/release-artifacts
|
||||
- name: Publish GitHub release
|
||||
uses: ./.github/actions/publish-github-release
|
||||
with:
|
||||
version: ${{ needs.allocate-runners.outputs.version }}
|
||||
|
||||
release-dev-builder-image:
|
||||
name: Release dev builder image
|
||||
if: ${{ inputs.release_dev_builder_image }} # Only manually trigger this job.
|
||||
runs-on: ubuntu-latest-16-cores
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Build and push dev builder image
|
||||
uses: ./.github/actions/build-dev-builder-image
|
||||
with:
|
||||
dockerhub-image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
dockerhub-image-registry-token: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
acr-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
|
||||
acr-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
|
||||
acr-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
|
||||
|
||||
### Stop runners ###
|
||||
# It's necessary to split the job of releasing runners into 'stop-linux-amd64-runner' and 'stop-linux-arm64-runner',
# because we can then terminate the specified EC2 instance immediately after its job finishes, without unnecessary waiting.
|
||||
@@ -351,7 +363,7 @@ jobs:
|
||||
name: Stop linux-amd64 runner
|
||||
# Only run this job when the runner is allocated.
|
||||
if: ${{ always() }}
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-20.04
|
||||
needs: [
|
||||
allocate-runners,
|
||||
build-linux-amd64-artifacts,
|
||||
@@ -376,7 +388,7 @@ jobs:
|
||||
name: Stop linux-arm64 runner
|
||||
# Only run this job when the runner is allocated.
|
||||
if: ${{ always() }}
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-20.04
|
||||
needs: [
|
||||
allocate-runners,
|
||||
build-linux-arm64-artifacts,
|
||||
|
||||
Cargo.lock (generated, 348 lines changed; diff too large, suppressed)
Cargo.toml (38 lines changed)
@@ -39,6 +39,7 @@ members = [
|
||||
"src/object-store",
|
||||
"src/operator",
|
||||
"src/partition",
|
||||
"src/plugins",
|
||||
"src/promql",
|
||||
"src/query",
|
||||
"src/script",
|
||||
@@ -54,39 +55,43 @@ members = [
|
||||
resolver = "2"
|
||||
|
||||
[workspace.package]
|
||||
version = "0.4.0-nightly"
|
||||
version = "0.4.0"
|
||||
edition = "2021"
|
||||
license = "Apache-2.0"
|
||||
|
||||
[workspace.dependencies]
|
||||
aquamarine = "0.3"
|
||||
arrow = { version = "43.0" }
|
||||
etcd-client = "0.11"
|
||||
arrow-array = "43.0"
|
||||
arrow-flight = "43.0"
|
||||
arrow-schema = { version = "43.0", features = ["serde"] }
|
||||
async-stream = "0.3"
|
||||
async-trait = "0.1"
|
||||
chrono = { version = "0.4", features = ["serde"] }
|
||||
datafusion = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "c0b0fca548e99d020c76e1a1cd7132aab26000e1" }
|
||||
datafusion-common = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "c0b0fca548e99d020c76e1a1cd7132aab26000e1" }
|
||||
datafusion-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "c0b0fca548e99d020c76e1a1cd7132aab26000e1" }
|
||||
datafusion-optimizer = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "c0b0fca548e99d020c76e1a1cd7132aab26000e1" }
|
||||
datafusion-physical-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "c0b0fca548e99d020c76e1a1cd7132aab26000e1" }
|
||||
datafusion-sql = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "c0b0fca548e99d020c76e1a1cd7132aab26000e1" }
|
||||
datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "c0b0fca548e99d020c76e1a1cd7132aab26000e1" }
|
||||
datafusion = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "b6f3b28b6fe91924cc8dd3d83726b766f2a706ec" }
|
||||
datafusion-common = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "b6f3b28b6fe91924cc8dd3d83726b766f2a706ec" }
|
||||
datafusion-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "b6f3b28b6fe91924cc8dd3d83726b766f2a706ec" }
|
||||
datafusion-optimizer = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "b6f3b28b6fe91924cc8dd3d83726b766f2a706ec" }
|
||||
datafusion-physical-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "b6f3b28b6fe91924cc8dd3d83726b766f2a706ec" }
|
||||
datafusion-sql = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "b6f3b28b6fe91924cc8dd3d83726b766f2a706ec" }
|
||||
datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "b6f3b28b6fe91924cc8dd3d83726b766f2a706ec" }
|
||||
derive_builder = "0.12"
|
||||
etcd-client = "0.11"
|
||||
futures = "0.3"
|
||||
futures-util = "0.3"
|
||||
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "9d3f28d07d29607d0e3c1823f4a4d2bc229d05b9" }
|
||||
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "1f1dd532a111e3834cc3019c5605e2993ffb9dc3" }
|
||||
humantime-serde = "1.1"
|
||||
itertools = "0.10"
|
||||
lazy_static = "1.4"
|
||||
meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "abbd357c1e193cd270ea65ee7652334a150b628f" }
|
||||
metrics = "0.20"
|
||||
moka = "0.12"
|
||||
once_cell = "1.18"
|
||||
opentelemetry-proto = { version = "0.2", features = ["gen-tonic", "metrics"] }
|
||||
parquet = "43.0"
|
||||
paste = "1.0"
|
||||
prost = "0.11"
|
||||
raft-engine = { git = "https://github.com/tikv/raft-engine.git", rev = "22dfb426cd994602b57725ef080287d3e53db479" }
|
||||
rand = "0.8"
|
||||
regex = "1.8"
|
||||
reqwest = { version = "0.11", default-features = false, features = [
|
||||
@@ -108,8 +113,6 @@ tokio-util = { version = "0.7", features = ["io-util", "compat"] }
|
||||
toml = "0.7"
|
||||
tonic = { version = "0.9", features = ["tls"] }
|
||||
uuid = { version = "1", features = ["serde", "v4", "fast-rng"] }
|
||||
metrics = "0.20"
|
||||
meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "abbd357c1e193cd270ea65ee7652334a150b628f" }
|
||||
## workspaces members
|
||||
api = { path = "src/api" }
|
||||
auth = { path = "src/auth" }
|
||||
@@ -122,19 +125,18 @@ common-config = { path = "src/common/config" }
|
||||
common-datasource = { path = "src/common/datasource" }
|
||||
common-error = { path = "src/common/error" }
|
||||
common-function = { path = "src/common/function" }
|
||||
common-macro = { path = "src/common/macro" }
|
||||
common-greptimedb-telemetry = { path = "src/common/greptimedb-telemetry" }
|
||||
common-grpc = { path = "src/common/grpc" }
|
||||
common-grpc-expr = { path = "src/common/grpc-expr" }
|
||||
common-macro = { path = "src/common/macro" }
|
||||
common-mem-prof = { path = "src/common/mem-prof" }
|
||||
common-meta = { path = "src/common/meta" }
|
||||
common-pprof = { path = "src/common/pprof" }
|
||||
common-procedure = { path = "src/common/procedure" }
|
||||
common-procedure-test = { path = "src/common/procedure-test" }
|
||||
common-pprof = { path = "src/common/pprof" }
|
||||
common-query = { path = "src/common/query" }
|
||||
common-recordbatch = { path = "src/common/recordbatch" }
|
||||
common-runtime = { path = "src/common/runtime" }
|
||||
substrait = { path = "src/common/substrait" }
|
||||
common-telemetry = { path = "src/common/telemetry" }
|
||||
common-test-util = { path = "src/common/test-util" }
|
||||
common-time = { path = "src/common/time" }
|
||||
@@ -148,20 +150,20 @@ meta-client = { path = "src/meta-client" }
|
||||
meta-srv = { path = "src/meta-srv" }
|
||||
mito = { path = "src/mito" }
|
||||
mito2 = { path = "src/mito2" }
|
||||
operator = { path = "src/operator" }
|
||||
object-store = { path = "src/object-store" }
|
||||
operator = { path = "src/operator" }
|
||||
partition = { path = "src/partition" }
|
||||
plugins = { path = "src/plugins" }
|
||||
promql = { path = "src/promql" }
|
||||
query = { path = "src/query" }
|
||||
raft-engine = { git = "https://github.com/tikv/raft-engine.git", rev = "22dfb426cd994602b57725ef080287d3e53db479" }
|
||||
script = { path = "src/script" }
|
||||
servers = { path = "src/servers" }
|
||||
session = { path = "src/session" }
|
||||
sql = { path = "src/sql" }
|
||||
storage = { path = "src/storage" }
|
||||
store-api = { path = "src/store-api" }
|
||||
substrait = { path = "src/common/substrait" }
|
||||
table = { path = "src/table" }
|
||||
table-procedure = { path = "src/table-procedure" }
|
||||
|
||||
[workspace.dependencies.meter-macros]
|
||||
git = "https://github.com/GreptimeTeam/greptime-meter.git"
|
||||
|
||||
Makefile (31 lines changed)
@@ -55,11 +55,15 @@ else
|
||||
BUILDX_MULTI_PLATFORM_BUILD_OPTS := -o type=docker
|
||||
endif
|
||||
|
||||
ifneq ($(strip $(CARGO_BUILD_EXTRA_OPTS)),)
|
||||
CARGO_BUILD_OPTS += ${CARGO_BUILD_EXTRA_OPTS}
|
||||
endif
|
||||
|
||||
##@ Build
|
||||
|
||||
.PHONY: build
|
||||
build: ## Build debug version greptime.
|
||||
cargo build ${CARGO_BUILD_OPTS}
|
||||
cargo ${CARGO_EXTENSION} build ${CARGO_BUILD_OPTS}
|
||||
|
||||
.PHONY: build-by-dev-builder
|
||||
build-by-dev-builder: ## Build greptime by dev-builder.
|
||||
@@ -67,11 +71,34 @@ build-by-dev-builder: ## Build greptime by dev-builder.
|
||||
-v ${PWD}:/greptimedb -v ${CARGO_REGISTRY_CACHE}:/root/.cargo/registry \
|
||||
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:latest \
|
||||
make build \
|
||||
CARGO_EXTENSION="${CARGO_EXTENSION}" \
|
||||
CARGO_PROFILE=${CARGO_PROFILE} \
|
||||
FEATURES=${FEATURES} \
|
||||
TARGET_DIR=${TARGET_DIR} \
|
||||
TARGET=${TARGET} \
|
||||
RELEASE=${RELEASE}
|
||||
RELEASE=${RELEASE} \
|
||||
CARGO_BUILD_EXTRA_OPTS="${CARGO_BUILD_EXTRA_OPTS}"
|
||||
|
||||
.PHONY: build-android-bin
|
||||
build-android-bin: ## Build greptime binary for android.
|
||||
docker run --network=host \
|
||||
-v ${PWD}:/greptimedb -v ${CARGO_REGISTRY_CACHE}:/root/.cargo/registry \
|
||||
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-android:latest \
|
||||
make build \
|
||||
CARGO_EXTENSION="ndk --platform 23 -t aarch64-linux-android" \
|
||||
CARGO_PROFILE=release \
|
||||
FEATURES="${FEATURES}" \
|
||||
TARGET_DIR="${TARGET_DIR}" \
|
||||
TARGET="${TARGET}" \
|
||||
RELEASE="${RELEASE}" \
|
||||
CARGO_BUILD_EXTRA_OPTS="--bin greptime --no-default-features"
|
||||
|
||||
.PHONY: strip-android-bin
|
||||
strip-android-bin: build-android-bin ## Strip greptime binary for android.
|
||||
docker run --network=host \
|
||||
-v ${PWD}:/greptimedb \
|
||||
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-android:latest \
|
||||
bash -c '$${NDK_ROOT}/toolchains/llvm/prebuilt/linux-x86_64/bin/llvm-strip /greptimedb/target/aarch64-linux-android/release/greptime'
|
||||
|
||||
.PHONY: clean
|
||||
clean: ## Clean the project.
|
||||
|
||||
README.md (12 lines changed)
@@ -27,6 +27,14 @@
|
||||
<a href="https://greptime.com/slack"><img src="https://img.shields.io/badge/slack-GreptimeDB-0abd59?logo=slack" alt="slack" /></a>
|
||||
</p>
|
||||
|
||||
## Upcoming Event
|
||||
Come and meet us at **KubeCon + CloudNativeCon North America 2023!**
|
||||
<p align="center">
|
||||
<picture>
|
||||
<img alt="KubeCon + CloudNativeCon North Logo" src="./docs/banner/KCCNC_NA_2023_1000x200_Email Banner.png" width="800px">
|
||||
</picture>
|
||||
</p>
|
||||
|
||||
## What is GreptimeDB
|
||||
|
||||
GreptimeDB is an open-source time-series database with a special focus on
|
||||
@@ -96,11 +104,11 @@ Or if you built from docker:
|
||||
docker run -p 4002:4002 -v "$(pwd):/tmp/greptimedb" greptime/greptimedb standalone start
|
||||
```
|
||||
|
||||
Please see [the online document site](https://docs.greptime.com/getting-started/overview#install-greptimedb) for more installation options and [operations info](https://docs.greptime.com/user-guide/operations/overview).
|
||||
Please see the online document site for more installation options and [operations info](https://docs.greptime.com/user-guide/operations/overview).
|
||||
|
||||
### Get started
|
||||
|
||||
Read the [complete getting started guide](https://docs.greptime.com/getting-started/overview#connect) on our [official document site](https://docs.greptime.com/).
|
||||
Read the [complete getting started guide](https://docs.greptime.com/getting-started/try-out-greptimedb) on our [official document site](https://docs.greptime.com/).
|
||||
|
||||
To write and query data, GreptimeDB is compatible with multiple [protocols and clients](https://docs.greptime.com/user-guide/clients/overview).
|
||||
|
||||
|
||||
@@ -6,8 +6,10 @@ license.workspace = true
|
||||
|
||||
[dependencies]
|
||||
arrow.workspace = true
|
||||
chrono.workspace = true
|
||||
clap = { version = "4.0", features = ["derive"] }
|
||||
client = { workspace = true }
|
||||
futures-util.workspace = true
|
||||
indicatif = "0.17.1"
|
||||
itertools.workspace = true
|
||||
parquet.workspace = true
|
||||
|
||||
@@ -29,14 +29,14 @@ use client::api::v1::column::Values;
|
||||
use client::api::v1::{
|
||||
Column, ColumnDataType, ColumnDef, CreateTableExpr, InsertRequest, InsertRequests, SemanticType,
|
||||
};
|
||||
use client::{Client, Database, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use client::{Client, Database, Output, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use futures_util::TryStreamExt;
|
||||
use indicatif::{MultiProgress, ProgressBar, ProgressStyle};
|
||||
use parquet::arrow::arrow_reader::ParquetRecordBatchReaderBuilder;
|
||||
use tokio::task::JoinSet;
|
||||
|
||||
const CATALOG_NAME: &str = "greptime";
|
||||
const SCHEMA_NAME: &str = "public";
|
||||
const TABLE_NAME: &str = "nyc_taxi";
|
||||
|
||||
#[derive(Parser)]
|
||||
#[command(name = "NYC benchmark runner")]
|
||||
@@ -74,7 +74,12 @@ fn get_file_list<P: AsRef<Path>>(path: P) -> Vec<PathBuf> {
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn new_table_name() -> String {
|
||||
format!("nyc_taxi_{}", chrono::Utc::now().timestamp())
|
||||
}
|
||||
|
||||
async fn write_data(
|
||||
table_name: &str,
|
||||
batch_size: usize,
|
||||
db: &Database,
|
||||
path: PathBuf,
|
||||
@@ -104,7 +109,7 @@ async fn write_data(
|
||||
}
|
||||
let (columns, row_count) = convert_record_batch(record_batch);
|
||||
let request = InsertRequest {
|
||||
table_name: TABLE_NAME.to_string(),
|
||||
table_name: table_name.to_string(),
|
||||
columns,
|
||||
row_count,
|
||||
};
|
||||
@@ -113,7 +118,7 @@ async fn write_data(
|
||||
};
|
||||
|
||||
let now = Instant::now();
|
||||
let _ = db.insert(requests).await.unwrap();
|
||||
db.insert(requests).await.unwrap();
|
||||
let elapsed = now.elapsed();
|
||||
total_rpc_elapsed_ms += elapsed.as_millis();
|
||||
progress_bar.inc(row_count as _);
|
||||
@@ -131,6 +136,11 @@ fn convert_record_batch(record_batch: RecordBatch) -> (Vec<Column>, u32) {
|
||||
|
||||
for (array, field) in record_batch.columns().iter().zip(fields.iter()) {
|
||||
let (values, datatype) = build_values(array);
|
||||
let semantic_type = match field.name().as_str() {
|
||||
"VendorID" => SemanticType::Tag,
|
||||
"tpep_pickup_datetime" => SemanticType::Timestamp,
|
||||
_ => SemanticType::Field,
|
||||
};
|
||||
|
||||
let column = Column {
|
||||
column_name: field.name().clone(),
|
||||
@@ -141,8 +151,7 @@ fn convert_record_batch(record_batch: RecordBatch) -> (Vec<Column>, u32) {
|
||||
.map(|bitmap| bitmap.buffer().as_slice().to_vec())
|
||||
.unwrap_or_default(),
|
||||
datatype: datatype.into(),
|
||||
// datatype and semantic_type are set to default
|
||||
..Default::default()
|
||||
semantic_type: semantic_type as i32,
|
||||
};
|
||||
columns.push(column);
|
||||
}
|
||||
@@ -243,11 +252,11 @@ fn is_record_batch_full(batch: &RecordBatch) -> bool {
|
||||
batch.columns().iter().all(|col| col.null_count() == 0)
|
||||
}
|
||||
|
||||
fn create_table_expr() -> CreateTableExpr {
|
||||
fn create_table_expr(table_name: &str) -> CreateTableExpr {
|
||||
CreateTableExpr {
|
||||
catalog_name: CATALOG_NAME.to_string(),
|
||||
schema_name: SCHEMA_NAME.to_string(),
|
||||
table_name: TABLE_NAME.to_string(),
|
||||
table_name: table_name.to_string(),
|
||||
desc: "".to_string(),
|
||||
column_defs: vec![
|
||||
ColumnDef {
|
||||
@@ -261,7 +270,7 @@ fn create_table_expr() -> CreateTableExpr {
|
||||
ColumnDef {
|
||||
name: "tpep_pickup_datetime".to_string(),
|
||||
data_type: ColumnDataType::TimestampMicrosecond as i32,
|
||||
is_nullable: true,
|
||||
is_nullable: false,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Timestamp as i32,
|
||||
comment: String::new(),
|
||||
@@ -405,31 +414,31 @@ fn create_table_expr() -> CreateTableExpr {
|
||||
],
|
||||
time_index: "tpep_pickup_datetime".to_string(),
|
||||
primary_keys: vec!["VendorID".to_string()],
|
||||
create_if_not_exists: false,
|
||||
create_if_not_exists: true,
|
||||
table_options: Default::default(),
|
||||
table_id: None,
|
||||
engine: "mito".to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
fn query_set() -> HashMap<String, String> {
|
||||
fn query_set(table_name: &str) -> HashMap<String, String> {
|
||||
HashMap::from([
|
||||
(
|
||||
"count_all".to_string(),
|
||||
format!("SELECT COUNT(*) FROM {TABLE_NAME};"),
|
||||
format!("SELECT COUNT(*) FROM {table_name};"),
|
||||
),
|
||||
(
|
||||
"fare_amt_by_passenger".to_string(),
|
||||
format!("SELECT passenger_count, MIN(fare_amount), MAX(fare_amount), SUM(fare_amount) FROM {TABLE_NAME} GROUP BY passenger_count"),
|
||||
format!("SELECT passenger_count, MIN(fare_amount), MAX(fare_amount), SUM(fare_amount) FROM {table_name} GROUP BY passenger_count"),
|
||||
)
|
||||
])
|
||||
}
|
||||
|
||||
async fn do_write(args: &Args, db: &Database) {
|
||||
async fn do_write(args: &Args, db: &Database, table_name: &str) {
|
||||
let mut file_list = get_file_list(args.path.clone().expect("Specify data path in argument"));
|
||||
let mut write_jobs = JoinSet::new();
|
||||
|
||||
let create_table_result = db.create(create_table_expr()).await;
|
||||
let create_table_result = db.create(create_table_expr(table_name)).await;
|
||||
println!("Create table result: {create_table_result:?}");
|
||||
|
||||
let progress_bar_style = ProgressStyle::with_template(
|
||||
@@ -447,8 +456,10 @@ async fn do_write(args: &Args, db: &Database) {
|
||||
let db = db.clone();
|
||||
let mpb = multi_progress_bar.clone();
|
||||
let pb_style = progress_bar_style.clone();
|
||||
let _ = write_jobs
|
||||
.spawn(async move { write_data(batch_size, &db, path, mpb, pb_style).await });
|
||||
let table_name = table_name.to_string();
|
||||
let _ = write_jobs.spawn(async move {
|
||||
write_data(&table_name, batch_size, &db, path, mpb, pb_style).await
|
||||
});
|
||||
}
|
||||
}
|
||||
while write_jobs.join_next().await.is_some() {
|
||||
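The change above threads the per-run table name into the spawned write tasks. Because every future spawned on the `JoinSet` must be `'static`, the borrowed `&str` is cloned into an owned `String` before the `async move` block captures it. A minimal sketch of that pattern follows; the `write_data` stub and the file names are hypothetical stand-ins for the benchmark's real code:

```rust
// Sketch only: clone a borrowed table name into each spawned task, as the
// benchmark change above does. `write_data` is a stand-in stub.
use tokio::task::JoinSet;

async fn write_data(table_name: &str, path: String) {
    println!("writing {path} into {table_name}");
}

#[tokio::main]
async fn main() {
    let table_name = "nyc_taxi_1700000000"; // e.g. produced by new_table_name()
    let mut write_jobs = JoinSet::new();

    for path in ["part-0.parquet".to_string(), "part-1.parquet".to_string()] {
        // The spawned future must own everything it captures ('static),
        // so each task gets its own copy of the table name.
        let table_name = table_name.to_string();
        let _ = write_jobs.spawn(async move { write_data(&table_name, path).await });
    }

    while write_jobs.join_next().await.is_some() {}
}
```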
@@ -457,24 +468,32 @@ async fn do_write(args: &Args, db: &Database) {
|
||||
let db = db.clone();
|
||||
let mpb = multi_progress_bar.clone();
|
||||
let pb_style = progress_bar_style.clone();
|
||||
let _ = write_jobs
|
||||
.spawn(async move { write_data(batch_size, &db, path, mpb, pb_style).await });
|
||||
let table_name = table_name.to_string();
|
||||
let _ = write_jobs.spawn(async move {
|
||||
write_data(&table_name, batch_size, &db, path, mpb, pb_style).await
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn do_query(num_iter: usize, db: &Database) {
|
||||
for (query_name, query) in query_set() {
|
||||
async fn do_query(num_iter: usize, db: &Database, table_name: &str) {
|
||||
for (query_name, query) in query_set(table_name) {
|
||||
println!("Running query: {query}");
|
||||
for i in 0..num_iter {
|
||||
let now = Instant::now();
|
||||
let _res = db.sql(&query).await.unwrap();
|
||||
let res = db.sql(&query).await.unwrap();
|
||||
match res {
|
||||
Output::AffectedRows(_) | Output::RecordBatches(_) => (),
|
||||
Output::Stream(stream) => {
|
||||
stream.try_collect::<Vec<_>>().await.unwrap();
|
||||
}
|
||||
}
|
||||
let elapsed = now.elapsed();
|
||||
println!(
|
||||
"query {}, iteration {}: {}ms",
|
||||
query_name,
|
||||
i,
|
||||
elapsed.as_millis()
|
||||
elapsed.as_millis(),
|
||||
);
|
||||
}
|
||||
}
|
||||
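The `do_query` change above matters for the reported numbers: previously the result was bound to `_res` and a streamed result was never polled, so the measured latency excluded fetching rows. Draining `Output::Stream` before reading the clock includes result materialization. A small hedged sketch of the pattern; the `Output` enum and the `u64` "batches" below are stand-ins, not the client crate's types:

```rust
// Sketch only: time a query by draining the result stream before stopping the
// clock. The Output enum and u64 items are stand-ins for record batches.
use std::time::Instant;

use futures_util::stream::{self, BoxStream, StreamExt, TryStreamExt};

enum Output {
    AffectedRows(usize),
    Stream(BoxStream<'static, Result<u64, std::io::Error>>),
}

#[tokio::main]
async fn main() {
    let output = Output::Stream(stream::iter((0..3).map(Ok::<u64, std::io::Error>)).boxed());

    let now = Instant::now();
    match output {
        Output::AffectedRows(_) => (),
        // Poll the stream to completion so the elapsed time covers result fetching.
        Output::Stream(s) => {
            let _rows = s.try_collect::<Vec<_>>().await.unwrap();
        }
    }
    println!("query took {}ms", now.elapsed().as_millis());
}
```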
@@ -491,13 +510,14 @@ fn main() {
|
||||
.block_on(async {
|
||||
let client = Client::with_urls(vec![&args.endpoint]);
|
||||
let db = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client);
|
||||
let table_name = new_table_name();
|
||||
|
||||
if !args.skip_write {
|
||||
do_write(&args, &db).await;
|
||||
do_write(&args, &db, &table_name).await;
|
||||
}
|
||||
|
||||
if !args.skip_read {
|
||||
do_query(args.iter_num, &db).await;
|
||||
do_query(args.iter_num, &db, &table_name).await;
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
@@ -13,17 +13,19 @@ rpc_runtime_size = 8
|
||||
require_lease_before_startup = false
|
||||
|
||||
[heartbeat]
|
||||
# Interval for sending heartbeat messages to the Metasrv in milliseconds, 5000 by default.
|
||||
interval_millis = 5000
|
||||
# Interval for sending heartbeat messages to the Metasrv, 3 seconds by default.
|
||||
interval = "3s"
|
||||
|
||||
# Metasrv client options.
|
||||
[meta_client]
|
||||
# Metasrv address list.
|
||||
metasrv_addrs = ["127.0.0.1:3002"]
|
||||
# Operation timeout in milliseconds, 3000 by default.
|
||||
timeout_millis = 3000
|
||||
# Connect server timeout in milliseconds, 5000 by default.
|
||||
connect_timeout_millis = 5000
|
||||
# Heartbeat timeout, 500 milliseconds by default.
|
||||
heartbeat_timeout = "500ms"
|
||||
# Operation timeout, 3 seconds by default.
|
||||
timeout = "3s"
|
||||
# Connect server timeout, 1 second by default.
|
||||
connect_timeout = "1s"
|
||||
# `TCP_NODELAY` option for accepted connections, true by default.
|
||||
tcp_nodelay = true
|
||||
|
||||
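The config migration in this hunk (and the frontend/metasrv hunks below) replaces integer `*_millis` fields with humantime-style duration strings such as "3s" and "500ms". Since the workspace already depends on humantime-serde, such fields can be deserialized roughly as in the sketch below; the struct and field names are illustrative stand-ins, not the actual GreptimeDB option types:

```rust
// Sketch only: deserializing humantime-style duration strings ("3s", "500ms")
// from TOML with humantime-serde. Not the real GreptimeDB config structs.
use std::time::Duration;

use serde::Deserialize;

#[derive(Debug, Deserialize)]
struct MetaClientOptions {
    #[serde(with = "humantime_serde")]
    timeout: Duration,
    #[serde(with = "humantime_serde")]
    connect_timeout: Duration,
    #[serde(with = "humantime_serde")]
    heartbeat_timeout: Duration,
    tcp_nodelay: bool,
}

fn main() {
    let src = r#"
        timeout = "3s"
        connect_timeout = "1s"
        heartbeat_timeout = "500ms"
        tcp_nodelay = true
    "#;
    let opts: MetaClientOptions = toml::from_str(src).unwrap();
    assert_eq!(opts.timeout, Duration::from_secs(3));
    assert_eq!(opts.heartbeat_timeout, Duration::from_millis(500));
    println!("{opts:?}");
}
```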
@@ -45,6 +47,12 @@ type = "File"
|
||||
# TTL for all tables. Disabled by default.
|
||||
# global_ttl = "7d"
|
||||
|
||||
# Cache configuration for object storage such as 'S3' etc.
|
||||
# The local file cache directory
|
||||
# cache_path = "/path/local_cache"
|
||||
# The local file cache capacity in bytes.
|
||||
# cache_capacity = "256MB"
|
||||
|
||||
# Compaction options, see `standalone.example.toml`.
|
||||
[storage.compaction]
|
||||
max_inflight_tasks = 4
|
||||
|
||||
@@ -2,13 +2,13 @@
|
||||
mode = "distributed"
|
||||
|
||||
[heartbeat]
|
||||
# Interval for sending heartbeat task to the Metasrv in milliseconds, 5000 by default.
|
||||
interval_millis = 5000
|
||||
# Interval for retry sending heartbeat task in milliseconds, 5000 by default.
|
||||
retry_interval_millis = 5000
|
||||
# Interval for sending heartbeat task to the Metasrv, 5 seconds by default.
|
||||
interval = "5s"
|
||||
# Interval for retry sending heartbeat task, 5 seconds by default.
|
||||
retry_interval = "5s"
|
||||
|
||||
# HTTP server options, see `standalone.example.toml`.
|
||||
[http_options]
|
||||
[http]
|
||||
addr = "127.0.0.1:4000"
|
||||
timeout = "30s"
|
||||
body_limit = "64MB"
|
||||
@@ -59,10 +59,10 @@ enable = true
|
||||
# Metasrv client options, see `datanode.example.toml`.
|
||||
[meta_client]
|
||||
metasrv_addrs = ["127.0.0.1:3002"]
|
||||
timeout_millis = 3000
|
||||
timeout = "3s"
|
||||
# DDL timeouts options.
|
||||
ddl_timeout_millis = 10000
|
||||
connect_timeout_millis = 5000
|
||||
ddl_timeout = "10s"
|
||||
connect_timeout = "1s"
|
||||
tcp_nodelay = true
|
||||
|
||||
# Log options, see `standalone.example.toml`
|
||||
|
||||
@@ -32,6 +32,6 @@ retry_delay = "500ms"
|
||||
# [datanode]
|
||||
# # Datanode client options.
|
||||
# [datanode.client_options]
|
||||
# timeout_millis = 10000
|
||||
# connect_timeout_millis = 10000
|
||||
# timeout = "10s"
|
||||
# connect_timeout = "10s"
|
||||
# tcp_nodelay = true
|
||||
|
||||
@@ -93,8 +93,8 @@ read_batch_size = 128
|
||||
# Whether to sync log file after every write.
|
||||
sync_write = false
|
||||
|
||||
# Kv options.
|
||||
[kv_store]
|
||||
# Metadata storage options.
|
||||
[metadata_store]
|
||||
# Kv file size in bytes.
|
||||
file_size = "256MB"
|
||||
# Kv purge threshold.
|
||||
@@ -115,6 +115,10 @@ data_home = "/tmp/greptimedb/"
|
||||
type = "File"
|
||||
# TTL for all tables. Disabled by default.
|
||||
# global_ttl = "7d"
|
||||
# Cache configuration for object storage such as 'S3' etc.
|
||||
# cache_path = "/path/local_cache"
|
||||
# The local file cache capacity in bytes.
|
||||
# cache_capacity = "256MB"
|
||||
|
||||
# Compaction options.
|
||||
[storage.compaction]
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
FROM ubuntu:22.04 as builder
|
||||
FROM ubuntu:20.04 as builder
|
||||
|
||||
ARG CARGO_PROFILE
|
||||
ARG FEATURES
|
||||
@@ -7,6 +7,11 @@ ARG OUTPUT_DIR
|
||||
ENV LANG en_US.utf8
|
||||
WORKDIR /greptimedb
|
||||
|
||||
# Add PPA for Python 3.10.
|
||||
RUN apt-get update && \
|
||||
DEBIAN_FRONTEND=noninteractive apt-get install -y software-properties-common && \
|
||||
add-apt-repository ppa:deadsnakes/ppa -y
|
||||
|
||||
# Install dependencies.
|
||||
RUN --mount=type=cache,target=/var/cache/apt \
|
||||
apt-get update && apt-get install -y \
|
||||
|
||||
docker/dev-builder/android/Dockerfile (new file, 41 lines)
@@ -0,0 +1,41 @@
|
||||
FROM --platform=linux/amd64 saschpe/android-ndk:34-jdk17.0.8_7-ndk25.2.9519653-cmake3.22.1
|
||||
|
||||
ENV LANG en_US.utf8
|
||||
WORKDIR /greptimedb
|
||||
|
||||
# Rename libunwind to libgcc
|
||||
RUN cp ${NDK_ROOT}/toolchains/llvm/prebuilt/linux-x86_64/lib64/clang/14.0.7/lib/linux/aarch64/libunwind.a ${NDK_ROOT}/toolchains/llvm/prebuilt/linux-x86_64/lib64/clang/14.0.7/lib/linux/aarch64/libgcc.a
|
||||
|
||||
# Install dependencies.
|
||||
RUN apt-get update && apt-get install -y \
|
||||
libssl-dev \
|
||||
protobuf-compiler \
|
||||
curl \
|
||||
git \
|
||||
build-essential \
|
||||
pkg-config \
|
||||
python3 \
|
||||
python3-dev \
|
||||
python3-pip \
|
||||
&& pip3 install --upgrade pip \
|
||||
&& pip3 install pyarrow
|
||||
|
||||
# Trust workdir
|
||||
RUN git config --global --add safe.directory /greptimedb
|
||||
|
||||
# Install Rust.
|
||||
SHELL ["/bin/bash", "-c"]
|
||||
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
|
||||
ENV PATH /root/.cargo/bin/:$PATH
|
||||
|
||||
# Add android toolchains
|
||||
ARG RUST_TOOLCHAIN
|
||||
RUN rustup toolchain install ${RUST_TOOLCHAIN}
|
||||
RUN rustup target add aarch64-linux-android
|
||||
|
||||
# Install cargo-ndk
|
||||
RUN cargo install cargo-ndk
|
||||
ENV ANDROID_NDK_HOME $NDK_ROOT
|
||||
|
||||
# Builder entrypoint.
|
||||
CMD ["cargo", "ndk", "--platform", "23", "-t", "aarch64-linux-android", "build", "--bin", "greptime", "--profile", "release", "--no-default-features"]
|
||||
@@ -1,8 +1,13 @@
|
||||
FROM ubuntu:22.04
|
||||
FROM ubuntu:20.04
|
||||
|
||||
ENV LANG en_US.utf8
|
||||
WORKDIR /greptimedb
|
||||
|
||||
# Add PPA for Python 3.10.
|
||||
RUN apt-get update && \
|
||||
DEBIAN_FRONTEND=noninteractive apt-get install -y software-properties-common && \
|
||||
add-apt-repository ppa:deadsnakes/ppa -y
|
||||
|
||||
# Install dependencies.
|
||||
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
|
||||
libssl-dev \
|
||||
|
||||
docs/banner/KCCNC_NA_2023_1000x200_Email Banner.png (new binary file, 51 KiB; not shown)
docs/benchmarks/tsbs/v0.4.0.md (new file, 61 lines)
@@ -0,0 +1,61 @@
|
||||
# TSBS benchmark - v0.4.0
|
||||
|
||||
## Environment
|
||||
|
||||
### Local
|
||||
| | |
|
||||
| ------ | ---------------------------------- |
|
||||
| CPU | AMD Ryzen 7 7735HS (8 core 3.2GHz) |
|
||||
| Memory | 32GB |
|
||||
| Disk | SOLIDIGM SSDPFKNU010TZ |
|
||||
| OS | Ubuntu 22.04.2 LTS |
|
||||
|
||||
### Aliyun amd64
|
||||
|
||||
| | |
|
||||
| ------- | -------------- |
|
||||
| Machine | ecs.g7.4xlarge |
|
||||
| CPU | 16 core |
|
||||
| Memory | 64GB |
|
||||
| Disk | 100G |
|
||||
| OS | Ubuntu 22.04 |
|
||||
|
||||
### Aliyun arm64
|
||||
|
||||
| | |
|
||||
| ------- | ----------------- |
|
||||
| Machine | ecs.g8y.4xlarge |
|
||||
| CPU | 16 core |
|
||||
| Memory | 64GB |
|
||||
| Disk | 100G |
|
||||
| OS | Ubuntu 22.04 ARM |
|
||||
|
||||
|
||||
## Write performance
|
||||
|
||||
| Environment | Ingest rate(rows/s) |
|
||||
| ------------------ | --------------------- |
|
||||
| Local | 365280.60 |
|
||||
| Aliyun g7.4xlarge | 341368.72 |
|
||||
| Aliyun g8y.4xlarge | 320907.29 |
|
||||
|
||||
|
||||
## Query performance
|
||||
|
||||
| Query type | Local (ms) | Aliyun g7.4xlarge (ms) | Aliyun g8y.4xlarge (ms) |
|
||||
| --------------------- | ---------- | ---------------------- | ----------------------- |
|
||||
| cpu-max-all-1 | 50.70 | 31.46 | 47.61 |
|
||||
| cpu-max-all-8 | 262.16 | 129.26 | 152.43 |
|
||||
| double-groupby-1 | 2512.71 | 1408.19 | 1586.10 |
|
||||
| double-groupby-5 | 3896.15 | 2304.29 | 2585.29 |
|
||||
| double-groupby-all | 5404.67 | 3337.61 | 3773.91 |
|
||||
| groupby-orderby-limit | 3786.98 | 2065.72 | 2312.57 |
|
||||
| high-cpu-1 | 71.96 | 37.29 | 54.01 |
|
||||
| high-cpu-all | 9468.75 | 7595.69 | 8467.46 |
|
||||
| lastpoint | 13379.43 | 11253.76 | 12949.40 |
|
||||
| single-groupby-1-1-1 | 20.72 | 12.16 | 13.35 |
|
||||
| single-groupby-1-1-12 | 28.53 | 15.67 | 21.62 |
|
||||
| single-groupby-1-8-1 | 72.23 | 37.90 | 43.52 |
|
||||
| single-groupby-5-1-1 | 26.75 | 15.59 | 17.48 |
|
||||
| single-groupby-5-1-12 | 45.41 | 22.90 | 31.96 |
|
||||
| single-groupby-5-8-1 | 107.96 | 59.76 | 69.58 |
|
||||
@@ -7,6 +7,7 @@ license.workspace = true
|
||||
[dependencies]
|
||||
common-base = { workspace = true }
|
||||
common-error = { workspace = true }
|
||||
common-macro = { workspace = true }
|
||||
common-time = { workspace = true }
|
||||
datatypes = { workspace = true }
|
||||
greptime-proto.workspace = true
|
||||
|
||||
@@ -16,14 +16,16 @@ use std::any::Any;
|
||||
|
||||
use common_error::ext::ErrorExt;
|
||||
use common_error::status_code::StatusCode;
|
||||
use common_macro::stack_trace_debug;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use snafu::prelude::*;
|
||||
use snafu::Location;
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
|
||||
#[derive(Debug, Snafu)]
|
||||
#[derive(Snafu)]
|
||||
#[snafu(visibility(pub))]
|
||||
#[stack_trace_debug]
|
||||
pub enum Error {
|
||||
#[snafu(display("Unknown proto column datatype: {}", datatype))]
|
||||
UnknownColumnDataType { datatype: i32, location: Location },
|
||||
@@ -34,22 +36,14 @@ pub enum Error {
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display(
|
||||
"Failed to convert column default constraint, column: {}, source: {}",
|
||||
column,
|
||||
source
|
||||
))]
|
||||
#[snafu(display("Failed to convert column default constraint, column: {}", column))]
|
||||
ConvertColumnDefaultConstraint {
|
||||
column: String,
|
||||
location: Location,
|
||||
source: datatypes::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display(
|
||||
"Invalid column default constraint, column: {}, source: {}",
|
||||
column,
|
||||
source
|
||||
))]
|
||||
#[snafu(display("Invalid column default constraint, column: {}", column))]
|
||||
InvalidColumnDefaultConstraint {
|
||||
column: String,
|
||||
location: Location,
|
||||
|
||||
@@ -4,8 +4,6 @@ version.workspace = true
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[features]
|
||||
default = []
|
||||
testing = []
|
||||
@@ -14,6 +12,7 @@ testing = []
|
||||
api.workspace = true
|
||||
async-trait.workspace = true
|
||||
common-error.workspace = true
|
||||
common-macro.workspace = true
|
||||
digest = "0.10"
|
||||
hex = { version = "0.4" }
|
||||
secrecy = { version = "0.8", features = ["serde", "alloc"] }
|
||||
|
||||
@@ -14,10 +14,12 @@
|
||||
|
||||
use common_error::ext::{BoxedError, ErrorExt};
|
||||
use common_error::status_code::StatusCode;
|
||||
use common_macro::stack_trace_debug;
|
||||
use snafu::{Location, Snafu};
|
||||
|
||||
#[derive(Debug, Snafu)]
|
||||
#[derive(Snafu)]
|
||||
#[snafu(visibility(pub))]
|
||||
#[stack_trace_debug]
|
||||
pub enum Error {
|
||||
#[snafu(display("Invalid config value: {}, {}", value, msg))]
|
||||
InvalidConfig { value: String, msg: String },
|
||||
@@ -28,13 +30,14 @@ pub enum Error {
|
||||
#[snafu(display("Internal state error: {}", msg))]
|
||||
InternalState { msg: String },
|
||||
|
||||
#[snafu(display("IO error, source: {}", source))]
|
||||
#[snafu(display("IO error"))]
|
||||
Io {
|
||||
source: std::io::Error,
|
||||
#[snafu(source)]
|
||||
error: std::io::Error,
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Auth failed, source: {}", source))]
|
||||
#[snafu(display("Auth failed"))]
|
||||
AuthBackend {
|
||||
location: Location,
|
||||
source: BoxedError,
|
||||
|
||||
@@ -16,6 +16,7 @@ async-trait = "0.1"
|
||||
common-catalog = { workspace = true }
|
||||
common-error = { workspace = true }
|
||||
common-grpc = { workspace = true }
|
||||
common-macro = { workspace = true }
|
||||
common-meta = { workspace = true }
|
||||
common-query = { workspace = true }
|
||||
common-recordbatch = { workspace = true }
|
||||
@@ -30,7 +31,7 @@ futures-util.workspace = true
|
||||
lazy_static.workspace = true
|
||||
meta-client = { workspace = true }
|
||||
metrics.workspace = true
|
||||
moka = { version = "0.11", features = ["future"] }
|
||||
moka = { workspace = true, features = ["future"] }
|
||||
parking_lot = "0.12"
|
||||
partition.workspace = true
|
||||
regex.workspace = true
|
||||
|
||||
@@ -17,53 +17,48 @@ use std::fmt::Debug;
|
||||
|
||||
use common_error::ext::{BoxedError, ErrorExt};
|
||||
use common_error::status_code::StatusCode;
|
||||
use common_macro::stack_trace_debug;
|
||||
use datafusion::error::DataFusionError;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use snafu::{Location, Snafu};
|
||||
use table::metadata::TableId;
|
||||
use tokio::task::JoinError;
|
||||
|
||||
#[derive(Debug, Snafu)]
|
||||
#[derive(Snafu)]
|
||||
#[snafu(visibility(pub))]
|
||||
#[stack_trace_debug]
|
||||
pub enum Error {
|
||||
#[snafu(display("Failed to list catalogs, source: {}", source))]
|
||||
#[snafu(display("Failed to list catalogs"))]
|
||||
ListCatalogs {
|
||||
location: Location,
|
||||
source: BoxedError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to list {}'s schemas, source: {}", catalog, source))]
|
||||
#[snafu(display("Failed to list {}'s schemas", catalog))]
|
||||
ListSchemas {
|
||||
location: Location,
|
||||
catalog: String,
|
||||
source: BoxedError,
|
||||
},
|
||||
|
||||
#[snafu(display(
|
||||
"Failed to re-compile script due to internal error, source: {}",
|
||||
source
|
||||
))]
|
||||
#[snafu(display("Failed to re-compile script due to internal error"))]
|
||||
CompileScriptInternal {
|
||||
location: Location,
|
||||
source: BoxedError,
|
||||
},
|
||||
#[snafu(display("Failed to open system catalog table, source: {}", source))]
|
||||
#[snafu(display("Failed to open system catalog table"))]
|
||||
OpenSystemCatalog {
|
||||
location: Location,
|
||||
source: table::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to create system catalog table, source: {}", source))]
|
||||
#[snafu(display("Failed to create system catalog table"))]
|
||||
CreateSystemCatalog {
|
||||
location: Location,
|
||||
source: table::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display(
|
||||
"Failed to create table, table info: {}, source: {}",
|
||||
table_info,
|
||||
source
|
||||
))]
|
||||
#[snafu(display("Failed to create table, table info: {}", table_info))]
|
||||
CreateTable {
|
||||
table_info: String,
|
||||
location: Location,
|
||||
@@ -97,13 +92,14 @@ pub enum Error {
|
||||
#[snafu(display("Catalog value is not present"))]
|
||||
EmptyValue { location: Location },
|
||||
|
||||
#[snafu(display("Failed to deserialize value, source: {}", source))]
|
||||
#[snafu(display("Failed to deserialize value"))]
|
||||
ValueDeserialize {
|
||||
source: serde_json::error::Error,
|
||||
#[snafu(source)]
|
||||
error: serde_json::error::Error,
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Table engine not found: {}, source: {}", engine_name, source))]
|
||||
#[snafu(display("Table engine not found: {}", engine_name))]
|
||||
TableEngineNotFound {
|
||||
engine_name: String,
|
||||
location: Location,
|
||||
@@ -141,15 +137,18 @@ pub enum Error {
|
||||
#[snafu(display("Operation {} not supported", op))]
|
||||
NotSupported { op: String, location: Location },
|
||||
|
||||
#[snafu(display("Failed to open table {table_id}, source: {source}, at {location}"))]
|
||||
#[snafu(display("Failed to open table {table_id}"))]
|
||||
OpenTable {
|
||||
table_id: TableId,
|
||||
location: Location,
|
||||
source: table::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to open table in parallel, source: {}", source))]
|
||||
ParallelOpenTable { source: JoinError },
|
||||
#[snafu(display("Failed to open table in parallel"))]
|
||||
ParallelOpenTable {
|
||||
#[snafu(source)]
|
||||
error: JoinError,
|
||||
},
|
||||
|
||||
#[snafu(display("Table not found while opening table, table info: {}", table_info))]
|
||||
TableNotFound {
|
||||
@@ -163,58 +162,52 @@ pub enum Error {
|
||||
source: common_recordbatch::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to create recordbatch, source: {}", source))]
|
||||
#[snafu(display("Failed to create recordbatch"))]
|
||||
CreateRecordBatch {
|
||||
location: Location,
|
||||
source: common_recordbatch::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display(
|
||||
"Failed to insert table creation record to system catalog, source: {}",
|
||||
source
|
||||
))]
|
||||
#[snafu(display("Failed to insert table creation record to system catalog"))]
|
||||
InsertCatalogRecord {
|
||||
location: Location,
|
||||
source: table::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to scan system catalog table, source: {}", source))]
|
||||
#[snafu(display("Failed to scan system catalog table"))]
|
||||
SystemCatalogTableScan {
|
||||
location: Location,
|
||||
source: table::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("{source}"))]
|
||||
#[snafu(display(""))]
|
||||
Internal {
|
||||
location: Location,
|
||||
source: BoxedError,
|
||||
},
|
||||
|
||||
#[snafu(display(
|
||||
"Failed to upgrade weak catalog manager reference. location: {}",
|
||||
location
|
||||
))]
|
||||
#[snafu(display("Failed to upgrade weak catalog manager reference"))]
|
||||
UpgradeWeakCatalogManagerRef { location: Location },
|
||||
|
||||
#[snafu(display("Failed to execute system catalog table scan, source: {}", source))]
|
||||
#[snafu(display("Failed to execute system catalog table scan"))]
|
||||
SystemCatalogTableScanExec {
|
||||
location: Location,
|
||||
source: common_query::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Cannot parse catalog value, source: {}", source))]
|
||||
#[snafu(display("Cannot parse catalog value"))]
|
||||
InvalidCatalogValue {
|
||||
location: Location,
|
||||
source: common_catalog::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to perform metasrv operation, source: {}", source))]
|
||||
#[snafu(display("Failed to perform metasrv operation"))]
|
||||
MetaSrv {
|
||||
location: Location,
|
||||
source: meta_client::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Invalid table info in catalog, source: {}", source))]
|
||||
#[snafu(display("Invalid table info in catalog"))]
|
||||
InvalidTableInfoInCatalog {
|
||||
location: Location,
|
||||
source: datatypes::error::Error,
|
||||
@@ -223,14 +216,14 @@ pub enum Error {
|
||||
#[snafu(display("Illegal access to catalog: {} and schema: {}", catalog, schema))]
|
||||
QueryAccessDenied { catalog: String, schema: String },
|
||||
|
||||
#[snafu(display("{}: {}", msg, source))]
|
||||
#[snafu(display(""))]
|
||||
Datafusion {
|
||||
msg: String,
|
||||
source: DataFusionError,
|
||||
#[snafu(source)]
|
||||
error: DataFusionError,
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Table schema mismatch, source: {}", source))]
|
||||
#[snafu(display("Table schema mismatch"))]
|
||||
TableSchemaMismatch {
|
||||
location: Location,
|
||||
source: table::error::Error,
|
||||
@@ -239,7 +232,7 @@ pub enum Error {
|
||||
#[snafu(display("A generic error has occurred, msg: {}", msg))]
|
||||
Generic { msg: String, location: Location },
|
||||
|
||||
#[snafu(display("Table metadata manager error: {}", source))]
|
||||
#[snafu(display("Table metadata manager error"))]
|
||||
TableMetadataManager {
|
||||
source: common_meta::error::Error,
|
||||
location: Location,
|
||||
|
||||
@@ -12,8 +12,6 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
pub use client::{CachedMetaKvBackend, MetaKvBackend};
|
||||
|
||||
mod client;
|
||||
@@ -22,18 +20,3 @@ mod manager;
|
||||
#[cfg(feature = "testing")]
|
||||
pub mod mock;
|
||||
pub use manager::KvBackendCatalogManager;
|
||||
|
||||
/// KvBackend cache invalidator
|
||||
#[async_trait::async_trait]
|
||||
pub trait KvCacheInvalidator: Send + Sync {
|
||||
async fn invalidate_key(&self, key: &[u8]);
|
||||
}
|
||||
|
||||
pub type KvCacheInvalidatorRef = Arc<dyn KvCacheInvalidator>;
|
||||
|
||||
pub struct DummyKvCacheInvalidator;
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl KvCacheInvalidator for DummyKvCacheInvalidator {
|
||||
async fn invalidate_key(&self, _key: &[u8]) {}
|
||||
}
|
||||
|
||||
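The hunk above removes `KvCacheInvalidator` and its dummy implementation from the catalog crate; in this release the trait lives in `common_meta::cache_invalidator` (see the import change in the next hunk). A hedged sketch of what an implementor looks like, backed by a moka future cache as in `CachedMetaKvBackend`; the `MokaKvCache` type here is hypothetical:

```rust
// Sketch only: a hypothetical KvCacheInvalidator implementor backed by a moka
// future cache, mirroring how CachedMetaKvBackend invalidates cached keys.
use std::sync::Arc;

use moka::future::Cache;

#[async_trait::async_trait]
pub trait KvCacheInvalidator: Send + Sync {
    async fn invalidate_key(&self, key: &[u8]);
}

pub type KvCacheInvalidatorRef = Arc<dyn KvCacheInvalidator>;

pub struct MokaKvCache {
    cache: Cache<Vec<u8>, Vec<u8>>,
}

impl MokaKvCache {
    pub fn new(max_capacity: u64) -> Self {
        Self { cache: Cache::new(max_capacity) }
    }
}

#[async_trait::async_trait]
impl KvCacheInvalidator for MokaKvCache {
    async fn invalidate_key(&self, key: &[u8]) {
        // Drop the cached entry so the next read falls back to the KV backend.
        self.cache.invalidate(key).await;
    }
}
```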
@@ -18,6 +18,7 @@ use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use common_error::ext::BoxedError;
|
||||
use common_meta::cache_invalidator::KvCacheInvalidator;
|
||||
use common_meta::error::Error::{CacheNotGet, GetKvCache};
|
||||
use common_meta::error::{CacheNotGetSnafu, Error, ExternalSnafu, Result};
|
||||
use common_meta::kv_backend::{KvBackend, KvBackendRef, TxnService};
|
||||
@@ -28,12 +29,11 @@ use common_meta::rpc::store::{
|
||||
RangeRequest, RangeResponse,
|
||||
};
|
||||
use common_meta::rpc::KeyValue;
|
||||
use common_telemetry::timer;
|
||||
use common_telemetry::{debug, timer};
|
||||
use meta_client::client::MetaClient;
|
||||
use moka::future::{Cache, CacheBuilder};
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
|
||||
use super::KvCacheInvalidator;
|
||||
use crate::metrics::{METRIC_CATALOG_KV_GET, METRIC_CATALOG_KV_REMOTE_GET};
|
||||
|
||||
const CACHE_MAX_CAPACITY: u64 = 10000;
|
||||
@@ -197,7 +197,8 @@ impl KvBackend for CachedMetaKvBackend {
|
||||
#[async_trait::async_trait]
|
||||
impl KvCacheInvalidator for CachedMetaKvBackend {
|
||||
async fn invalidate_key(&self, key: &[u8]) {
|
||||
self.cache.invalidate(key).await
|
||||
self.cache.invalidate(key).await;
|
||||
debug!("invalidated cache key: {}", String::from_utf8_lossy(key));
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -18,18 +18,15 @@ use std::sync::{Arc, Weak};

use common_catalog::consts::{DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, NUMBERS_TABLE_ID};
use common_error::ext::BoxedError;
use common_meta::cache_invalidator::{CacheInvalidator, Context};
use common_meta::cache_invalidator::{CacheInvalidator, CacheInvalidatorRef, Context};
use common_meta::datanode_manager::DatanodeManagerRef;
use common_meta::error::Result as MetaResult;
use common_meta::key::catalog_name::CatalogNameKey;
use common_meta::key::schema_name::SchemaNameKey;
use common_meta::key::table_info::TableInfoKey;
use common_meta::key::table_name::TableNameKey;
use common_meta::key::table_route::TableRouteKey;
use common_meta::key::{TableMetaKey, TableMetadataManager, TableMetadataManagerRef};
use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
use common_meta::kv_backend::KvBackendRef;
use common_meta::table_name::TableName;
use common_telemetry::debug;
use futures_util::TryStreamExt;
use partition::manager::{PartitionRuleManager, PartitionRuleManagerRef};
use snafu::prelude::*;
@@ -43,7 +40,6 @@ use crate::error::{
TableMetadataManagerSnafu,
};
use crate::information_schema::{InformationSchemaProvider, COLUMNS, TABLES};
use crate::kvbackend::KvCacheInvalidatorRef;
use crate::CatalogManager;

/// Access all existing catalog, schema and tables.
@@ -56,7 +52,7 @@ pub struct KvBackendCatalogManager {
// TODO(LFC): Maybe use a real implementation for Standalone mode.
// Now we use `NoopKvCacheInvalidator` for Standalone mode. In Standalone mode, the KV backend
// is implemented by RaftEngine. Maybe we need a cache for it?
backend_cache_invalidator: KvCacheInvalidatorRef,
cache_invalidator: CacheInvalidatorRef,
partition_manager: PartitionRuleManagerRef,
table_metadata_manager: TableMetadataManagerRef,
datanode_manager: DatanodeManagerRef,
@@ -66,53 +62,29 @@ pub struct KvBackendCatalogManager {

#[async_trait::async_trait]
impl CacheInvalidator for KvBackendCatalogManager {
async fn invalidate_table_name(&self, _ctx: &Context, table_name: TableName) -> MetaResult<()> {
let key: TableNameKey = (&table_name).into();

self.backend_cache_invalidator
.invalidate_key(&key.as_raw_key())
.await;
debug!(
"invalidated cache key: {}",
String::from_utf8_lossy(&key.as_raw_key())
);

Ok(())
async fn invalidate_table_name(&self, ctx: &Context, table_name: TableName) -> MetaResult<()> {
self.cache_invalidator
.invalidate_table_name(ctx, table_name)
.await
}

async fn invalidate_table_id(&self, _ctx: &Context, table_id: TableId) -> MetaResult<()> {
let key = TableInfoKey::new(table_id);
self.backend_cache_invalidator
.invalidate_key(&key.as_raw_key())
.await;
debug!(
"invalidated cache key: {}",
String::from_utf8_lossy(&key.as_raw_key())
);

let key = &TableRouteKey { table_id };
self.backend_cache_invalidator
.invalidate_key(&key.as_raw_key())
.await;
debug!(
"invalidated cache key: {}",
String::from_utf8_lossy(&key.as_raw_key())
);

Ok(())
async fn invalidate_table_id(&self, ctx: &Context, table_id: TableId) -> MetaResult<()> {
self.cache_invalidator
.invalidate_table_id(ctx, table_id)
.await
}
}

impl KvBackendCatalogManager {
pub fn new(
backend: KvBackendRef,
backend_cache_invalidator: KvCacheInvalidatorRef,
cache_invalidator: CacheInvalidatorRef,
datanode_manager: DatanodeManagerRef,
) -> Arc<Self> {
Arc::new_cyclic(|me| Self {
backend_cache_invalidator,
partition_manager: Arc::new(PartitionRuleManager::new(backend.clone())),
table_metadata_manager: Arc::new(TableMetadataManager::new(backend)),
cache_invalidator,
datanode_manager,
system_catalog: SystemCatalog {
catalog_manager: me.clone(),
@@ -131,12 +103,6 @@ impl KvBackendCatalogManager {
pub fn datanode_manager(&self) -> DatanodeManagerRef {
self.datanode_manager.clone()
}

pub async fn invalidate_schema(&self, catalog: &str, schema: &str) {
let key = SchemaNameKey::new(catalog, schema).as_raw_key();

self.backend_cache_invalidator.invalidate_key(&key).await;
}
}

#[async_trait::async_trait]
@@ -253,6 +219,7 @@ impl CatalogManager for KvBackendCatalogManager {
.get(table_id)
.await
.context(TableMetadataManagerSnafu)?
.map(|v| v.into_inner())
else {
return Ok(None);
};
@@ -20,10 +20,8 @@ use std::any::Any;
use std::fmt::{Debug, Formatter};
use std::sync::Arc;

use api::v1::meta::RegionStat;
use common_telemetry::warn;
use futures::future::BoxFuture;
use table::metadata::{TableId, TableType};
use table::metadata::TableId;
use table::requests::CreateTableRequest;
use table::TableRef;

@@ -124,66 +122,3 @@ pub struct RegisterSchemaRequest {
pub catalog: String,
pub schema: String,
}

/// The stat of regions in the datanode node.
/// The number of regions can be got from len of vec.
///
/// Ignores any errors occurred during iterating regions. The intention of this method is to
/// collect region stats that will be carried in Datanode's heartbeat to Metasrv, so it's a
/// "try our best" job.
pub async fn datanode_stat(catalog_manager: &CatalogManagerRef) -> (u64, Vec<RegionStat>) {
let mut region_number: u64 = 0;
let mut region_stats = Vec::new();

let Ok(catalog_names) = catalog_manager.catalog_names().await else {
return (region_number, region_stats);
};
for catalog_name in catalog_names {
let Ok(schema_names) = catalog_manager.schema_names(&catalog_name).await else {
continue;
};
for schema_name in schema_names {
let Ok(table_names) = catalog_manager
.table_names(&catalog_name, &schema_name)
.await
else {
continue;
};
for table_name in table_names {
let Ok(Some(table)) = catalog_manager
.table(&catalog_name, &schema_name, &table_name)
.await
else {
continue;
};

if table.table_type() != TableType::Base {
continue;
}

let table_info = table.table_info();
let region_numbers = &table_info.meta.region_numbers;
region_number += region_numbers.len() as u64;

let engine = &table_info.meta.engine;

match table.region_stats() {
Ok(stats) => {
let stats = stats.into_iter().map(|stat| RegionStat {
region_id: stat.region_id,
approximate_bytes: stat.disk_usage_bytes as i64,
engine: engine.clone(),
..Default::default()
});

region_stats.extend(stats);
}
Err(e) => {
warn!("Failed to get region status, err: {:?}", e);
}
};
}
}
}
(region_number, region_stats)
}
@@ -16,6 +16,7 @@ common-base = { workspace = true }
common-catalog = { workspace = true }
common-error = { workspace = true }
common-grpc = { workspace = true }
common-macro = { workspace = true }
common-meta = { workspace = true }
common-query = { workspace = true }
common-recordbatch = { workspace = true }
@@ -26,7 +27,7 @@ datatypes = { workspace = true }
derive_builder.workspace = true
enum_dispatch = "0.3"
futures-util.workspace = true
moka = { version = "0.9", features = ["future"] }
moka = { workspace = true, features = ["future"] }
parking_lot = "0.12"
prost.workspace = true
rand.workspace = true
@@ -42,14 +42,14 @@ async fn run() {
.insert(vec![to_insert_request(weather_records_1())])
.await
{
error!("Error: {e}");
error!("Error: {e:?}");
}

if let Err(e) = stream_inserter
.insert(vec![to_insert_request(weather_records_2())])
.await
{
error!("Error: {e}");
error!("Error: {e:?}");
}

let result = stream_inserter.finish().await;
@@ -59,7 +59,7 @@ async fn run() {
info!("Rows written: {rows}");
}
Err(e) => {
error!("Error: {e}");
error!("Error: {e:?}");
}
};
}
@@ -138,8 +138,20 @@ impl Client {
Ok((addr, channel))
}

fn max_grpc_message_size(&self) -> usize {
self.inner.channel_manager.config().max_message_size
fn max_grpc_recv_message_size(&self) -> usize {
self.inner
.channel_manager
.config()
.max_recv_message_size
.as_bytes() as usize
}

fn max_grpc_send_message_size(&self) -> usize {
self.inner
.channel_manager
.config()
.max_send_message_size
.as_bytes() as usize
}

pub(crate) fn make_flight_client(&self) -> Result<FlightClient> {
@@ -147,7 +159,8 @@ impl Client {
Ok(FlightClient {
addr,
client: FlightServiceClient::new(channel)
.max_decoding_message_size(self.max_grpc_message_size()),
.max_decoding_message_size(self.max_grpc_recv_message_size())
.max_encoding_message_size(self.max_grpc_send_message_size()),
})
}

@@ -155,13 +168,16 @@ impl Client {
let (_, channel) = self.find_channel()?;
Ok(DatabaseClient {
inner: GreptimeDatabaseClient::new(channel)
.max_decoding_message_size(self.max_grpc_message_size()),
.max_decoding_message_size(self.max_grpc_recv_message_size())
.max_encoding_message_size(self.max_grpc_send_message_size()),
})
}

pub(crate) fn raw_region_client(&self) -> Result<PbRegionClient<Channel>> {
let (_, channel) = self.find_channel()?;
Ok(PbRegionClient::new(channel).max_decoding_message_size(self.max_grpc_message_size()))
Ok(PbRegionClient::new(channel)
.max_decoding_message_size(self.max_grpc_recv_message_size())
.max_encoding_message_size(self.max_grpc_send_message_size()))
}

pub fn make_prometheus_gateway_client(&self) -> Result<PrometheusGatewayClient<Channel>> {
@@ -276,7 +276,7 @@ impl Database {
source: BoxedError::new(ServerSnafu { code, msg }.build()),
};
logging::error!(
"Failed to do Flight get, addr: {}, code: {}, source: {}",
"Failed to do Flight get, addr: {}, code: {}, source: {:?}",
client.addr(),
tonic_code,
error
@@ -17,39 +17,37 @@ use std::any::Any;
use common_error::ext::{BoxedError, ErrorExt};
use common_error::status_code::StatusCode;
use common_error::{GREPTIME_ERROR_CODE, GREPTIME_ERROR_MSG};
use common_macro::stack_trace_debug;
use snafu::{Location, Snafu};
use tonic::{Code, Status};

#[derive(Debug, Snafu)]
#[derive(Snafu)]
#[snafu(visibility(pub))]
#[stack_trace_debug]
pub enum Error {
#[snafu(display("Illegal Flight messages, reason: {}", reason))]
IllegalFlightMessages { reason: String, location: Location },

#[snafu(display("Failed to do Flight get, code: {}, source: {}", tonic_code, source))]
#[snafu(display("Failed to do Flight get, code: {}", tonic_code))]
FlightGet {
addr: String,
tonic_code: Code,
source: BoxedError,
},

#[snafu(display(
"Failure occurs during handling request, location: {}, source: {}",
location,
source
))]
#[snafu(display("Failure occurs during handling request"))]
HandleRequest {
location: Location,
source: BoxedError,
},

#[snafu(display("Failed to convert FlightData, source: {}", source))]
#[snafu(display("Failed to convert FlightData"))]
ConvertFlightData {
location: Location,
source: common_grpc::Error,
},

#[snafu(display("Column datatype error, source: {}", source))]
#[snafu(display("Column datatype error"))]
ColumnDataType {
location: Location,
source: api::error::Error,
@@ -61,18 +59,14 @@ pub enum Error {
#[snafu(display("Missing required field in protobuf, field: {}", field))]
MissingField { field: String, location: Location },

#[snafu(display(
"Failed to create gRPC channel, peer address: {}, source: {}",
addr,
source
))]
#[snafu(display("Failed to create gRPC channel, peer address: {}", addr))]
CreateChannel {
addr: String,
location: Location,
source: common_grpc::error::Error,
},

#[snafu(display("Failed to request RegionServer, code: {}, source: {}", code, source))]
#[snafu(display("Failed to request RegionServer, code: {}", code))]
RegionServer { code: Code, source: BoxedError },

// Server error carried in Tonic Status's metadata.

@@ -26,6 +26,8 @@ use api::v1::greptime_response::Response;
use api::v1::{AffectedRows, GreptimeResponse};
pub use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_error::status_code::StatusCode;
pub use common_query::Output;
pub use common_recordbatch::{RecordBatches, SendableRecordBatchStream};
use snafu::OptionExt;

pub use self::client::Client;
@@ -26,6 +26,7 @@ common-base = { workspace = true }
common-catalog = { workspace = true }
common-config = { workspace = true }
common-error = { workspace = true }
common-macro = { workspace = true }
common-meta = { workspace = true }
common-procedure = { workspace = true }
common-query = { workspace = true }
@@ -38,14 +39,17 @@ datanode = { workspace = true }
datatypes = { workspace = true }
either = "1.8"
etcd-client.workspace = true
file-engine = { workspace = true }
frontend = { workspace = true }
futures.workspace = true
lazy_static.workspace = true
meta-client = { workspace = true }
meta-srv = { workspace = true }
metrics.workspace = true
mito2 = { workspace = true }
nu-ansi-term = "0.46"
partition = { workspace = true }
plugins.workspace = true
prost.workspace = true
query = { workspace = true }
rand.workspace = true
@@ -78,7 +78,7 @@ impl Command {

#[derive(Parser)]
enum SubCommand {
Attach(AttachCommand),
// Attach(AttachCommand),
Upgrade(UpgradeCommand),
Bench(BenchTableMetadataCommand),
}
@@ -86,7 +86,7 @@ enum SubCommand {
impl SubCommand {
async fn build(self) -> Result<Instance> {
match self {
SubCommand::Attach(cmd) => cmd.build().await,
// SubCommand::Attach(cmd) => cmd.build().await,
SubCommand::Upgrade(cmd) => cmd.build().await,
SubCommand::Bench(cmd) => cmd.build().await,
}
@@ -104,51 +104,9 @@ pub(crate) struct AttachCommand {
}

impl AttachCommand {
#[allow(dead_code)]
async fn build(self) -> Result<Instance> {
let repl = Repl::try_new(&self).await?;
Ok(Instance::Repl(repl))
}
}

#[cfg(test)]
mod tests {
use super::*;

#[test]
fn test_load_options() {
let cmd = Command {
cmd: SubCommand::Attach(AttachCommand {
grpc_addr: String::from(""),
meta_addr: None,
disable_helper: false,
}),
};

let opts = cmd.load_options(TopLevelOptions::default()).unwrap();
let logging_opts = opts.logging_options();
assert_eq!("/tmp/greptimedb/logs", logging_opts.dir);
assert!(logging_opts.level.is_none());
assert!(!logging_opts.enable_jaeger_tracing);
}

#[test]
fn test_top_level_options() {
let cmd = Command {
cmd: SubCommand::Attach(AttachCommand {
grpc_addr: String::from(""),
meta_addr: None,
disable_helper: false,
}),
};

let opts = cmd
.load_options(TopLevelOptions {
log_dir: Some("/tmp/greptimedb/test/logs".to_string()),
log_level: Some("debug".to_string()),
})
.unwrap();
let logging_opts = opts.logging_options();
assert_eq!("/tmp/greptimedb/test/logs", logging_opts.dir);
assert_eq!("debug", logging_opts.level.as_ref().unwrap());
}
}
@@ -35,7 +35,7 @@ use query::QueryEngine;
use rustyline::error::ReadlineError;
use rustyline::Editor;
use session::context::QueryContext;
use snafu::{ErrorCompat, ResultExt};
use snafu::ResultExt;
use substrait::{DFLogicalSubstraitConvertor, SubstraitPlan};

use crate::cli::cmd::ReplCommand;
@@ -148,7 +148,7 @@ impl Repl {
.await
.map_err(|e| {
let status_code = e.status_code();
let root_cause = e.iter_chain().last().unwrap();
let root_cause = e.output_msg();
println!("Error: {}({status_code}), {root_cause}", status_code as u32)
})
.is_ok()
@@ -257,10 +257,11 @@ async fn create_query_engine(meta_addr: &str) -> Result<DatafusionQueryEngine> {
cached_meta_backend.clone(),
datanode_clients,
);
let plugins: Arc<Plugins> = Default::default();
let plugins: Plugins = Default::default();
let state = Arc::new(QueryEngineState::new(
catalog_list,
None,
None,
false,
plugins.clone(),
));
@@ -20,7 +20,7 @@ use client::api::v1::meta::TableRouteValue;
use common_meta::ddl::utils::region_storage_path;
use common_meta::error as MetaError;
use common_meta::key::catalog_name::{CatalogNameKey, CatalogNameValue};
use common_meta::key::datanode_table::{DatanodeTableKey, DatanodeTableValue};
use common_meta::key::datanode_table::{DatanodeTableKey, DatanodeTableValue, RegionInfo};
use common_meta::key::schema_name::{SchemaNameKey, SchemaNameValue};
use common_meta::key::table_info::{TableInfoKey, TableInfoValue};
use common_meta::key::table_name::{TableNameKey, TableNameValue};
@@ -405,8 +405,11 @@ impl MigrateTableMetadata {
DatanodeTableValue::new(
table_id,
regions,
engine.to_string(),
region_storage_path.clone(),
RegionInfo {
engine: engine.to_string(),
region_storage_path: region_storage_path.clone(),
region_options: (&value.table_info.meta.options).into(),
},
),
)
})
@@ -31,6 +31,10 @@ pub struct Instance {

impl Instance {
pub async fn start(&mut self) -> Result<()> {
plugins::start_datanode_plugins(self.datanode.plugins())
.await
.context(StartDatanodeSnafu)?;

self.datanode.start().await.context(StartDatanodeSnafu)
}

@@ -159,11 +163,15 @@ impl StartCommand {
Ok(Options::Datanode(Box::new(opts)))
}

async fn build(self, opts: DatanodeOptions) -> Result<Instance> {
async fn build(self, mut opts: DatanodeOptions) -> Result<Instance> {
let plugins = plugins::setup_datanode_plugins(&mut opts)
.await
.context(StartDatanodeSnafu)?;

logging::info!("Datanode start command: {:#?}", self);
logging::info!("Datanode options: {:#?}", opts);

let datanode = DatanodeBuilder::new(opts, None, Default::default())
let datanode = DatanodeBuilder::new(opts, None, plugins)
.build()
.await
.context(StartDatanodeSnafu)?;
@@ -180,6 +188,7 @@ mod tests {
use common_base::readable_size::ReadableSize;
use common_test_util::temp_dir::create_named_temp_file;
use datanode::config::{CompactionConfig, FileConfig, ObjectStoreConfig, RegionManifestConfig};
use servers::heartbeat_options::HeartbeatOptions;
use servers::Mode;

use super::*;
@@ -196,11 +205,14 @@ mod tests {
rpc_hostname = "127.0.0.1"
rpc_runtime_size = 8

[heartbeat]
interval = "300ms"

[meta_client]
metasrv_addrs = ["127.0.0.1:3002"]
timeout_millis = 3000
connect_timeout_millis = 5000
ddl_timeout_millis= 10000
timeout = "3s"
connect_timeout = "5s"
ddl_timeout = "10s"
tcp_nodelay = true

[wal]
@@ -249,18 +261,26 @@ mod tests {
assert_eq!(1024 * 1024 * 1024 * 50, options.wal.purge_threshold.0);
assert!(!options.wal.sync_write);

let HeartbeatOptions {
interval: heart_beat_interval,
..
} = options.heartbeat;

assert_eq!(300, heart_beat_interval.as_millis());

let MetaClientOptions {
metasrv_addrs: metasrv_addr,
timeout_millis,
connect_timeout_millis,
timeout,
connect_timeout,
ddl_timeout,
tcp_nodelay,
ddl_timeout_millis,
..
} = options.meta_client.unwrap();

assert_eq!(vec!["127.0.0.1:3002".to_string()], metasrv_addr);
assert_eq!(5000, connect_timeout_millis);
assert_eq!(10000, ddl_timeout_millis);
assert_eq!(3000, timeout_millis);
assert_eq!(5000, connect_timeout.as_millis());
assert_eq!(10000, ddl_timeout.as_millis());
assert_eq!(3000, timeout.as_millis());
assert!(tcp_nodelay);
assert_eq!("/tmp/greptimedb/", options.storage.data_home);
assert!(matches!(
@@ -354,8 +374,8 @@ mod tests {
rpc_runtime_size = 8

[meta_client]
timeout_millis = 3000
connect_timeout_millis = 5000
timeout = "3s"
connect_timeout = "5s"
tcp_nodelay = true

[wal]
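The config hunks above switch integer fields such as `timeout_millis = 3000` to human-readable strings such as `timeout = "3s"`. The diff itself does not show the deserialization mechanism; the sketch below (not taken from this repo) illustrates one common way to accept such values in Rust, using the `humantime-serde` crate with a hypothetical `ClientOptions` struct.

```rust
// Illustrative sketch only; ClientOptions is a made-up stand-in, and whether
// humantime-serde is the mechanism used by this repo is not shown in the diff.
use std::time::Duration;

use serde::Deserialize;

#[derive(Debug, Deserialize)]
struct ClientOptions {
    #[serde(with = "humantime_serde")]
    timeout: Duration,
    #[serde(with = "humantime_serde")]
    connect_timeout: Duration,
}

fn main() {
    // "3s" and "5s" parse into Durations, matching the assertions in the tests above.
    let opts: ClientOptions =
        toml::from_str("timeout = \"3s\"\nconnect_timeout = \"5s\"").unwrap();
    assert_eq!(opts.timeout, Duration::from_secs(3));
    assert_eq!(opts.connect_timeout, Duration::from_secs(5));
}
```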
@@ -16,62 +16,64 @@ use std::any::Any;

use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
use config::ConfigError;
use rustyline::error::ReadlineError;
use snafu::{Location, Snafu};

#[derive(Debug, Snafu)]
#[derive(Snafu)]
#[snafu(visibility(pub))]
#[stack_trace_debug]
pub enum Error {
#[snafu(display("Failed to create default catalog and schema, source: {}", source))]
#[snafu(display("Failed to create default catalog and schema"))]
InitMetadata {
location: Location,
source: common_meta::error::Error,
},

#[snafu(display("Failed to iter stream, source: {}", source))]
#[snafu(display("Failed to iter stream"))]
IterStream {
location: Location,
source: common_meta::error::Error,
},

#[snafu(display("Failed to start datanode, source: {}", source))]
#[snafu(display("Failed to start datanode"))]
StartDatanode {
location: Location,
source: datanode::error::Error,
},

#[snafu(display("Failed to shutdown datanode, source: {}", source))]
#[snafu(display("Failed to shutdown datanode"))]
ShutdownDatanode {
location: Location,
source: datanode::error::Error,
},

#[snafu(display("Failed to start frontend, source: {}", source))]
#[snafu(display("Failed to start frontend"))]
StartFrontend {
location: Location,
source: frontend::error::Error,
},

#[snafu(display("Failed to shutdown frontend, source: {}", source))]
#[snafu(display("Failed to shutdown frontend"))]
ShutdownFrontend {
location: Location,
source: frontend::error::Error,
},

#[snafu(display("Failed to build meta server, source: {}", source))]
#[snafu(display("Failed to build meta server"))]
BuildMetaServer {
location: Location,
source: meta_srv::error::Error,
},

#[snafu(display("Failed to start meta server, source: {}", source))]
#[snafu(display("Failed to start meta server"))]
StartMetaServer {
location: Location,
source: meta_srv::error::Error,
},

#[snafu(display("Failed to shutdown meta server, source: {}", source))]
#[snafu(display("Failed to shutdown meta server"))]
ShutdownMetaServer {
location: Location,
source: meta_srv::error::Error,
@@ -83,13 +85,7 @@ pub enum Error {
#[snafu(display("Illegal config: {}", msg))]
IllegalConfig { msg: String, location: Location },

#[snafu(display("Illegal auth config: {}", source))]
IllegalAuthConfig {
location: Location,
source: auth::error::Error,
},

#[snafu(display("Unsupported selector type, {} source: {}", selector_type, source))]
#[snafu(display("Unsupported selector type: {}", selector_type))]
UnsupportedSelectorType {
selector_type: String,
location: Location,
@@ -99,80 +95,97 @@ pub enum Error {
#[snafu(display("Invalid REPL command: {reason}"))]
InvalidReplCommand { reason: String },

#[snafu(display("Cannot create REPL: {}", source))]
#[snafu(display("Cannot create REPL"))]
ReplCreation {
source: ReadlineError,
#[snafu(source)]
error: ReadlineError,
location: Location,
},

#[snafu(display("Error reading command: {}", source))]
#[snafu(display("Error reading command"))]
Readline {
source: ReadlineError,
#[snafu(source)]
error: ReadlineError,
location: Location,
},

#[snafu(display("Failed to request database, sql: {sql}, source: {source}"))]
#[snafu(display("Failed to request database, sql: {sql}"))]
RequestDatabase {
sql: String,
location: Location,
source: client::Error,
},

#[snafu(display("Failed to collect RecordBatches, source: {source}"))]
#[snafu(display("Failed to collect RecordBatches"))]
CollectRecordBatches {
location: Location,
source: common_recordbatch::error::Error,
},

#[snafu(display("Failed to pretty print Recordbatches, source: {source}"))]
#[snafu(display("Failed to pretty print Recordbatches"))]
PrettyPrintRecordBatches {
location: Location,
source: common_recordbatch::error::Error,
},

#[snafu(display("Failed to start Meta client, source: {}", source))]
#[snafu(display("Failed to start Meta client"))]
StartMetaClient {
location: Location,
source: meta_client::error::Error,
},

#[snafu(display("Failed to parse SQL: {}, source: {}", sql, source))]
#[snafu(display("Failed to parse SQL: {}", sql))]
ParseSql {
sql: String,
location: Location,
source: query::error::Error,
},

#[snafu(display("Failed to plan statement, source: {}", source))]
#[snafu(display("Failed to plan statement"))]
PlanStatement {
location: Location,
source: query::error::Error,
},

#[snafu(display("Failed to encode logical plan in substrait, source: {}", source))]
#[snafu(display("Failed to encode logical plan in substrait"))]
SubstraitEncodeLogicalPlan {
location: Location,
source: substrait::error::Error,
},

#[snafu(display("Failed to load layered config, source: {}", source))]
#[snafu(display("Failed to load layered config"))]
LoadLayeredConfig {
source: ConfigError,
#[snafu(source)]
error: ConfigError,
location: Location,
},

#[snafu(display("Failed to start catalog manager, source: {}", source))]
#[snafu(display("Failed to start catalog manager"))]
StartCatalogManager {
location: Location,
source: catalog::error::Error,
},

#[snafu(display("Failed to connect to Etcd at {etcd_addr}, source: {}", source))]
#[snafu(display("Failed to connect to Etcd at {etcd_addr}"))]
ConnectEtcd {
etcd_addr: String,
source: etcd_client::Error,
#[snafu(source)]
error: etcd_client::Error,
location: Location,
},

#[snafu(display("Failed to serde json"))]
SerdeJson {
#[snafu(source)]
error: serde_json::error::Error,
location: Location,
},
#[snafu(display("Failed to create directory {}", dir))]
CreateDir {
dir: String,
#[snafu(source)]
error: std::io::Error,
},
}

pub type Result<T> = std::result::Result<T, Error>;
@@ -195,7 +208,7 @@ impl ErrorExt for Error {
| Error::LoadLayeredConfig { .. }
| Error::IllegalConfig { .. }
| Error::InvalidReplCommand { .. }
| Error::IllegalAuthConfig { .. }
| Error::CreateDir { .. }
| Error::ConnectEtcd { .. } => StatusCode::InvalidArguments,

Error::ReplCreation { .. } | Error::Readline { .. } => StatusCode::Internal,
@@ -208,6 +221,8 @@ impl ErrorExt for Error {
}
Error::SubstraitEncodeLogicalPlan { source, .. } => source.status_code(),
Error::StartCatalogManager { source, .. } => source.status_code(),

Error::SerdeJson { .. } => StatusCode::Unexpected,
}
}
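The error-enum hunks in this compare follow one recurring pattern: `#[derive(Debug, Snafu)]` becomes `#[derive(Snafu)]` plus the repo's `#[stack_trace_debug]` macro, external causes move to an `error` field marked `#[snafu(source)]`, and display strings stop embedding the source. The sketch below is illustrative only and uses plain `snafu`; `stack_trace_debug` is omitted because it is a proc macro internal to this repository, and the `ensure_dir` helper is hypothetical.

```rust
// Illustrative sketch of the variant shape this refactor converges on.
use snafu::{ResultExt, Snafu};

#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
pub enum Error {
    // The display string names only this error; the io::Error cause is kept
    // in the `error` field marked as the snafu source.
    #[snafu(display("Failed to create directory {}", dir))]
    CreateDir {
        dir: String,
        #[snafu(source)]
        error: std::io::Error,
    },
}

fn ensure_dir(dir: &str) -> Result<(), Error> {
    std::fs::create_dir_all(dir).context(CreateDirSnafu { dir })?;
    Ok(())
}

fn main() {
    // `{}` prints only this error's message; the underlying io::Error remains
    // reachable through std::error::Error::source.
    if let Err(e) = ensure_dir("/definitely/not/writable/dir") {
        println!("{e}");
    }
}
```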
@@ -12,11 +12,9 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Arc;
use std::time::Duration;

use auth::UserProviderRef;
use clap::Parser;
use common_base::Plugins;
use common_telemetry::logging;
use frontend::frontend::FrontendOptions;
use frontend::instance::{FrontendInstance, Instance as FeInstance};
@@ -25,7 +23,7 @@ use servers::tls::{TlsMode, TlsOption};
use servers::Mode;
use snafu::ResultExt;

use crate::error::{self, IllegalAuthConfigSnafu, Result};
use crate::error::{self, Result, StartFrontendSnafu};
use crate::options::{Options, TopLevelOptions};

pub struct Instance {
@@ -34,10 +32,11 @@ pub struct Instance {

impl Instance {
pub async fn start(&mut self) -> Result<()> {
self.frontend
.start()
plugins::start_frontend_plugins(self.frontend.plugins().clone())
.await
.context(error::StartFrontendSnafu)
.context(StartFrontendSnafu)?;

self.frontend.start().await.context(StartFrontendSnafu)
}

pub async fn stop(&self) -> Result<()> {
@@ -88,6 +87,8 @@ pub struct StartCommand {
#[clap(long)]
http_addr: Option<String>,
#[clap(long)]
http_timeout: Option<u64>,
#[clap(long)]
grpc_addr: Option<String>,
#[clap(long)]
mysql_addr: Option<String>,
@@ -141,6 +142,10 @@ impl StartCommand {
opts.http.addr = addr.clone()
}

if let Some(http_timeout) = self.http_timeout {
opts.http.timeout = Duration::from_secs(http_timeout)
}

if let Some(disable_dashboard) = self.disable_dashboard {
opts.http.disable_dashboard = disable_dashboard;
}
@@ -177,38 +182,32 @@ impl StartCommand {
opts.mode = Mode::Distributed;
}

opts.user_provider = self.user_provider.clone();

Ok(Options::Frontend(Box::new(opts)))
}

async fn build(self, opts: FrontendOptions) -> Result<Instance> {
async fn build(self, mut opts: FrontendOptions) -> Result<Instance> {
let plugins = plugins::setup_frontend_plugins(&mut opts)
.await
.context(StartFrontendSnafu)?;

logging::info!("Frontend start command: {:#?}", self);
logging::info!("Frontend options: {:#?}", opts);

let plugins = Arc::new(load_frontend_plugins(&self.user_provider)?);

let mut instance = FeInstance::try_new_distributed(&opts, plugins.clone())
.await
.context(error::StartFrontendSnafu)?;
.context(StartFrontendSnafu)?;

instance
.build_servers(&opts)
.await
.context(error::StartFrontendSnafu)?;
.context(StartFrontendSnafu)?;

Ok(Instance { frontend: instance })
}
}

pub fn load_frontend_plugins(user_provider: &Option<String>) -> Result<Plugins> {
let plugins = Plugins::new();

if let Some(provider) = user_provider {
let provider = auth::user_provider_from_option(provider).context(IllegalAuthConfigSnafu)?;
plugins.insert::<UserProviderRef>(provider);
}
Ok(plugins)
}
#[cfg(test)]
mod tests {
use std::io::Write;
@@ -218,6 +217,7 @@ mod tests {
use common_base::readable_size::ReadableSize;
use common_test_util::temp_dir::create_named_temp_file;
use frontend::service_config::GrpcOptions;
use servers::http::HttpOptions;

use super::*;
use crate::options::ENV_VAR_SEP;
@@ -303,14 +303,17 @@ mod tests {

#[tokio::test]
async fn test_try_from_start_command_to_anymap() {
let command = StartCommand {
let mut fe_opts = FrontendOptions {
http: HttpOptions {
disable_dashboard: false,
..Default::default()
},
user_provider: Some("static_user_provider:cmd:test=test".to_string()),
disable_dashboard: Some(false),
..Default::default()
};

let plugins = load_frontend_plugins(&command.user_provider);
let plugins = plugins.unwrap();
let plugins = plugins::setup_frontend_plugins(&mut fe_opts).await.unwrap();

let provider = plugins.get::<UserProviderRef>().unwrap();
let result = provider
.authenticate(
@@ -350,8 +353,8 @@ mod tests {
addr = "127.0.0.1:4000"

[meta_client]
timeout_millis = 3000
connect_timeout_millis = 5000
timeout = "3s"
connect_timeout = "5s"
tcp_nodelay = true

[mysql]
@@ -383,7 +386,7 @@ mod tests {
Some("11"),
),
(
// http_options.addr = 127.0.0.1:24000
// http.addr = 127.0.0.1:24000
[
env_prefix.to_string(),
"http".to_uppercase(),
@@ -20,7 +20,7 @@ use meta_srv::bootstrap::MetaSrvInstance;
use meta_srv::metasrv::MetaSrvOptions;
use snafu::ResultExt;

use crate::error::{self, Result};
use crate::error::{self, Result, StartMetaServerSnafu};
use crate::options::{Options, TopLevelOptions};

pub struct Instance {
@@ -29,10 +29,10 @@ pub struct Instance {

impl Instance {
pub async fn start(&mut self) -> Result<()> {
self.instance
.start()
plugins::start_meta_srv_plugins(self.instance.plugins())
.await
.context(error::StartMetaServerSnafu)
.context(StartMetaServerSnafu)?;
self.instance.start().await.context(StartMetaServerSnafu)
}

pub async fn stop(&self) -> Result<()> {
@@ -158,12 +158,15 @@ impl StartCommand {
Ok(Options::Metasrv(Box::new(opts)))
}

async fn build(self, opts: MetaSrvOptions) -> Result<Instance> {
logging::info!("MetaSrv start command: {:#?}", self);
async fn build(self, mut opts: MetaSrvOptions) -> Result<Instance> {
let plugins = plugins::setup_meta_srv_plugins(&mut opts)
.await
.context(StartMetaServerSnafu)?;

logging::info!("MetaSrv start command: {:#?}", self);
logging::info!("MetaSrv options: {:#?}", opts);

let instance = MetaSrvInstance::new(opts)
let instance = MetaSrvInstance::new(opts, plugins)
.await
.context(error::BuildMetaServerSnafu)?;
@@ -21,7 +21,7 @@ use meta_srv::metasrv::MetaSrvOptions;
use serde::{Deserialize, Serialize};
use snafu::ResultExt;

use crate::error::{LoadLayeredConfigSnafu, Result};
use crate::error::{LoadLayeredConfigSnafu, Result, SerdeJsonSnafu};

pub const ENV_VAR_SEP: &str = "__";
pub const ENV_LIST_SEP: &str = ",";
@@ -29,11 +29,11 @@ pub const ENV_LIST_SEP: &str = ",";
/// Options mixed up from datanode, frontend and metasrv.
pub struct MixOptions {
pub data_home: String,
pub procedure_cfg: ProcedureConfig,
pub kv_store_cfg: KvStoreConfig,
pub fe_opts: FrontendOptions,
pub dn_opts: DatanodeOptions,
pub logging_opts: LoggingOptions,
pub procedure: ProcedureConfig,
pub metadata_store: KvStoreConfig,
pub frontend: FrontendOptions,
pub datanode: DatanodeOptions,
pub logging: LoggingOptions,
}

pub enum Options {
@@ -56,7 +56,7 @@ impl Options {
Options::Datanode(opts) => &opts.logging,
Options::Frontend(opts) => &opts.logging,
Options::Metasrv(opts) => &opts.logging,
Options::Standalone(opts) => &opts.logging_opts,
Options::Standalone(opts) => &opts.logging,
Options::Cli(opts) => opts,
}
}
@@ -94,9 +94,16 @@ impl Options {
.ignore_empty(true)
};

// Workaround: Replacement for `Config::try_from(&default_opts)` due to
// `ConfigSerializer` cannot handle the case of an empty struct contained
// within an iterative structure.
// See: https://github.com/mehcode/config-rs/issues/461
let json_str = serde_json::to_string(&default_opts).context(SerdeJsonSnafu)?;
let default_config = File::from_str(&json_str, FileFormat::Json);

// Add default values and environment variables as the sources of the configuration.
let mut layered_config = Config::builder()
.add_source(Config::try_from(&default_opts).context(LoadLayeredConfigSnafu)?)
.add_source(default_config)
.add_source(env_source);

// Add config file as the source of the configuration if it is specified.
@@ -137,8 +144,8 @@ mod tests {
mysql_runtime_size = 2

[meta_client]
timeout_millis = 3000
connect_timeout_millis = 5000
timeout = "3s"
connect_timeout = "5s"
tcp_nodelay = true

[wal]
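The `load_layered_options` hunk above works around config-rs issue 461 by serializing the default options to JSON and feeding them back as a `File` source instead of `Config::try_from`. The snippet below is an illustrative, self-contained sketch of that layering idea (defaults first, then environment variables); `DemoOptions` and the `DEMO` prefix are placeholders, not types from this repository.

```rust
// Illustrative sketch of layering defaults + environment with the config crate.
use config::{Config, Environment, File, FileFormat};
use serde::{Deserialize, Serialize};

#[derive(Debug, Default, Serialize, Deserialize)]
#[serde(default)]
struct DemoOptions {
    data_home: String,
    http_addr: String,
}

fn load_options() -> Result<DemoOptions, config::ConfigError> {
    let defaults = DemoOptions {
        data_home: "/tmp/demo".to_string(),
        http_addr: "127.0.0.1:4000".to_string(),
    };

    // Go through a JSON string rather than Config::try_from, mirroring the
    // workaround described in the comment above.
    let json = serde_json::to_string(&defaults).expect("defaults are serializable");

    Config::builder()
        .add_source(File::from_str(&json, FileFormat::Json))
        // Later sources override earlier ones, so env vars beat defaults.
        .add_source(Environment::with_prefix("DEMO").separator("__"))
        .build()?
        .try_deserialize()
}

fn main() {
    println!("{:?}", load_options().unwrap());
}
```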
@@ -13,24 +13,28 @@
// limitations under the License.

use std::sync::Arc;
use std::{fs, path};

use catalog::kvbackend::{DummyKvCacheInvalidator, KvBackendCatalogManager};
use catalog::kvbackend::KvBackendCatalogManager;
use catalog::CatalogManagerRef;
use clap::Parser;
use common_base::Plugins;
use common_config::{kv_store_dir, KvStoreConfig, WalConfig};
use common_config::{metadata_store_dir, KvStoreConfig, WalConfig};
use common_meta::cache_invalidator::DummyKvCacheInvalidator;
use common_meta::kv_backend::KvBackendRef;
use common_procedure::ProcedureManagerRef;
use common_telemetry::info;
use common_telemetry::logging::LoggingOptions;
use datanode::config::{DatanodeOptions, ProcedureConfig, StorageConfig};
use datanode::config::{DatanodeOptions, ProcedureConfig, RegionEngineConfig, StorageConfig};
use datanode::datanode::{Datanode, DatanodeBuilder};
use datanode::region_server::RegionServer;
use file_engine::config::EngineConfig as FileEngineConfig;
use frontend::frontend::FrontendOptions;
use frontend::instance::{FrontendInstance, Instance as FeInstance, StandaloneDatanodeManager};
use frontend::service_config::{
GrpcOptions, InfluxdbOptions, MysqlOptions, OpentsdbOptions, PostgresOptions, PromStoreOptions,
};
use mito2::config::MitoConfig;
use serde::{Deserialize, Serialize};
use servers::http::HttpOptions;
use servers::tls::{TlsMode, TlsOption};
@@ -38,10 +42,9 @@ use servers::Mode;
use snafu::ResultExt;

use crate::error::{
IllegalConfigSnafu, InitMetadataSnafu, Result, ShutdownDatanodeSnafu, ShutdownFrontendSnafu,
StartDatanodeSnafu, StartFrontendSnafu,
CreateDirSnafu, IllegalConfigSnafu, InitMetadataSnafu, Result, ShutdownDatanodeSnafu,
ShutdownFrontendSnafu, StartDatanodeSnafu, StartFrontendSnafu,
};
use crate::frontend::load_frontend_plugins;
use crate::options::{MixOptions, Options, TopLevelOptions};
#[derive(Parser)]
@@ -84,18 +87,21 @@ impl SubCommand {
pub struct StandaloneOptions {
pub mode: Mode,
pub enable_telemetry: bool,
pub http_options: HttpOptions,
pub grpc_options: GrpcOptions,
pub mysql_options: MysqlOptions,
pub postgres_options: PostgresOptions,
pub opentsdb_options: OpentsdbOptions,
pub influxdb_options: InfluxdbOptions,
pub prom_store_options: PromStoreOptions,
pub http: HttpOptions,
pub grpc: GrpcOptions,
pub mysql: MysqlOptions,
pub postgres: PostgresOptions,
pub opentsdb: OpentsdbOptions,
pub influxdb: InfluxdbOptions,
pub prom_store: PromStoreOptions,
pub wal: WalConfig,
pub storage: StorageConfig,
pub kv_store: KvStoreConfig,
pub metadata_store: KvStoreConfig,
pub procedure: ProcedureConfig,
pub logging: LoggingOptions,
pub user_provider: Option<String>,
/// Options for different store engines.
pub region_engine: Vec<RegionEngineConfig>,
}

impl Default for StandaloneOptions {
@@ -103,18 +109,23 @@ impl Default for StandaloneOptions {
Self {
mode: Mode::Standalone,
enable_telemetry: true,
http_options: HttpOptions::default(),
grpc_options: GrpcOptions::default(),
mysql_options: MysqlOptions::default(),
postgres_options: PostgresOptions::default(),
opentsdb_options: OpentsdbOptions::default(),
influxdb_options: InfluxdbOptions::default(),
prom_store_options: PromStoreOptions::default(),
http: HttpOptions::default(),
grpc: GrpcOptions::default(),
mysql: MysqlOptions::default(),
postgres: PostgresOptions::default(),
opentsdb: OpentsdbOptions::default(),
influxdb: InfluxdbOptions::default(),
prom_store: PromStoreOptions::default(),
wal: WalConfig::default(),
storage: StorageConfig::default(),
kv_store: KvStoreConfig::default(),
metadata_store: KvStoreConfig::default(),
procedure: ProcedureConfig::default(),
logging: LoggingOptions::default(),
user_provider: None,
region_engine: vec![
RegionEngineConfig::Mito(MitoConfig::default()),
RegionEngineConfig::File(FileEngineConfig::default()),
],
}
}
}
@@ -123,15 +134,16 @@ impl StandaloneOptions {
fn frontend_options(self) -> FrontendOptions {
FrontendOptions {
mode: self.mode,
http: self.http_options,
grpc: self.grpc_options,
mysql: self.mysql_options,
postgres: self.postgres_options,
opentsdb: self.opentsdb_options,
influxdb: self.influxdb_options,
prom_store: self.prom_store_options,
http: self.http,
grpc: self.grpc,
mysql: self.mysql,
postgres: self.postgres,
opentsdb: self.opentsdb,
influxdb: self.influxdb,
prom_store: self.prom_store,
meta_client: None,
logging: self.logging,
user_provider: self.user_provider,
..Default::default()
}
}
@@ -142,6 +154,7 @@ impl StandaloneOptions {
enable_telemetry: self.enable_telemetry,
wal: self.wal,
storage: self.storage,
region_engine: self.region_engine,
..Default::default()
}
}
@@ -231,7 +244,7 @@ impl StartCommand {
);

if let Some(addr) = &self.http_addr {
opts.http_options.addr = addr.clone()
opts.http.addr = addr.clone()
}

if let Some(addr) = &self.rpc_addr {
@@ -245,42 +258,45 @@ impl StartCommand {
}
.fail();
}
opts.grpc_options.addr = addr.clone()
opts.grpc.addr = addr.clone()
}

if let Some(addr) = &self.mysql_addr {
opts.mysql_options.enable = true;
opts.mysql_options.addr = addr.clone();
opts.mysql_options.tls = tls_opts.clone();
opts.mysql.enable = true;
opts.mysql.addr = addr.clone();
opts.mysql.tls = tls_opts.clone();
}

if let Some(addr) = &self.postgres_addr {
opts.postgres_options.enable = true;
opts.postgres_options.addr = addr.clone();
opts.postgres_options.tls = tls_opts;
opts.postgres.enable = true;
opts.postgres.addr = addr.clone();
opts.postgres.tls = tls_opts;
}

if let Some(addr) = &self.opentsdb_addr {
opts.opentsdb_options.enable = true;
opts.opentsdb_options.addr = addr.clone();
opts.opentsdb.enable = true;
opts.opentsdb.addr = addr.clone();
}

if self.influxdb_enable {
opts.influxdb_options.enable = self.influxdb_enable;
opts.influxdb.enable = self.influxdb_enable;
}
let kv_store_cfg = opts.kv_store.clone();
let procedure_cfg = opts.procedure.clone();
let fe_opts = opts.clone().frontend_options();
let logging_opts = opts.logging.clone();
let dn_opts = opts.datanode_options();

opts.user_provider = self.user_provider.clone();

let metadata_store = opts.metadata_store.clone();
let procedure = opts.procedure.clone();
let frontend = opts.clone().frontend_options();
let logging = opts.logging.clone();
let datanode = opts.datanode_options();

Ok(Options::Standalone(Box::new(MixOptions {
procedure_cfg,
kv_store_cfg,
data_home: dn_opts.storage.data_home.to_string(),
fe_opts,
dn_opts,
logging_opts,
procedure,
metadata_store,
data_home: datanode.storage.data_home.to_string(),
frontend,
datanode,
logging,
})))
}
@@ -288,9 +304,12 @@ impl StartCommand {
#[allow(unused_variables)]
#[allow(clippy::diverging_sub_expression)]
async fn build(self, opts: MixOptions) -> Result<Instance> {
let plugins = Arc::new(load_frontend_plugins(&self.user_provider)?);
let fe_opts = opts.fe_opts;
let dn_opts = opts.dn_opts;
let mut fe_opts = opts.frontend;
let fe_plugins = plugins::setup_frontend_plugins(&mut fe_opts)
.await
.context(StartFrontendSnafu)?;

let dn_opts = opts.datanode;

info!("Standalone start command: {:#?}", self);
info!(
@@ -298,17 +317,22 @@ impl StartCommand {
fe_opts, dn_opts
);

let kv_dir = kv_store_dir(&opts.data_home);
// Ensure the data_home directory exists.
fs::create_dir_all(path::Path::new(&opts.data_home)).context(CreateDirSnafu {
dir: &opts.data_home,
})?;

let metadata_dir = metadata_store_dir(&opts.data_home);
let (kv_store, procedure_manager) = FeInstance::try_build_standalone_components(
kv_dir,
opts.kv_store_cfg,
opts.procedure_cfg,
metadata_dir,
opts.metadata_store,
opts.procedure,
)
.await
.context(StartFrontendSnafu)?;

let datanode =
DatanodeBuilder::new(dn_opts.clone(), Some(kv_store.clone()), plugins.clone())
DatanodeBuilder::new(dn_opts.clone(), Some(kv_store.clone()), Default::default())
.build()
.await
.context(StartDatanodeSnafu)?;
@@ -328,7 +352,7 @@ impl StartCommand {

// TODO: build frontend instance like in distributed mode
let mut frontend = build_frontend(
plugins,
fe_plugins,
kv_store,
procedure_manager,
catalog_manager,
@@ -347,7 +371,7 @@ impl StartCommand {

/// Build frontend instance in standalone mode
async fn build_frontend(
plugins: Arc<Plugins>,
plugins: Plugins,
kv_store: KvBackendRef,
procedure_manager: ProcedureManagerRef,
catalog_manager: CatalogManagerRef,
@@ -381,13 +405,13 @@ mod tests {

#[tokio::test]
async fn test_try_from_start_command_to_anymap() {
let command = StartCommand {
let mut fe_opts = FrontendOptions {
user_provider: Some("static_user_provider:cmd:test=test".to_string()),
..Default::default()
};

let plugins = load_frontend_plugins(&command.user_provider);
let plugins = plugins.unwrap();
let plugins = plugins::setup_frontend_plugins(&mut fe_opts).await.unwrap();

let provider = plugins.get::<UserProviderRef>().unwrap();
let result = provider
.authenticate(
@@ -435,9 +459,9 @@ mod tests {
checkpoint_margin = 9
gc_duration = '7s'

[http_options]
[http]
addr = "127.0.0.1:4000"
timeout = "30s"
timeout = "33s"
body_limit = "128MB"

[logging]
@@ -455,12 +479,12 @@ mod tests {
else {
unreachable!()
};
let fe_opts = options.fe_opts;
let dn_opts = options.dn_opts;
let logging_opts = options.logging_opts;
let fe_opts = options.frontend;
let dn_opts = options.datanode;
let logging_opts = options.logging;
assert_eq!(Mode::Standalone, fe_opts.mode);
assert_eq!("127.0.0.1:4000".to_string(), fe_opts.http.addr);
assert_eq!(Duration::from_secs(30), fe_opts.http.timeout);
assert_eq!(Duration::from_secs(33), fe_opts.http.timeout);
assert_eq!(ReadableSize::mb(128), fe_opts.http.body_limit);
assert_eq!("127.0.0.1:4001".to_string(), fe_opts.grpc.addr);
assert!(fe_opts.mysql.enable);
@@ -502,8 +526,8 @@ mod tests {
unreachable!()
};

assert_eq!("/tmp/greptimedb/test/logs", opts.logging_opts.dir);
assert_eq!("debug", opts.logging_opts.level.unwrap());
assert_eq!("/tmp/greptimedb/test/logs", opts.logging.dir);
assert_eq!("debug", opts.logging.level.unwrap());
}

#[test]
@@ -547,7 +571,7 @@ mod tests {
// http.addr = 127.0.0.1:24000
[
env_prefix.to_string(),
"http_options".to_uppercase(),
"http".to_uppercase(),
"addr".to_uppercase(),
]
.join(ENV_VAR_SEP),
@@ -572,18 +596,39 @@ mod tests {
};

// Should be read from env, env > default values.
assert_eq!(opts.logging_opts.dir, "/other/log/dir");
assert_eq!(opts.logging.dir, "/other/log/dir");

// Should be read from config file, config file > env > default values.
assert_eq!(opts.logging_opts.level.as_ref().unwrap(), "debug");
assert_eq!(opts.logging.level.as_ref().unwrap(), "debug");

// Should be read from cli, cli > config file > env > default values.
assert_eq!(opts.fe_opts.http.addr, "127.0.0.1:14000");
assert_eq!(ReadableSize::mb(64), opts.fe_opts.http.body_limit);
assert_eq!(opts.frontend.http.addr, "127.0.0.1:14000");
assert_eq!(ReadableSize::mb(64), opts.frontend.http.body_limit);

// Should be default value.
assert_eq!(opts.fe_opts.grpc.addr, GrpcOptions::default().addr);
assert_eq!(opts.frontend.grpc.addr, GrpcOptions::default().addr);
},
);
}

#[test]
fn test_load_default_standalone_options() {
let options: StandaloneOptions =
Options::load_layered_options(None, "GREPTIMEDB_FRONTEND", None).unwrap();
let default_options = StandaloneOptions::default();
assert_eq!(options.mode, default_options.mode);
assert_eq!(options.enable_telemetry, default_options.enable_telemetry);
assert_eq!(options.http, default_options.http);
assert_eq!(options.grpc, default_options.grpc);
assert_eq!(options.mysql, default_options.mysql);
assert_eq!(options.postgres, default_options.postgres);
assert_eq!(options.opentsdb, default_options.opentsdb);
assert_eq!(options.influxdb, default_options.influxdb);
assert_eq!(options.prom_store, default_options.prom_store);
assert_eq!(options.wal, default_options.wal);
assert_eq!(options.metadata_store, default_options.metadata_store);
assert_eq!(options.procedure, default_options.procedure);
assert_eq!(options.logging, default_options.logging);
assert_eq!(options.region_engine, default_options.region_engine);
}
}
@@ -9,6 +9,7 @@ anymap = "1.0.0-beta.2"
bitvec = "1.0"
bytes = { version = "1.1", features = ["serde"] }
common-error = { workspace = true }
common-macro = { workspace = true }
paste = "1.0"
serde = { version = "1.0", features = ["derive"] }
snafu.workspace = true
@@ -17,11 +17,13 @@ use std::io::{Read, Write};

use bytes::{Buf, BufMut, BytesMut};
use common_error::ext::ErrorExt;
use common_macro::stack_trace_debug;
use paste::paste;
use snafu::{ensure, Location, ResultExt, Snafu};

#[derive(Debug, Snafu)]
#[derive(Snafu)]
#[snafu(visibility(pub))]
#[stack_trace_debug]
pub enum Error {
#[snafu(display(
"Destination buffer overflow, src_len: {}, dst_len: {}",
@@ -37,9 +39,10 @@ pub enum Error {
#[snafu(display("Buffer underflow"))]
Underflow { location: Location },

#[snafu(display("IO operation reach EOF, source: {}", source))]
#[snafu(display("IO operation reach EOF"))]
Eof {
source: std::io::Error,
#[snafu(source)]
error: std::io::Error,
location: Location,
},
}
@@ -23,6 +23,8 @@ use std::sync::{Arc, Mutex, MutexGuard};

pub use bit_vec::BitVec;

/// [`Plugins`] is a wrapper of Arc contents.
/// Make it Cloneable and we can treat it like an Arc struct.
#[derive(Default, Clone)]
pub struct Plugins {
inner: Arc<Mutex<anymap::Map<dyn Any + Send + Sync>>>,
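`Plugins` above wraps a type-keyed `anymap` behind `Arc<Mutex<…>>` so handles can be cloned freely and values fetched back by their type, which is how the cmd crate stores and retrieves `UserProviderRef`. The sketch below is illustrative only and uses std types instead of `anymap`; `DemoPlugins` is a made-up stand-in showing the same insert-by-type / get-by-type idea.

```rust
// Illustrative sketch of a cloneable, type-keyed plugin registry.
use std::any::{Any, TypeId};
use std::collections::HashMap;
use std::sync::{Arc, Mutex};

#[derive(Default, Clone)]
pub struct DemoPlugins {
    inner: Arc<Mutex<HashMap<TypeId, Box<dyn Any + Send + Sync>>>>,
}

impl DemoPlugins {
    pub fn insert<T: Clone + Send + Sync + 'static>(&self, value: T) {
        self.inner
            .lock()
            .unwrap()
            .insert(TypeId::of::<T>(), Box::new(value));
    }

    pub fn get<T: Clone + Send + Sync + 'static>(&self) -> Option<T> {
        self.inner
            .lock()
            .unwrap()
            .get(&TypeId::of::<T>())
            .and_then(|v| v.downcast_ref::<T>())
            .cloned()
    }
}

fn main() {
    let plugins = DemoPlugins::default();
    // Values are keyed purely by their type, mirroring insert::<UserProviderRef>().
    plugins.insert::<Arc<String>>(Arc::new("static_user_provider".to_string()));
    assert!(plugins.get::<Arc<String>>().is_some());
}
```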
@@ -6,6 +6,7 @@ license.workspace = true

[dependencies]
common-error = { workspace = true }
common-macro = { workspace = true }
serde.workspace = true
serde_json = "1.0"
snafu = { version = "0.7", features = ["backtraces"] }
@@ -16,10 +16,12 @@ use std::any::Any;

use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
use snafu::{Location, Snafu};

#[derive(Debug, Snafu)]
#[derive(Snafu)]
#[snafu(visibility(pub))]
#[stack_trace_debug]
pub enum Error {
#[snafu(display("Invalid full table name: {}", table_name))]
InvalidFullTableName {
@@ -13,9 +13,6 @@
// limitations under the License.

use consts::DEFAULT_CATALOG_NAME;
use snafu::ensure;

use crate::error::Result;

pub mod consts;
pub mod error;
@@ -26,17 +23,6 @@ pub fn format_full_table_name(catalog: &str, schema: &str, table: &str) -> Strin
format!("{catalog}.{schema}.{table}")
}

pub fn parse_full_table_name(table_name: &str) -> Result<(&str, &str, &str)> {
let result = table_name.split('.').collect::<Vec<_>>();

ensure!(
result.len() == 3,
error::InvalidFullTableNameSnafu { table_name }
);

Ok((result[0], result[1], result[2]))
}

/// Build db name from catalog and schema string
pub fn build_db_string(catalog: &str, schema: &str) -> String {
if catalog == DEFAULT_CATALOG_NAME {
@@ -17,7 +17,7 @@ use std::time::Duration;
|
||||
use common_base::readable_size::ReadableSize;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
|
||||
#[serde(default)]
|
||||
pub struct WalConfig {
|
||||
// wal file size in bytes
|
||||
@@ -45,11 +45,11 @@ impl Default for WalConfig {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn kv_store_dir(store_dir: &str) -> String {
|
||||
format!("{store_dir}/kv")
|
||||
pub fn metadata_store_dir(store_dir: &str) -> String {
|
||||
format!("{store_dir}/metadata")
|
||||
}
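Assuming metadata_store_dir is the replacement for kv_store_dir here (the surrounding config renames point that way), the on-disk layout shifts accordingly:

// Illustrative paths only:
//   old: kv_store_dir("/path/to/store")       == "/path/to/store/kv"
//   new: metadata_store_dir("/path/to/store") == "/path/to/store/metadata"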
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
|
||||
#[serde(default)]
|
||||
pub struct KvStoreConfig {
|
||||
// Kv file size in bytes
|
||||
|
||||
@@ -18,10 +18,12 @@ async-compression = { version = "0.3", features = [
|
||||
async-trait.workspace = true
|
||||
bytes = "1.1"
|
||||
common-error = { workspace = true }
|
||||
common-macro = { workspace = true }
|
||||
common-runtime = { workspace = true }
|
||||
datafusion.workspace = true
|
||||
derive_builder.workspace = true
|
||||
futures.workspace = true
|
||||
lazy_static.workspace = true
|
||||
object-store = { workspace = true }
|
||||
orc-rust = "0.2"
|
||||
paste = "1.0"
|
||||
|
||||
@@ -17,12 +17,14 @@ use std::any::Any;
|
||||
use arrow_schema::ArrowError;
|
||||
use common_error::ext::ErrorExt;
|
||||
use common_error::status_code::StatusCode;
|
||||
use common_macro::stack_trace_debug;
|
||||
use datafusion::parquet::errors::ParquetError;
|
||||
use snafu::{Location, Snafu};
|
||||
use url::ParseError;
|
||||
|
||||
#[derive(Debug, Snafu)]
|
||||
#[derive(Snafu)]
|
||||
#[snafu(visibility(pub))]
|
||||
#[stack_trace_debug]
|
||||
pub enum Error {
|
||||
#[snafu(display("Unsupported compression type: {}", compression_type))]
|
||||
UnsupportedCompressionType {
|
||||
@@ -30,10 +32,11 @@ pub enum Error {
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Unsupported backend protocol: {}", protocol))]
|
||||
#[snafu(display("Unsupported backend protocol: {}, url: {}", protocol, url))]
|
||||
UnsupportedBackendProtocol {
|
||||
protocol: String,
|
||||
location: Location,
|
||||
url: String,
|
||||
},
|
||||
|
||||
#[snafu(display("Unsupported format protocol: {}", format))]
|
||||
@@ -42,95 +45,109 @@ pub enum Error {
|
||||
#[snafu(display("empty host: {}", url))]
|
||||
EmptyHostPath { url: String, location: Location },
|
||||
|
||||
#[snafu(display("Invalid url: {}, error :{}", url, source))]
|
||||
#[snafu(display("Invalid url: {}", url))]
|
||||
InvalidUrl {
|
||||
url: String,
|
||||
source: ParseError,
|
||||
#[snafu(source)]
|
||||
error: ParseError,
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to build backend, source: {}", source))]
|
||||
#[snafu(display("Failed to build backend"))]
|
||||
BuildBackend {
|
||||
source: object_store::Error,
|
||||
#[snafu(source)]
|
||||
error: object_store::Error,
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to build orc reader, source: {}", source))]
|
||||
#[snafu(display("Failed to build orc reader"))]
|
||||
OrcReader {
|
||||
location: Location,
|
||||
source: orc_rust::error::Error,
|
||||
#[snafu(source)]
|
||||
error: orc_rust::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to read object from path: {}, source: {}", path, source))]
|
||||
#[snafu(display("Failed to read object from path: {}", path))]
|
||||
ReadObject {
|
||||
path: String,
|
||||
location: Location,
|
||||
source: object_store::Error,
|
||||
#[snafu(source)]
|
||||
error: object_store::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to write object to path: {}, source: {}", path, source))]
|
||||
#[snafu(display("Failed to write object to path: {}", path))]
|
||||
WriteObject {
|
||||
path: String,
|
||||
location: Location,
|
||||
source: object_store::Error,
|
||||
#[snafu(source)]
|
||||
error: object_store::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to write: {}", source))]
|
||||
#[snafu(display("Failed to write"))]
|
||||
AsyncWrite {
|
||||
source: std::io::Error,
|
||||
#[snafu(source)]
|
||||
error: std::io::Error,
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to write record batch: {}", source))]
|
||||
#[snafu(display("Failed to write record batch"))]
|
||||
WriteRecordBatch {
|
||||
location: Location,
|
||||
source: ArrowError,
|
||||
#[snafu(source)]
|
||||
error: ArrowError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to encode record batch: {}", source))]
|
||||
#[snafu(display("Failed to encode record batch"))]
|
||||
EncodeRecordBatch {
|
||||
location: Location,
|
||||
source: ParquetError,
|
||||
#[snafu(source)]
|
||||
error: ParquetError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to read record batch: {}", source))]
|
||||
#[snafu(display("Failed to read record batch"))]
|
||||
ReadRecordBatch {
|
||||
location: Location,
|
||||
source: datafusion::error::DataFusionError,
|
||||
#[snafu(source)]
|
||||
error: datafusion::error::DataFusionError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to read parquet source: {}", source))]
|
||||
#[snafu(display("Failed to read parquet"))]
|
||||
ReadParquetSnafu {
|
||||
location: Location,
|
||||
source: datafusion::parquet::errors::ParquetError,
|
||||
#[snafu(source)]
|
||||
error: datafusion::parquet::errors::ParquetError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to convert parquet to schema: {}", source))]
|
||||
#[snafu(display("Failed to convert parquet to schema"))]
|
||||
ParquetToSchema {
|
||||
location: Location,
|
||||
source: datafusion::parquet::errors::ParquetError,
|
||||
#[snafu(source)]
|
||||
error: datafusion::parquet::errors::ParquetError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to infer schema from file, source: {}", source))]
|
||||
#[snafu(display("Failed to infer schema from file"))]
|
||||
InferSchema {
|
||||
location: Location,
|
||||
source: arrow_schema::ArrowError,
|
||||
#[snafu(source)]
|
||||
error: arrow_schema::ArrowError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to list object in path: {}, source: {}", path, source))]
|
||||
#[snafu(display("Failed to list object in path: {}", path))]
|
||||
ListObjects {
|
||||
path: String,
|
||||
location: Location,
|
||||
source: object_store::Error,
|
||||
#[snafu(source)]
|
||||
error: object_store::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Invalid connection: {}", msg))]
|
||||
InvalidConnection { msg: String, location: Location },
|
||||
|
||||
#[snafu(display("Failed to join handle: {}", source))]
|
||||
#[snafu(display("Failed to join handle"))]
|
||||
JoinHandle {
|
||||
location: Location,
|
||||
source: tokio::task::JoinError,
|
||||
#[snafu(source)]
|
||||
error: tokio::task::JoinError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to parse format {} with value: {}", key, value))]
|
||||
@@ -140,9 +157,10 @@ pub enum Error {
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to merge schema: {}", source))]
|
||||
#[snafu(display("Failed to merge schema"))]
|
||||
MergeSchema {
|
||||
source: arrow_schema::ArrowError,
|
||||
#[snafu(source)]
|
||||
error: arrow_schema::ArrowError,
|
||||
location: Location,
|
||||
},
|
||||
|
||||
|
||||
@@ -27,7 +27,7 @@ pub enum Source {
|
||||
pub struct Lister {
|
||||
object_store: ObjectStore,
|
||||
source: Source,
|
||||
path: String,
|
||||
root: String,
|
||||
regex: Option<Regex>,
|
||||
}
|
||||
|
||||
@@ -35,13 +35,13 @@ impl Lister {
|
||||
pub fn new(
|
||||
object_store: ObjectStore,
|
||||
source: Source,
|
||||
path: String,
|
||||
root: String,
|
||||
regex: Option<Regex>,
|
||||
) -> Self {
|
||||
Lister {
|
||||
object_store,
|
||||
source,
|
||||
path,
|
||||
root,
|
||||
regex,
|
||||
}
|
||||
}
|
||||
@@ -51,9 +51,9 @@ impl Lister {
|
||||
Source::Dir => {
|
||||
let streamer = self
|
||||
.object_store
|
||||
.lister_with(&self.path)
|
||||
.lister_with("/")
|
||||
.await
|
||||
.context(error::ListObjectsSnafu { path: &self.path })?;
|
||||
.context(error::ListObjectsSnafu { path: &self.root })?;
|
||||
|
||||
streamer
|
||||
.try_filter(|f| {
|
||||
@@ -66,22 +66,21 @@ impl Lister {
|
||||
})
|
||||
.try_collect::<Vec<_>>()
|
||||
.await
|
||||
.context(error::ListObjectsSnafu { path: &self.path })
|
||||
.context(error::ListObjectsSnafu { path: &self.root })
|
||||
}
|
||||
Source::Filename(filename) => {
|
||||
// make sure this file exists
|
||||
let file_full_path = format!("{}{}", self.path, filename);
|
||||
let _ = self.object_store.stat(&file_full_path).await.context(
|
||||
let _ = self.object_store.stat(filename).await.with_context(|_| {
|
||||
error::ListObjectsSnafu {
|
||||
path: &file_full_path,
|
||||
},
|
||||
)?;
|
||||
path: format!("{}{}", &self.root, filename),
|
||||
}
|
||||
})?;
|
||||
|
||||
Ok(self
|
||||
.object_store
|
||||
.list_with(&self.path)
|
||||
.list_with("/")
|
||||
.await
|
||||
.context(error::ListObjectsSnafu { path: &self.path })?
|
||||
.context(error::ListObjectsSnafu { path: &self.root })?
|
||||
.into_iter()
|
||||
.find(|f| f.name() == filename)
|
||||
.map(|f| vec![f])
|
||||
|
||||
@@ -16,19 +16,29 @@ pub mod fs;
|
||||
pub mod s3;
|
||||
use std::collections::HashMap;
|
||||
|
||||
use lazy_static::lazy_static;
|
||||
use object_store::ObjectStore;
|
||||
use regex::Regex;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use url::{ParseError, Url};
|
||||
|
||||
use self::fs::build_fs_backend;
|
||||
use self::s3::build_s3_backend;
|
||||
use crate::error::{self, Result};
|
||||
use crate::util::find_dir_and_filename;
|
||||
|
||||
pub const FS_SCHEMA: &str = "FS";
|
||||
pub const S3_SCHEMA: &str = "S3";
|
||||
|
||||
/// Returns `(schema, Option<host>, path)`
|
||||
pub fn parse_url(url: &str) -> Result<(String, Option<String>, String)> {
|
||||
#[cfg(windows)]
|
||||
{
|
||||
// On Windows, the url may start with `C:/`.
|
||||
if let Some(_) = handle_windows_path(url) {
|
||||
return Ok((FS_SCHEMA.to_string(), None, url.to_string()));
|
||||
}
|
||||
}
|
||||
let parsed_url = Url::parse(url);
|
||||
match parsed_url {
|
||||
Ok(url) => Ok((
|
||||
@@ -44,17 +54,47 @@ pub fn parse_url(url: &str) -> Result<(String, Option<String>, String)> {
|
||||
}
|
||||
|
||||
pub fn build_backend(url: &str, connection: &HashMap<String, String>) -> Result<ObjectStore> {
    let (schema, host, _path) = parse_url(url)?;
    let (schema, host, path) = parse_url(url)?;
    let (root, _) = find_dir_and_filename(&path);

    match schema.to_uppercase().as_str() {
        S3_SCHEMA => {
            let host = host.context(error::EmptyHostPathSnafu {
                url: url.to_string(),
            })?;
            Ok(build_s3_backend(&host, "/", connection)?)
            Ok(build_s3_backend(&host, &root, connection)?)
        }
        FS_SCHEMA => Ok(build_fs_backend("/")?),
        FS_SCHEMA => Ok(build_fs_backend(&root)?),

        _ => error::UnsupportedBackendProtocolSnafu { protocol: schema }.fail(),
        _ => error::UnsupportedBackendProtocolSnafu {
            protocol: schema,
            url,
        }
        .fail(),
    }
}
|
||||
|
||||
lazy_static! {
|
||||
static ref DISK_SYMBOL_PATTERN: Regex = Regex::new("^([A-Za-z]:/)").unwrap();
|
||||
}
|
||||
|
||||
pub fn handle_windows_path(url: &str) -> Option<String> {
|
||||
DISK_SYMBOL_PATTERN
|
||||
.captures(url)
|
||||
.map(|captures| captures[0].to_string())
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::handle_windows_path;
|
||||
|
||||
#[test]
|
||||
fn test_handle_windows_path() {
|
||||
assert_eq!(
|
||||
handle_windows_path("C:/to/path/file"),
|
||||
Some("C:/".to_string())
|
||||
);
|
||||
assert_eq!(handle_windows_path("https://google.com"), None);
|
||||
assert_eq!(handle_windows_path("s3://bucket/path/to"), None);
|
||||
}
|
||||
}
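Putting the pieces of this file together: `parse_url` now also feeds the path into `find_dir_and_filename`, and the resulting directory, not `/`, becomes the root of the object-store backend. A hedged sketch of the flow (the exact split performed by `find_dir_and_filename` is an assumption, it is not shown in this diff):

use std::collections::HashMap;

// Hypothetical walk-through; the commented tuple contents are inferred, not shown here.
fn backend_for_copy_from() -> Result<ObjectStore> {
    let url = "s3://my-bucket/data/input.parquet";
    let (_schema, _host, path) = parse_url(url)?;      // likely ("s3", Some("my-bucket"), "/data/input.parquet")
    let (_root, _file) = find_dir_and_filename(&path); // presumably ("data/", Some("input.parquet"))
    // build_backend performs the same two calls itself and mounts the S3 backend at the
    // directory part, which is why the Lister earlier in this change lists "/" and
    // reports errors against `self.root`.
    build_backend(url, &HashMap::new())
}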
|
||||
|
||||
@@ -13,11 +13,12 @@
|
||||
// limitations under the License.
|
||||
|
||||
use std::any::Any;
use std::sync::Arc;

use crate::status_code::StatusCode;

/// Extension to [`Error`](std::error::Error) in std.
pub trait ErrorExt: std::error::Error {
pub trait ErrorExt: StackError {
    /// Map this error to [StatusCode].
    fn status_code(&self) -> StatusCode {
        StatusCode::Unknown
@@ -33,6 +34,63 @@ pub trait ErrorExt: std::error::Error {
    /// Returns the error as [Any](std::any::Any) so that it can be
    /// downcast to a specific implementation.
    fn as_any(&self) -> &dyn Any;

    fn output_msg(&self) -> String
    where
        Self: Sized,
    {
        let error = self.last();
        if let Some(external_error) = error.source() {
            let external_root = external_error.sources().last().unwrap();

            if error.to_string().is_empty() {
                format!("{external_root}")
            } else {
                format!("{error}: {external_root}")
            }
        } else {
            format!("{error}")
        }
    }
}

pub trait StackError: std::error::Error {
    fn debug_fmt(&self, layer: usize, buf: &mut Vec<String>);

    fn next(&self) -> Option<&dyn StackError>;

    fn last(&self) -> &dyn StackError
    where
        Self: Sized,
    {
        let Some(mut result) = self.next() else {
            return self;
        };
        while let Some(err) = result.next() {
            result = err;
        }
        result
    }
}
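A note on how these two traits cooperate (inferred from the code above): `next()` walks the chain of internal errors, `last()` finds the innermost one, and `output_msg` then appends the root external cause reached through `Error::sources()`, which is also why `lib.rs` below turns on the unstable `error_iter` feature. A hedged usage sketch:

// Hedged sketch of the intended division of labour between the two representations:
// Debug ({:?}) prints the whole numbered stack via debug_fmt, while output_msg keeps
// a single concise line (last internal message plus the external root cause).
fn log_and_report(err: &impl ErrorExt) -> String {
    common_telemetry::error!("request failed: {err:?}"); // full stack, for operators
    err.output_msg()                                      // short message, for clients
}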
|
||||
|
||||
impl<T: ?Sized + StackError> StackError for Arc<T> {
|
||||
fn debug_fmt(&self, layer: usize, buf: &mut Vec<String>) {
|
||||
self.as_ref().debug_fmt(layer, buf)
|
||||
}
|
||||
|
||||
fn next(&self) -> Option<&dyn StackError> {
|
||||
self.as_ref().next()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: StackError> StackError for Box<T> {
|
||||
fn debug_fmt(&self, layer: usize, buf: &mut Vec<String>) {
|
||||
self.as_ref().debug_fmt(layer, buf)
|
||||
}
|
||||
|
||||
fn next(&self) -> Option<&dyn StackError> {
|
||||
self.as_ref().next()
|
||||
}
|
||||
}
|
||||
|
||||
/// An opaque boxed error based on errors that implement [ErrorExt] trait.
|
||||
@@ -90,6 +148,16 @@ impl crate::snafu::ErrorCompat for BoxedError {
|
||||
}
|
||||
}
|
||||
|
||||
impl StackError for BoxedError {
|
||||
fn debug_fmt(&self, layer: usize, buf: &mut Vec<String>) {
|
||||
self.inner.debug_fmt(layer, buf)
|
||||
}
|
||||
|
||||
fn next(&self) -> Option<&dyn StackError> {
|
||||
self.inner.next()
|
||||
}
|
||||
}
|
||||
|
||||
/// Error type with plain error message
|
||||
#[derive(Debug)]
|
||||
pub struct PlainError {
|
||||
@@ -128,3 +196,13 @@ impl crate::ext::ErrorExt for PlainError {
|
||||
self as _
|
||||
}
|
||||
}
|
||||
|
||||
impl StackError for PlainError {
|
||||
fn debug_fmt(&self, layer: usize, buf: &mut Vec<String>) {
|
||||
buf.push(format!("{}: {}", layer, self.msg))
|
||||
}
|
||||
|
||||
fn next(&self) -> Option<&dyn StackError> {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
@@ -50,6 +50,7 @@ mod tests {
|
||||
use snafu::{GenerateImplicitData, Location};
|
||||
|
||||
use super::*;
|
||||
use crate::ext::StackError;
|
||||
|
||||
#[derive(Debug, Snafu)]
|
||||
#[snafu(display("This is a leaf error"))]
|
||||
@@ -65,6 +66,14 @@ mod tests {
|
||||
}
|
||||
}
|
||||
|
||||
impl StackError for Leaf {
|
||||
fn debug_fmt(&self, _: usize, _: &mut Vec<String>) {}
|
||||
|
||||
fn next(&self) -> Option<&dyn StackError> {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Snafu)]
|
||||
#[snafu(display("This is a leaf with location"))]
|
||||
struct LeafWithLocation {
|
||||
@@ -81,6 +90,14 @@ mod tests {
|
||||
}
|
||||
}
|
||||
|
||||
impl StackError for LeafWithLocation {
|
||||
fn debug_fmt(&self, _: usize, _: &mut Vec<String>) {}
|
||||
|
||||
fn next(&self) -> Option<&dyn StackError> {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Snafu)]
|
||||
#[snafu(display("Internal error"))]
|
||||
struct Internal {
|
||||
@@ -99,6 +116,17 @@ mod tests {
|
||||
}
|
||||
}
|
||||
|
||||
impl StackError for Internal {
|
||||
fn debug_fmt(&self, layer: usize, buf: &mut Vec<String>) {
|
||||
buf.push(format!("{}: Internal error, at {}", layer, self.location));
|
||||
self.source.debug_fmt(layer + 1, buf);
|
||||
}
|
||||
|
||||
fn next(&self) -> Option<&dyn StackError> {
|
||||
Some(&self.source)
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_debug_format() {
|
||||
let err = Leaf;
|
||||
|
||||
@@ -11,6 +11,7 @@
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
#![feature(error_iter)]
|
||||
|
||||
pub mod ext;
|
||||
pub mod format;
|
||||
|
||||
@@ -19,7 +19,7 @@ use std::fmt;
|
||||
|
||||
use snafu::Location;
|
||||
|
||||
use crate::ext::ErrorExt;
|
||||
use crate::ext::{ErrorExt, StackError};
|
||||
use crate::status_code::StatusCode;
|
||||
|
||||
/// A mock error mainly for test.
|
||||
@@ -69,3 +69,11 @@ impl ErrorExt for MockError {
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
impl StackError for MockError {
|
||||
fn debug_fmt(&self, _: usize, _: &mut Vec<String>) {}
|
||||
|
||||
fn next(&self) -> Option<&dyn StackError> {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
@@ -12,8 +12,10 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
use std::sync::Arc;
|
||||
mod greatest;
|
||||
mod to_unixtime;
|
||||
|
||||
use greatest::GreatestFunction;
|
||||
use to_unixtime::ToUnixtimeFunction;
|
||||
|
||||
use crate::scalars::function_registry::FunctionRegistry;
|
||||
@@ -23,5 +25,6 @@ pub(crate) struct TimestampFunction;
|
||||
impl TimestampFunction {
|
||||
pub fn register(registry: &FunctionRegistry) {
|
||||
registry.register(Arc::new(ToUnixtimeFunction));
|
||||
registry.register(Arc::new(GreatestFunction));
|
||||
}
|
||||
}
|
||||
|
||||
src/common/function/src/scalars/timestamp/greatest.rs (new file, 175 lines)
@@ -0,0 +1,175 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::fmt::{self};
|
||||
|
||||
use common_query::error::{
|
||||
self, ArrowComputeSnafu, InvalidFuncArgsSnafu, Result, UnsupportedInputDataTypeSnafu,
|
||||
};
|
||||
use common_query::prelude::{Signature, Volatility};
|
||||
use datatypes::arrow::array::AsArray;
|
||||
use datatypes::arrow::compute::cast;
|
||||
use datatypes::arrow::compute::kernels::comparison::gt_dyn;
|
||||
use datatypes::arrow::compute::kernels::zip;
|
||||
use datatypes::arrow::datatypes::{DataType as ArrowDataType, Date32Type};
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::vectors::{Helper, VectorRef};
|
||||
use snafu::{ensure, ResultExt};
|
||||
|
||||
use crate::scalars::function::{Function, FunctionContext};
|
||||
|
||||
#[derive(Clone, Debug, Default)]
|
||||
pub struct GreatestFunction;
|
||||
|
||||
const NAME: &str = "greatest";
|
||||
|
||||
impl Function for GreatestFunction {
|
||||
fn name(&self) -> &str {
|
||||
NAME
|
||||
}
|
||||
|
||||
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
|
||||
Ok(ConcreteDataType::date_datatype())
|
||||
}
|
||||
|
||||
fn signature(&self) -> Signature {
|
||||
Signature::uniform(
|
||||
2,
|
||||
vec![
|
||||
ConcreteDataType::string_datatype(),
|
||||
ConcreteDataType::date_datatype(),
|
||||
],
|
||||
Volatility::Immutable,
|
||||
)
|
||||
}
|
||||
|
||||
fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
|
||||
ensure!(
|
||||
columns.len() == 2,
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"The length of the args is not correct, expect exactly two, have: {}",
|
||||
columns.len()
|
||||
),
|
||||
}
|
||||
);
|
||||
match columns[0].data_type() {
|
||||
ConcreteDataType::String(_) => {
|
||||
let column1 = cast(&columns[0].to_arrow_array(), &ArrowDataType::Date32)
|
||||
.context(ArrowComputeSnafu)?;
|
||||
let column1 = column1.as_primitive::<Date32Type>();
|
||||
let column2 = cast(&columns[1].to_arrow_array(), &ArrowDataType::Date32)
|
||||
.context(ArrowComputeSnafu)?;
|
||||
let column2 = column2.as_primitive::<Date32Type>();
|
||||
let boolean_array = gt_dyn(&column1, &column2).context(ArrowComputeSnafu)?;
|
||||
let result =
|
||||
zip::zip(&boolean_array, &column1, &column2).context(ArrowComputeSnafu)?;
|
||||
Ok(Helper::try_into_vector(&result).context(error::FromArrowArraySnafu)?)
|
||||
}
|
||||
ConcreteDataType::Date(_) => {
|
||||
let column1 = columns[0].to_arrow_array();
|
||||
let column1 = column1.as_primitive::<Date32Type>();
|
||||
let column2 = columns[1].to_arrow_array();
|
||||
let column2 = column2.as_primitive::<Date32Type>();
|
||||
let boolean_array = gt_dyn(&column1, &column2).context(ArrowComputeSnafu)?;
|
||||
let result =
|
||||
zip::zip(&boolean_array, &column1, &column2).context(ArrowComputeSnafu)?;
|
||||
Ok(Helper::try_into_vector(&result).context(error::FromArrowArraySnafu)?)
|
||||
}
|
||||
_ => UnsupportedInputDataTypeSnafu {
|
||||
function: NAME,
|
||||
datatypes: columns.iter().map(|c| c.data_type()).collect::<Vec<_>>(),
|
||||
}
|
||||
.fail(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for GreatestFunction {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "GREATEST")
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::str::FromStr;
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_time::Date;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::types::DateType;
|
||||
use datatypes::value::Value;
|
||||
use datatypes::vectors::{DateVector, StringVector, Vector};
|
||||
|
||||
use super::GreatestFunction;
|
||||
use crate::scalars::function::FunctionContext;
|
||||
use crate::scalars::Function;
|
||||
|
||||
#[test]
|
||||
fn test_greatest_takes_string_vector() {
|
||||
let function = GreatestFunction;
|
||||
assert_eq!(
|
||||
function.return_type(&[]).unwrap(),
|
||||
ConcreteDataType::Date(DateType)
|
||||
);
|
||||
let columns = vec![
|
||||
Arc::new(StringVector::from(vec![
|
||||
"1970-01-01".to_string(),
|
||||
"2012-12-23".to_string(),
|
||||
])) as _,
|
||||
Arc::new(StringVector::from(vec![
|
||||
"2001-02-01".to_string(),
|
||||
"1999-01-01".to_string(),
|
||||
])) as _,
|
||||
];
|
||||
|
||||
let result = function.eval(FunctionContext::default(), &columns).unwrap();
|
||||
let result = result.as_any().downcast_ref::<DateVector>().unwrap();
|
||||
assert_eq!(result.len(), 2);
|
||||
assert_eq!(
|
||||
result.get(0),
|
||||
Value::Date(Date::from_str("2001-02-01").unwrap())
|
||||
);
|
||||
assert_eq!(
|
||||
result.get(1),
|
||||
Value::Date(Date::from_str("2012-12-23").unwrap())
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_greatest_takes_date_vector() {
|
||||
let function = GreatestFunction;
|
||||
assert_eq!(
|
||||
function.return_type(&[]).unwrap(),
|
||||
ConcreteDataType::Date(DateType)
|
||||
);
|
||||
let columns = vec![
|
||||
Arc::new(DateVector::from_slice(vec![-1, 2])) as _,
|
||||
Arc::new(DateVector::from_slice(vec![0, 1])) as _,
|
||||
];
|
||||
|
||||
let result = function.eval(FunctionContext::default(), &columns).unwrap();
|
||||
let result = result.as_any().downcast_ref::<DateVector>().unwrap();
|
||||
assert_eq!(result.len(), 2);
|
||||
assert_eq!(
|
||||
result.get(0),
|
||||
Value::Date(Date::from_str("1970-01-01").unwrap())
|
||||
);
|
||||
assert_eq!(
|
||||
result.get(1),
|
||||
Value::Date(Date::from_str("1970-01-03").unwrap())
|
||||
);
|
||||
}
|
||||
}
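// Once registered through TimestampFunction::register above, the function should be
// reachable from SQL, roughly along the lines of (illustrative, not taken from a test):
//   SELECT GREATEST('2001-02-01', '1999-01-01');  -- expected: 2001-02-01 as a Date
// Both string dates and Date columns are accepted; any other input type falls through
// to UnsupportedInputDataType.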
|
||||
@@ -10,6 +10,7 @@ async-trait.workspace = true
|
||||
common-base = { workspace = true }
|
||||
common-catalog = { workspace = true }
|
||||
common-error = { workspace = true }
|
||||
common-macro = { workspace = true }
|
||||
common-query = { workspace = true }
|
||||
common-telemetry = { workspace = true }
|
||||
common-time = { workspace = true }
|
||||
|
||||
@@ -16,15 +16,17 @@ use std::any::Any;
|
||||
|
||||
use common_error::ext::ErrorExt;
|
||||
use common_error::status_code::StatusCode;
|
||||
use common_macro::stack_trace_debug;
|
||||
use snafu::{Location, Snafu};
|
||||
|
||||
#[derive(Debug, Snafu)]
|
||||
#[derive(Snafu)]
|
||||
#[snafu(visibility(pub))]
|
||||
#[stack_trace_debug]
|
||||
pub enum Error {
|
||||
#[snafu(display("Illegal delete request, reason: {reason}"))]
|
||||
IllegalDeleteRequest { reason: String, location: Location },
|
||||
|
||||
#[snafu(display("Column datatype error, source: {}", source))]
|
||||
#[snafu(display("Column datatype error"))]
|
||||
ColumnDataType {
|
||||
location: Location,
|
||||
source: api::error::Error,
|
||||
@@ -49,7 +51,7 @@ pub enum Error {
|
||||
|
||||
#[snafu(display("Invalid column proto: {}", err_msg))]
|
||||
InvalidColumnProto { err_msg: String, location: Location },
|
||||
#[snafu(display("Failed to create vector, source: {}", source))]
|
||||
#[snafu(display("Failed to create vector"))]
|
||||
CreateVector {
|
||||
location: Location,
|
||||
source: datatypes::error::Error,
|
||||
@@ -58,11 +60,7 @@ pub enum Error {
|
||||
#[snafu(display("Missing required field in protobuf, field: {}", field))]
|
||||
MissingField { field: String, location: Location },
|
||||
|
||||
#[snafu(display(
|
||||
"Invalid column proto definition, column: {}, source: {}",
|
||||
column,
|
||||
source
|
||||
))]
|
||||
#[snafu(display("Invalid column proto definition, column: {}", column))]
|
||||
InvalidColumnDef {
|
||||
column: String,
|
||||
location: Location,
|
||||
|
||||
@@ -11,6 +11,7 @@ async-trait = "0.1"
|
||||
backtrace = "0.3"
|
||||
common-base = { workspace = true }
|
||||
common-error = { workspace = true }
|
||||
common-macro = { workspace = true }
|
||||
common-recordbatch = { workspace = true }
|
||||
common-runtime = { workspace = true }
|
||||
common-telemetry = { workspace = true }
|
||||
|
||||
@@ -16,6 +16,7 @@ use std::sync::atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering};
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use common_base::readable_size::ReadableSize;
|
||||
use common_telemetry::info;
|
||||
use dashmap::mapref::entry::Entry;
|
||||
use dashmap::DashMap;
|
||||
@@ -30,8 +31,9 @@ use crate::error::{CreateChannelSnafu, InvalidConfigFilePathSnafu, InvalidTlsCon
|
||||
|
||||
const RECYCLE_CHANNEL_INTERVAL_SECS: u64 = 60;
|
||||
pub const DEFAULT_GRPC_REQUEST_TIMEOUT_SECS: u64 = 10;
|
||||
pub const DEFAULT_GRPC_CONNECT_TIMEOUT_SECS: u64 = 10;
|
||||
pub const DEFAULT_MAX_GRPC_MESSAGE_SIZE: usize = 512 * 1024 * 1024;
|
||||
pub const DEFAULT_GRPC_CONNECT_TIMEOUT_SECS: u64 = 1;
|
||||
pub const DEFAULT_MAX_GRPC_RECV_MESSAGE_SIZE: ReadableSize = ReadableSize::mb(512);
|
||||
pub const DEFAULT_MAX_GRPC_SEND_MESSAGE_SIZE: ReadableSize = ReadableSize::mb(512);
|
||||
|
||||
lazy_static! {
|
||||
static ref ID: AtomicU64 = AtomicU64::new(0);
|
||||
@@ -248,9 +250,10 @@ pub struct ChannelConfig {
|
||||
pub tcp_keepalive: Option<Duration>,
|
||||
pub tcp_nodelay: bool,
|
||||
pub client_tls: Option<ClientTlsOption>,
|
||||
// Max gRPC message size
|
||||
// TODO(dennis): make it configurable
|
||||
pub max_message_size: usize,
|
||||
// Max gRPC receiving(decoding) message size
|
||||
pub max_recv_message_size: ReadableSize,
|
||||
// Max gRPC sending(encoding) message size
|
||||
pub max_send_message_size: ReadableSize,
|
||||
}
|
||||
|
||||
impl Default for ChannelConfig {
|
||||
@@ -269,7 +272,8 @@ impl Default for ChannelConfig {
|
||||
tcp_keepalive: None,
|
||||
tcp_nodelay: true,
|
||||
client_tls: None,
|
||||
max_message_size: DEFAULT_MAX_GRPC_MESSAGE_SIZE,
|
||||
max_recv_message_size: DEFAULT_MAX_GRPC_RECV_MESSAGE_SIZE,
|
||||
max_send_message_size: DEFAULT_MAX_GRPC_SEND_MESSAGE_SIZE,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -534,7 +538,8 @@ mod tests {
|
||||
tcp_keepalive: None,
|
||||
tcp_nodelay: true,
|
||||
client_tls: None,
|
||||
max_message_size: DEFAULT_MAX_GRPC_MESSAGE_SIZE,
|
||||
max_recv_message_size: DEFAULT_MAX_GRPC_RECV_MESSAGE_SIZE,
|
||||
max_send_message_size: DEFAULT_MAX_GRPC_SEND_MESSAGE_SIZE,
|
||||
},
|
||||
default_cfg
|
||||
);
|
||||
@@ -577,7 +582,8 @@ mod tests {
|
||||
client_cert_path: "some_cert_path".to_string(),
|
||||
client_key_path: "some_key_path".to_string(),
|
||||
}),
|
||||
max_message_size: DEFAULT_MAX_GRPC_MESSAGE_SIZE,
|
||||
max_recv_message_size: DEFAULT_MAX_GRPC_RECV_MESSAGE_SIZE,
|
||||
max_send_message_size: DEFAULT_MAX_GRPC_SEND_MESSAGE_SIZE,
|
||||
},
|
||||
cfg
|
||||
);
|
||||
|
||||
@@ -17,19 +17,22 @@ use std::io;
|
||||
|
||||
use common_error::ext::ErrorExt;
|
||||
use common_error::status_code::StatusCode;
|
||||
use common_macro::stack_trace_debug;
|
||||
use snafu::{Location, Snafu};
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
|
||||
#[derive(Debug, Snafu)]
|
||||
#[derive(Snafu)]
|
||||
#[snafu(visibility(pub))]
|
||||
#[stack_trace_debug]
|
||||
pub enum Error {
|
||||
#[snafu(display("Invalid client tls config, {}", msg))]
|
||||
InvalidTlsConfig { msg: String },
|
||||
|
||||
#[snafu(display("Invalid config file path, {}", source))]
|
||||
#[snafu(display("Invalid config file path"))]
|
||||
InvalidConfigFilePath {
|
||||
source: io::Error,
|
||||
#[snafu(source)]
|
||||
error: io::Error,
|
||||
location: Location,
|
||||
},
|
||||
|
||||
@@ -46,13 +49,14 @@ pub enum Error {
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to create gRPC channel, source: {}", source))]
|
||||
#[snafu(display("Failed to create gRPC channel"))]
|
||||
CreateChannel {
|
||||
source: tonic::transport::Error,
|
||||
#[snafu(source)]
|
||||
error: tonic::transport::Error,
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to create RecordBatch, source: {}", source))]
|
||||
#[snafu(display("Failed to create RecordBatch"))]
|
||||
CreateRecordBatch {
|
||||
location: Location,
|
||||
source: common_recordbatch::error::Error,
|
||||
@@ -61,16 +65,17 @@ pub enum Error {
|
||||
#[snafu(display("Failed to convert Arrow type: {}", from))]
|
||||
Conversion { from: String, location: Location },
|
||||
|
||||
#[snafu(display("Failed to decode FlightData, source: {}", source))]
|
||||
#[snafu(display("Failed to decode FlightData"))]
|
||||
DecodeFlightData {
|
||||
source: api::DecodeError,
|
||||
#[snafu(source)]
|
||||
error: api::DecodeError,
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Invalid FlightData, reason: {}", reason))]
|
||||
InvalidFlightData { reason: String, location: Location },
|
||||
|
||||
#[snafu(display("Failed to convert Arrow Schema, source: {}", source))]
|
||||
#[snafu(display("Failed to convert Arrow Schema"))]
|
||||
ConvertArrowSchema {
|
||||
location: Location,
|
||||
source: datatypes::error::Error,
|
||||
|
||||
@@ -13,6 +13,15 @@ common-telemetry = { workspace = true }
|
||||
proc-macro2 = "1.0.66"
|
||||
quote = "1.0"
|
||||
syn = "1.0"
|
||||
syn2 = { version = "2.0", package = "syn", features = [
|
||||
"derive",
|
||||
"parsing",
|
||||
"printing",
|
||||
"clone-impls",
|
||||
"proc-macro",
|
||||
"extra-traits",
|
||||
"full",
|
||||
] }
|
||||
|
||||
[dev-dependencies]
|
||||
arc-swap = "1.0"
|
||||
|
||||
@@ -15,6 +15,7 @@
|
||||
mod aggr_func;
|
||||
mod print_caller;
|
||||
mod range_fn;
|
||||
mod stack_trace_debug;
|
||||
|
||||
use aggr_func::{impl_aggr_func_type_store, impl_as_aggr_func_creator};
|
||||
use print_caller::process_print_caller;
|
||||
@@ -87,3 +88,23 @@ pub fn range_fn(args: TokenStream, input: TokenStream) -> TokenStream {
|
||||
pub fn print_caller(args: TokenStream, input: TokenStream) -> TokenStream {
|
||||
process_print_caller(args, input)
|
||||
}
|
||||
|
||||
/// Attribute macro to derive [std::fmt::Debug] for the annotated `Error` type.
///
/// The generated `Debug` implementation will print the error in a stack trace style. E.g.:
/// ```plaintext
/// 0: Foo error, at src/common/catalog/src/error.rs:80:10
/// 1: Bar error, at src/common/function/src/error.rs:90:10
/// 2: Root cause, invalid table name, at src/common/catalog/src/error.rs:100:10
/// ```
///
/// Notes on using this macro:
/// - `#[snafu(display)]` must be present on each enum variant,
///   and should not include `location` and `source`.
/// - Only our internal errors can be named `source`.
///   All external errors should be named `error` with an `#[snafu(source)]` annotation.
/// - The `common_error` crate must be accessible.
#[proc_macro_attribute]
pub fn stack_trace_debug(args: TokenStream, input: TokenStream) -> TokenStream {
    stack_trace_debug::stack_trace_style_impl(args.into(), input.into()).into()
}
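A hedged illustration of those conventions (variant names and messages are hypothetical; `BoxedError` stands in for an internal stack-trace error):

use common_error::ext::BoxedError;
use common_macro::stack_trace_debug;
use snafu::{Location, Snafu};

#[derive(Snafu)]
#[snafu(visibility(pub))]
#[stack_trace_debug]
pub enum Error {
    // Internal cause: the field is named `source` and is itself a stack-trace error,
    // so the generated debug_fmt recurses into it with `layer + 1`.
    #[snafu(display("Failed to load table metadata"))]
    LoadMetadata {
        source: BoxedError,
        location: Location,
    },

    // External cause: the field must be named `error` and carry #[snafu(source)];
    // it is printed with {:?} as the final layer of the stack.
    #[snafu(display("Failed to read config file {}", path))]
    ReadConfig {
        path: String,
        #[snafu(source)]
        error: std::io::Error,
        location: Location,
    },
}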
|
||||
|
||||
src/common/macro/src/stack_trace_debug.rs (new file, 278 lines)
@@ -0,0 +1,278 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//! implement `::common_error::ext::StackError`
|
||||
|
||||
use proc_macro2::{Span, TokenStream as TokenStream2};
|
||||
use quote::{quote, quote_spanned};
|
||||
use syn2::spanned::Spanned;
|
||||
use syn2::{parenthesized, Attribute, Ident, ItemEnum, Variant};
|
||||
|
||||
pub fn stack_trace_style_impl(args: TokenStream2, input: TokenStream2) -> TokenStream2 {
|
||||
let input_cloned: TokenStream2 = input.clone();
|
||||
|
||||
let error_enum_definition: ItemEnum = syn2::parse2(input_cloned).unwrap();
|
||||
let enum_name = error_enum_definition.ident;
|
||||
|
||||
let mut variants = vec![];
|
||||
|
||||
for error_variant in error_enum_definition.variants {
|
||||
let variant = ErrorVariant::from_enum_variant(error_variant);
|
||||
variants.push(variant);
|
||||
}
|
||||
|
||||
let debug_fmt_fn = build_debug_fmt_impl(enum_name.clone(), variants.clone());
|
||||
let next_fn = build_next_impl(enum_name.clone(), variants);
|
||||
let debug_impl = build_debug_impl(enum_name.clone());
|
||||
|
||||
quote! {
|
||||
#args
|
||||
#input
|
||||
|
||||
impl ::common_error::ext::StackError for #enum_name {
|
||||
#debug_fmt_fn
|
||||
#next_fn
|
||||
}
|
||||
|
||||
#debug_impl
|
||||
}
|
||||
}
|
||||
|
||||
/// Generate `debug_fmt` fn.
|
||||
///
|
||||
/// The generated fn will be like:
|
||||
/// ```rust, ignore
|
||||
/// fn debug_fmt(&self, layer: usize, buf: &mut Vec<String>);
|
||||
/// ```
|
||||
fn build_debug_fmt_impl(enum_name: Ident, variants: Vec<ErrorVariant>) -> TokenStream2 {
|
||||
let match_arms = variants
|
||||
.iter()
|
||||
.map(|v| v.to_debug_match_arm())
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
quote! {
|
||||
fn debug_fmt(&self, layer: usize, buf: &mut Vec<String>) {
|
||||
use #enum_name::*;
|
||||
match self {
|
||||
#(#match_arms)*
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Generate `next` fn.
|
||||
///
|
||||
/// The generated fn will be like:
|
||||
/// ```rust, ignore
|
||||
/// fn next(&self) -> Option<&dyn ::common_error::ext::StackError>;
|
||||
/// ```
|
||||
fn build_next_impl(enum_name: Ident, variants: Vec<ErrorVariant>) -> TokenStream2 {
|
||||
let match_arms = variants
|
||||
.iter()
|
||||
.map(|v| v.to_next_match_arm())
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
quote! {
|
||||
fn next(&self) -> Option<&dyn ::common_error::ext::StackError> {
|
||||
use #enum_name::*;
|
||||
match self {
|
||||
#(#match_arms)*
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Implement [std::fmt::Debug] via `debug_fmt`
|
||||
fn build_debug_impl(enum_name: Ident) -> TokenStream2 {
|
||||
quote! {
|
||||
impl std::fmt::Debug for #enum_name {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
use ::common_error::ext::StackError;
|
||||
let mut buf = vec![];
|
||||
self.debug_fmt(0, &mut buf);
|
||||
write!(f, "{}", buf.join("\n"))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
struct ErrorVariant {
|
||||
name: Ident,
|
||||
fields: Vec<Ident>,
|
||||
has_location: bool,
|
||||
has_source: bool,
|
||||
has_external_cause: bool,
|
||||
display: TokenStream2,
|
||||
span: Span,
|
||||
cfg_attr: Option<Attribute>,
|
||||
}
|
||||
|
||||
impl ErrorVariant {
|
||||
/// Construct self from [Variant]
|
||||
fn from_enum_variant(variant: Variant) -> Self {
|
||||
let span = variant.span();
|
||||
let mut has_location = false;
|
||||
let mut has_source = false;
|
||||
let mut has_external_cause = false;
|
||||
|
||||
for field in &variant.fields {
|
||||
if let Some(ident) = &field.ident {
|
||||
if ident == "location" {
|
||||
has_location = true;
|
||||
} else if ident == "source" {
|
||||
has_source = true;
|
||||
} else if ident == "error" {
|
||||
has_external_cause = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let mut display = None;
|
||||
let mut cfg_attr = None;
|
||||
for attr in variant.attrs {
|
||||
if attr.path().is_ident("snafu") {
|
||||
attr.parse_nested_meta(|meta| {
|
||||
if meta.path.is_ident("display") {
|
||||
let content;
|
||||
parenthesized!(content in meta.input);
|
||||
let display_ts: TokenStream2 = content.parse()?;
|
||||
display = Some(display_ts);
|
||||
Ok(())
|
||||
} else {
|
||||
Err(meta.error("unrecognized repr"))
|
||||
}
|
||||
})
|
||||
.expect("Each error should contains a display attribute");
|
||||
}
|
||||
|
||||
if attr.path().is_ident("cfg") {
|
||||
cfg_attr = Some(attr);
|
||||
}
|
||||
}
|
||||
|
||||
let field_ident = variant
|
||||
.fields
|
||||
.iter()
|
||||
.map(|f| f.ident.clone().unwrap_or_else(|| Ident::new("_", f.span())))
|
||||
.collect();
|
||||
|
||||
Self {
|
||||
name: variant.ident,
|
||||
fields: field_ident,
|
||||
has_location,
|
||||
has_source,
|
||||
has_external_cause,
|
||||
display: display.unwrap(),
|
||||
span,
|
||||
cfg_attr,
|
||||
}
|
||||
}
|
||||
|
||||
/// Convert self into an match arm that will be used in [build_debug_impl].
|
||||
///
|
||||
/// The generated match arm will be like:
|
||||
/// ```rust, ignore
|
||||
/// ErrorKindWithSource { source, .. } => {
|
||||
/// debug_fmt(source, layer + 1, buf);
|
||||
/// },
|
||||
/// ErrorKindWithoutSource { .. } => {
|
||||
/// buf.push(format!("{layer}: {}, at {}", format!(#display), location)));
|
||||
/// }
|
||||
/// ```
|
||||
///
|
||||
/// The generated code assumes fn `debug_fmt`, var `layer`, var `buf` are in scope.
|
||||
fn to_debug_match_arm(&self) -> TokenStream2 {
|
||||
let name = &self.name;
|
||||
let fields = &self.fields;
|
||||
let display = &self.display;
|
||||
let cfg = if let Some(cfg) = &self.cfg_attr {
|
||||
quote_spanned!(cfg.span() => #cfg)
|
||||
} else {
|
||||
quote! {}
|
||||
};
|
||||
|
||||
match (self.has_location, self.has_source, self.has_external_cause) {
|
||||
(true, true, _) => quote_spanned! {
|
||||
self.span => #cfg #[allow(unused_variables)] #name { #(#fields),*, } => {
|
||||
buf.push(format!("{layer}: {}, at {}", format!(#display), location));
|
||||
source.debug_fmt(layer + 1, buf);
|
||||
},
|
||||
},
|
||||
(true, false, true) => quote_spanned! {
|
||||
self.span => #cfg #[allow(unused_variables)] #name { #(#fields),* } => {
|
||||
buf.push(format!("{layer}: {}, at {}", format!(#display), location));
|
||||
buf.push(format!("{}: {:?}", layer + 1, error));
|
||||
},
|
||||
},
|
||||
(true, false, false) => quote_spanned! {
|
||||
self.span => #cfg #[allow(unused_variables)] #name { #(#fields),* } => {
|
||||
buf.push(format!("{layer}: {}, at {}", format!(#display), location));
|
||||
},
|
||||
},
|
||||
(false, true, _) => quote_spanned! {
|
||||
self.span => #cfg #[allow(unused_variables)] #name { #(#fields),* } => {
|
||||
buf.push(format!("{layer}: {}", format!(#display)));
|
||||
source.debug_fmt(layer + 1, buf);
|
||||
},
|
||||
},
|
||||
(false, false, true) => quote_spanned! {
|
||||
self.span => #cfg #[allow(unused_variables)] #name { #(#fields),* } => {
|
||||
buf.push(format!("{layer}: {}", format!(#display)));
|
||||
buf.push(format!("{}: {:?}", layer + 1, error));
|
||||
},
|
||||
},
|
||||
(false, false, false) => quote_spanned! {
|
||||
self.span => #cfg #[allow(unused_variables)] #name { #(#fields),* } => {
|
||||
buf.push(format!("{layer}: {}", format!(#display)));
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
/// Convert self into an match arm that will be used in [build_next_impl].
|
||||
///
|
||||
/// The generated match arm will be like:
|
||||
/// ```rust, ignore
|
||||
/// ErrorKindWithSource { source, .. } => {
|
||||
/// Some(source)
|
||||
/// },
|
||||
/// ErrorKindWithoutSource { .. } => {
|
||||
/// None
|
||||
/// }
|
||||
/// ```
|
||||
fn to_next_match_arm(&self) -> TokenStream2 {
|
||||
let name = &self.name;
|
||||
let fields = &self.fields;
|
||||
let cfg = if let Some(cfg) = &self.cfg_attr {
|
||||
quote_spanned!(cfg.span() => #cfg)
|
||||
} else {
|
||||
quote! {}
|
||||
};
|
||||
|
||||
if self.has_source {
|
||||
quote_spanned! {
|
||||
self.span => #cfg #[allow(unused_variables)] #name { #(#fields),* } => {
|
||||
Some(source)
|
||||
},
|
||||
}
|
||||
} else {
|
||||
quote_spanned! {
|
||||
self.span => #cfg #[allow(unused_variables)] #name { #(#fields),* } =>{
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -6,6 +6,7 @@ license.workspace = true
|
||||
|
||||
[dependencies]
|
||||
common-error = { workspace = true }
|
||||
common-macro = { workspace = true }
|
||||
snafu.workspace = true
|
||||
tempfile = "3.4"
|
||||
tokio.workspace = true
|
||||
|
||||
@@ -16,14 +16,16 @@ use std::any::Any;
|
||||
|
||||
use common_error::ext::{BoxedError, ErrorExt};
|
||||
use common_error::status_code::StatusCode;
|
||||
use common_macro::stack_trace_debug;
|
||||
use snafu::Snafu;
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
|
||||
#[derive(Debug, Snafu)]
|
||||
#[derive(Snafu)]
|
||||
#[snafu(visibility(pub))]
|
||||
#[stack_trace_debug]
|
||||
pub enum Error {
|
||||
#[snafu(display("{source}"))]
|
||||
#[snafu(display("Internal error"))]
|
||||
Internal { source: BoxedError },
|
||||
|
||||
#[snafu(display("Memory profiling is not supported"))]
|
||||
|
||||
@@ -17,13 +17,18 @@ use std::path::PathBuf;
|
||||
|
||||
use common_error::ext::{BoxedError, ErrorExt};
|
||||
use common_error::status_code::StatusCode;
|
||||
use common_macro::stack_trace_debug;
|
||||
use snafu::{Location, Snafu};
|
||||
|
||||
#[derive(Debug, Snafu)]
|
||||
#[derive(Snafu)]
|
||||
#[snafu(visibility(pub))]
|
||||
#[stack_trace_debug]
|
||||
pub enum Error {
|
||||
#[snafu(display("Failed to read OPT_PROF, source: {}", source))]
|
||||
ReadOptProf { source: tikv_jemalloc_ctl::Error },
|
||||
#[snafu(display("Failed to read OPT_PROF"))]
|
||||
ReadOptProf {
|
||||
#[snafu(source)]
|
||||
error: tikv_jemalloc_ctl::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Memory profiling is not enabled"))]
|
||||
ProfilingNotEnabled,
|
||||
@@ -31,20 +36,18 @@ pub enum Error {
|
||||
#[snafu(display("Failed to build temp file from given path: {:?}", path))]
|
||||
BuildTempPath { path: PathBuf, location: Location },
|
||||
|
||||
#[snafu(display("Failed to open temp file: {}, source: {}", path, source))]
|
||||
#[snafu(display("Failed to open temp file: {}", path))]
|
||||
OpenTempFile {
|
||||
path: String,
|
||||
source: std::io::Error,
|
||||
#[snafu(source)]
|
||||
error: std::io::Error,
|
||||
},
|
||||
|
||||
#[snafu(display(
|
||||
"Failed to dump profiling data to temp file: {:?}, source: {}",
|
||||
path,
|
||||
source
|
||||
))]
|
||||
#[snafu(display("Failed to dump profiling data to temp file: {:?}", path))]
|
||||
DumpProfileData {
|
||||
path: PathBuf,
|
||||
source: tikv_jemalloc_ctl::Error,
|
||||
#[snafu(source)]
|
||||
error: tikv_jemalloc_ctl::Error,
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
@@ -12,9 +12,12 @@ api = { workspace = true }
|
||||
arrow-flight.workspace = true
|
||||
async-stream.workspace = true
|
||||
async-trait.workspace = true
|
||||
base64 = "0.21"
|
||||
bytes = "1.4"
|
||||
common-catalog = { workspace = true }
|
||||
common-error = { workspace = true }
|
||||
common-grpc-expr.workspace = true
|
||||
common-macro = { workspace = true }
|
||||
common-procedure = { workspace = true }
|
||||
common-recordbatch = { workspace = true }
|
||||
common-runtime = { workspace = true }
|
||||
|
||||
@@ -17,8 +17,27 @@ use std::sync::Arc;
|
||||
use table::metadata::TableId;
|
||||
|
||||
use crate::error::Result;
|
||||
use crate::key::table_info::TableInfoKey;
|
||||
use crate::key::table_name::TableNameKey;
|
||||
use crate::key::table_route::TableRouteKey;
|
||||
use crate::key::TableMetaKey;
|
||||
use crate::table_name::TableName;
|
||||
|
||||
/// KvBackend cache invalidator
|
||||
#[async_trait::async_trait]
|
||||
pub trait KvCacheInvalidator: Send + Sync {
|
||||
async fn invalidate_key(&self, key: &[u8]);
|
||||
}
|
||||
|
||||
pub type KvCacheInvalidatorRef = Arc<dyn KvCacheInvalidator>;
|
||||
|
||||
pub struct DummyKvCacheInvalidator;
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl KvCacheInvalidator for DummyKvCacheInvalidator {
|
||||
async fn invalidate_key(&self, _key: &[u8]) {}
|
||||
}
|
||||
|
||||
/// Places context of invalidating cache. e.g., span id, trace id etc.
|
||||
#[derive(Default)]
|
||||
pub struct Context {
|
||||
@@ -47,3 +66,27 @@ impl CacheInvalidator for DummyCacheInvalidator {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl<T> CacheInvalidator for T
|
||||
where
|
||||
T: KvCacheInvalidator,
|
||||
{
|
||||
async fn invalidate_table_name(&self, _ctx: &Context, table_name: TableName) -> Result<()> {
|
||||
let key: TableNameKey = (&table_name).into();
|
||||
|
||||
self.invalidate_key(&key.as_raw_key()).await;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn invalidate_table_id(&self, _ctx: &Context, table_id: TableId) -> Result<()> {
|
||||
let key = TableInfoKey::new(table_id);
|
||||
self.invalidate_key(&key.as_raw_key()).await;
|
||||
|
||||
let key = &TableRouteKey { table_id };
|
||||
self.invalidate_key(&key.as_raw_key()).await;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
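The blanket impl above is the interesting part: anything that can drop a raw metadata key automatically gains the table-level `CacheInvalidator` behaviour (the name key for a table name, the info and route keys for a table id). A hedged sketch of a participant, using a DashMap-backed cache as a stand-in:

// Hypothetical cache type; only invalidate_key has to be provided.
struct LocalMetaCache {
    entries: dashmap::DashMap<Vec<u8>, Vec<u8>>,
}

#[async_trait::async_trait]
impl KvCacheInvalidator for LocalMetaCache {
    async fn invalidate_key(&self, key: &[u8]) {
        self.entries.remove(key);
    }
}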
|
||||
|
||||
@@ -28,6 +28,7 @@ use crate::rpc::router::RegionRoute;
|
||||
pub mod alter_table;
|
||||
pub mod create_table;
|
||||
pub mod drop_table;
|
||||
pub mod truncate_table;
|
||||
pub mod utils;
|
||||
|
||||
#[derive(Debug, Default)]
|
||||
|
||||
@@ -45,6 +45,7 @@ use crate::error::{
|
||||
use crate::key::table_info::TableInfoValue;
|
||||
use crate::key::table_name::TableNameKey;
|
||||
use crate::key::table_route::TableRouteValue;
|
||||
use crate::key::DeserializedValueWithBytes;
|
||||
use crate::metrics;
|
||||
use crate::rpc::ddl::AlterTableTask;
|
||||
use crate::rpc::router::{find_leader_regions, find_leaders};
|
||||
@@ -63,7 +64,7 @@ impl AlterTableProcedure {
|
||||
pub fn new(
|
||||
cluster_id: u64,
|
||||
task: AlterTableTask,
|
||||
table_info_value: TableInfoValue,
|
||||
table_info_value: DeserializedValueWithBytes<TableInfoValue>,
|
||||
context: DdlContext,
|
||||
) -> Result<Self> {
|
||||
let alter_kind = task
|
||||
@@ -191,7 +192,8 @@ impl AlterTableProcedure {
|
||||
.await?
|
||||
.with_context(|| TableRouteNotFoundSnafu {
|
||||
table_name: table_ref.to_string(),
|
||||
})?;
|
||||
})?
|
||||
.into_inner();
|
||||
|
||||
let leaders = find_leaders(®ion_routes);
|
||||
let mut alter_region_tasks = Vec::with_capacity(leaders.len());
|
||||
@@ -413,7 +415,7 @@ pub struct AlterTableData {
|
||||
state: AlterTableState,
|
||||
task: AlterTableTask,
|
||||
/// Table info value before alteration.
|
||||
table_info_value: TableInfoValue,
|
||||
table_info_value: DeserializedValueWithBytes<TableInfoValue>,
|
||||
cluster_id: u64,
|
||||
/// Next column id of the table if the task adds columns to the table.
|
||||
next_column_id: Option<ColumnId>,
|
||||
@@ -422,7 +424,7 @@ pub struct AlterTableData {
|
||||
impl AlterTableData {
|
||||
pub fn new(
|
||||
task: AlterTableTask,
|
||||
table_info_value: TableInfoValue,
|
||||
table_info_value: DeserializedValueWithBytes<TableInfoValue>,
|
||||
cluster_id: u64,
|
||||
next_column_id: Option<ColumnId>,
|
||||
) -> Self {
|
||||
|
||||
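A recurring change in the DDL procedures from here on: table metadata values are passed around as `DeserializedValueWithBytes<T>` instead of bare `TableInfoValue` / `TableRouteValue`. Judging from how it is used (`.into_inner()` above, direct field access like `table_info_value.table_info` in the truncate procedure below), it appears to pair the decoded value with the raw bytes it was deserialized from; the sketch below is an assumption about intent, not a definition from this diff:

// Assumed usage pattern: a procedure keeps the wrapper around so later metadata updates
// can compare-and-swap against the exact stored bytes, and unwraps it with into_inner()
// when only the decoded value matters.
fn table_id_of(wrapped: DeserializedValueWithBytes<TableInfoValue>) -> TableId {
    let value: TableInfoValue = wrapped.into_inner();
    value.table_info.ident.table_id
}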
@@ -161,7 +161,6 @@ impl CreateTableProcedure {
|
||||
engine: create_table_expr.engine.to_string(),
|
||||
column_defs,
|
||||
primary_key,
|
||||
create_if_not_exists: true,
|
||||
path: String::new(),
|
||||
options: create_table_expr.table_options.clone(),
|
||||
})
|
||||
@@ -200,8 +199,8 @@ impl CreateTableProcedure {
|
||||
for request in requests {
|
||||
let request = RegionRequest {
|
||||
header: Some(RegionRequestHeader {
|
||||
trace_id: 0,
|
||||
span_id: 0,
|
||||
trace_id: common_telemetry::trace_id().unwrap_or_default(),
|
||||
..Default::default()
|
||||
}),
|
||||
body: Some(request),
|
||||
};
|
||||
|
||||
@@ -39,6 +39,7 @@ use crate::error::{self, Result};
|
||||
use crate::key::table_info::TableInfoValue;
|
||||
use crate::key::table_name::TableNameKey;
|
||||
use crate::key::table_route::TableRouteValue;
|
||||
use crate::key::DeserializedValueWithBytes;
|
||||
use crate::metrics;
|
||||
use crate::rpc::ddl::DropTableTask;
|
||||
use crate::rpc::router::{find_leader_regions, find_leaders, RegionRoute};
|
||||
@@ -55,8 +56,8 @@ impl DropTableProcedure {
|
||||
pub fn new(
|
||||
cluster_id: u64,
|
||||
task: DropTableTask,
|
||||
table_route_value: TableRouteValue,
|
||||
table_info_value: TableInfoValue,
|
||||
table_route_value: DeserializedValueWithBytes<TableRouteValue>,
|
||||
table_info_value: DeserializedValueWithBytes<TableInfoValue>,
|
||||
context: DdlContext,
|
||||
) -> Self {
|
||||
Self {
|
||||
@@ -156,8 +157,8 @@ impl DropTableProcedure {
|
||||
|
||||
let request = RegionRequest {
|
||||
header: Some(RegionRequestHeader {
|
||||
trace_id: 0,
|
||||
span_id: 0,
|
||||
trace_id: common_telemetry::trace_id().unwrap_or_default(),
|
||||
..Default::default()
|
||||
}),
|
||||
body: Some(region_request::Body::Drop(PbDropRegionRequest {
|
||||
region_id: region_id.as_u64(),
|
||||
@@ -231,16 +232,16 @@ pub struct DropTableData {
|
||||
pub state: DropTableState,
|
||||
pub cluster_id: u64,
|
||||
pub task: DropTableTask,
|
||||
pub table_route_value: TableRouteValue,
|
||||
pub table_info_value: TableInfoValue,
|
||||
pub table_route_value: DeserializedValueWithBytes<TableRouteValue>,
|
||||
pub table_info_value: DeserializedValueWithBytes<TableInfoValue>,
|
||||
}
|
||||
|
||||
impl DropTableData {
|
||||
pub fn new(
|
||||
cluster_id: u64,
|
||||
task: DropTableTask,
|
||||
table_route_value: TableRouteValue,
|
||||
table_info_value: TableInfoValue,
|
||||
table_route_value: DeserializedValueWithBytes<TableRouteValue>,
|
||||
table_info_value: DeserializedValueWithBytes<TableInfoValue>,
|
||||
) -> Self {
|
||||
Self {
|
||||
state: DropTableState::Prepare,
|
||||
|
||||
src/common/meta/src/ddl/truncate_table.rs (new file, 235 lines)
@@ -0,0 +1,235 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use api::v1::region::{
|
||||
region_request, RegionRequest, RegionRequestHeader, TruncateRequest as PbTruncateRegionRequest,
|
||||
};
|
||||
use async_trait::async_trait;
|
||||
use common_procedure::error::{FromJsonSnafu, ToJsonSnafu};
|
||||
use common_procedure::{
|
||||
Context as ProcedureContext, LockKey, Procedure, Result as ProcedureResult, Status,
|
||||
};
|
||||
use common_telemetry::debug;
|
||||
use futures::future::join_all;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use snafu::{ensure, ResultExt};
|
||||
use store_api::storage::RegionId;
|
||||
use strum::AsRefStr;
|
||||
use table::engine::TableReference;
|
||||
use table::metadata::{RawTableInfo, TableId};
|
||||
|
||||
use super::utils::handle_retry_error;
|
||||
use crate::ddl::utils::handle_operate_region_error;
|
||||
use crate::ddl::DdlContext;
|
||||
use crate::error::{Result, TableNotFoundSnafu};
|
||||
use crate::key::table_info::TableInfoValue;
|
||||
use crate::key::table_name::TableNameKey;
|
||||
use crate::key::DeserializedValueWithBytes;
|
||||
use crate::metrics;
|
||||
use crate::rpc::ddl::TruncateTableTask;
|
||||
use crate::rpc::router::{find_leader_regions, find_leaders, RegionRoute};
|
||||
use crate::table_name::TableName;
|
||||
|
||||
pub struct TruncateTableProcedure {
|
||||
context: DdlContext,
|
||||
data: TruncateTableData,
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Procedure for TruncateTableProcedure {
|
||||
fn type_name(&self) -> &str {
|
||||
Self::TYPE_NAME
|
||||
}
|
||||
|
||||
async fn execute(&mut self, _ctx: &ProcedureContext) -> ProcedureResult<Status> {
|
||||
let state = &self.data.state;
|
||||
|
||||
let _timer = common_telemetry::timer!(
|
||||
metrics::METRIC_META_PROCEDURE_TRUNCATE_TABLE,
|
||||
&[("step", state.as_ref().to_string())]
|
||||
);
|
||||
|
||||
match self.data.state {
|
||||
TruncateTableState::Prepare => self.on_prepare().await,
|
||||
TruncateTableState::DatanodeTruncateRegions => {
|
||||
self.on_datanode_truncate_regions().await
|
||||
}
|
||||
}
|
||||
.map_err(handle_retry_error)
|
||||
}
|
||||
|
||||
fn dump(&self) -> ProcedureResult<String> {
|
||||
serde_json::to_string(&self.data).context(ToJsonSnafu)
|
||||
}
|
||||
|
||||
fn lock_key(&self) -> LockKey {
|
||||
let table_ref = &self.data.table_ref();
|
||||
let key = common_catalog::format_full_table_name(
|
||||
table_ref.catalog,
|
||||
table_ref.schema,
|
||||
table_ref.table,
|
||||
);
|
||||
|
||||
LockKey::single(key)
|
||||
}
|
||||
}
|
||||
|
||||
impl TruncateTableProcedure {
|
||||
pub(crate) const TYPE_NAME: &'static str = "metasrv-procedure::TruncateTable";
|
||||
|
||||
pub(crate) fn new(
|
||||
cluster_id: u64,
|
||||
task: TruncateTableTask,
|
||||
table_info_value: DeserializedValueWithBytes<TableInfoValue>,
|
||||
region_routes: Vec<RegionRoute>,
|
||||
context: DdlContext,
|
||||
) -> Self {
|
||||
Self {
|
||||
context,
|
||||
data: TruncateTableData::new(cluster_id, task, table_info_value, region_routes),
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn from_json(json: &str, context: DdlContext) -> ProcedureResult<Self> {
|
||||
let data = serde_json::from_str(json).context(FromJsonSnafu)?;
|
||||
Ok(Self { context, data })
|
||||
}
|
||||
|
||||
// Checks whether the table exists.
|
||||
async fn on_prepare(&mut self) -> Result<Status> {
|
||||
let table_ref = &self.data.table_ref();
|
||||
|
||||
let manager = &self.context.table_metadata_manager;
|
||||
|
||||
let exist = manager
|
||||
.table_name_manager()
|
||||
.exists(TableNameKey::new(
|
||||
table_ref.catalog,
|
||||
table_ref.schema,
|
||||
table_ref.table,
|
||||
))
|
||||
.await?;
|
||||
|
||||
ensure!(
|
||||
exist,
|
||||
TableNotFoundSnafu {
|
||||
table_name: table_ref.to_string()
|
||||
}
|
||||
);
|
||||
|
||||
self.data.state = TruncateTableState::DatanodeTruncateRegions;
|
||||
|
||||
Ok(Status::executing(true))
|
||||
}
|
||||
|
||||
    async fn on_datanode_truncate_regions(&mut self) -> Result<Status> {
        let table_id = self.data.table_id();

        let region_routes = &self.data.region_routes;
        let leaders = find_leaders(region_routes);
        let mut truncate_region_tasks = Vec::with_capacity(leaders.len());

        for datanode in leaders {
            let requester = self.context.datanode_manager.datanode(&datanode).await;
            let regions = find_leader_regions(region_routes, &datanode);

            for region in regions {
                let region_id = RegionId::new(table_id, region);
                debug!(
                    "Truncating table {} region {} on Datanode {:?}",
                    self.data.table_ref(),
                    region_id,
                    datanode
                );

                let request = RegionRequest {
                    header: Some(RegionRequestHeader {
                        trace_id: common_telemetry::trace_id().unwrap_or_default(),
                        ..Default::default()
                    }),
                    body: Some(region_request::Body::Truncate(PbTruncateRegionRequest {
                        region_id: region_id.as_u64(),
                    })),
                };

                let datanode = datanode.clone();
                let requester = requester.clone();

                truncate_region_tasks.push(async move {
                    if let Err(err) = requester.handle(request).await {
                        return Err(handle_operate_region_error(datanode)(err));
                    }
                    Ok(())
                });
            }
        }

        join_all(truncate_region_tasks)
            .await
            .into_iter()
            .collect::<Result<Vec<_>>>()?;

        Ok(Status::Done)
    }
}

#[derive(Debug, Serialize, Deserialize)]
pub struct TruncateTableData {
    state: TruncateTableState,
    cluster_id: u64,
    task: TruncateTableTask,
    table_info_value: DeserializedValueWithBytes<TableInfoValue>,
    region_routes: Vec<RegionRoute>,
}

impl TruncateTableData {
    pub fn new(
        cluster_id: u64,
        task: TruncateTableTask,
        table_info_value: DeserializedValueWithBytes<TableInfoValue>,
        region_routes: Vec<RegionRoute>,
    ) -> Self {
        Self {
            state: TruncateTableState::Prepare,
            cluster_id,
            task,
            table_info_value,
            region_routes,
        }
    }

    pub fn table_ref(&self) -> TableReference {
        self.task.table_ref()
    }

    pub fn table_name(&self) -> TableName {
        self.task.table_name()
    }

    fn table_info(&self) -> &RawTableInfo {
        &self.table_info_value.table_info
    }

    fn table_id(&self) -> TableId {
        self.table_info().ident.table_id
    }
}

#[derive(Debug, Serialize, Deserialize, AsRefStr)]
enum TruncateTableState {
    /// Prepares to truncate the table
    Prepare,
    /// Truncates regions on Datanode
    DatanodeTruncateRegions,
}

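For orientation only (not part of the change above): a minimal, self-contained sketch of the two-step state flow that `execute` drives. The names `State`, `next`, and `main` are local to the sketch and do not exist in the GreptimeDB source.

// Illustrative sketch of the TruncateTable state progression.
#[derive(Debug, Clone, Copy, PartialEq)]
enum State {
    Prepare,
    DatanodeTruncateRegions,
    Done,
}

fn next(state: State) -> State {
    match state {
        // on_prepare: ensure the table still exists, then move on.
        State::Prepare => State::DatanodeTruncateRegions,
        // on_datanode_truncate_regions: fan out truncate requests, then finish.
        State::DatanodeTruncateRegions => State::Done,
        State::Done => State::Done,
    }
}

fn main() {
    let mut state = State::Prepare;
    while state != State::Done {
        state = next(state);
        println!("-> {state:?}");
    }
}
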
@@ -15,7 +15,7 @@
use std::sync::Arc;

use common_procedure::{watcher, ProcedureId, ProcedureManagerRef, ProcedureWithId};
use common_telemetry::{error, info};
use common_telemetry::info;
use snafu::{OptionExt, ResultExt};

use crate::cache_invalidator::CacheInvalidatorRef;
@@ -23,18 +23,19 @@ use crate::datanode_manager::DatanodeManagerRef;
use crate::ddl::alter_table::AlterTableProcedure;
use crate::ddl::create_table::CreateTableProcedure;
use crate::ddl::drop_table::DropTableProcedure;
use crate::ddl::truncate_table::TruncateTableProcedure;
use crate::ddl::{
    DdlContext, DdlTaskExecutor, ExecutorContext, TableMetadataAllocatorContext,
    TableMetadataAllocatorRef,
};
use crate::error::{
    self, RegisterProcedureLoaderSnafu, Result, SubmitProcedureSnafu, TableNotFoundSnafu,
    UnsupportedSnafu, WaitProcedureSnafu,
    WaitProcedureSnafu,
};
use crate::key::table_info::TableInfoValue;
use crate::key::table_name::TableNameKey;
use crate::key::table_route::TableRouteValue;
use crate::key::TableMetadataManagerRef;
use crate::key::{DeserializedValueWithBytes, TableMetadataManagerRef};
use crate::rpc::ddl::DdlTask::{AlterTable, CreateTable, DropTable, TruncateTable};
use crate::rpc::ddl::{
    AlterTableTask, CreateTableTask, DropTableTask, SubmitDdlTaskRequest, SubmitDdlTaskResponse,

@@ -122,6 +123,20 @@ impl DdlManager {
            )
            .context(RegisterProcedureLoaderSnafu {
                type_name: AlterTableProcedure::TYPE_NAME,
            })?;

        let context = self.create_context();

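        // Register a loader so a persisted TruncateTableProcedure (see `dump`/`from_json`)
        // can be re-created by the procedure framework from its JSON state.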
        self.procedure_manager
            .register_loader(
                TruncateTableProcedure::TYPE_NAME,
                Box::new(move |json| {
                    let context = context.clone();
                    TruncateTableProcedure::from_json(json, context).map(|p| Box::new(p) as _)
                }),
            )
            .context(RegisterProcedureLoaderSnafu {
                type_name: TruncateTableProcedure::TYPE_NAME,
            })
    }

@@ -129,7 +144,7 @@ impl DdlManager {
        &self,
        cluster_id: u64,
        alter_table_task: AlterTableTask,
        table_info_value: TableInfoValue,
        table_info_value: DeserializedValueWithBytes<TableInfoValue>,
    ) -> Result<ProcedureId> {
        let context = self.create_context();

@@ -161,8 +176,8 @@ impl DdlManager {
        &self,
        cluster_id: u64,
        drop_table_task: DropTableTask,
        table_info_value: TableInfoValue,
        table_route_value: TableRouteValue,
        table_info_value: DeserializedValueWithBytes<TableInfoValue>,
        table_route_value: DeserializedValueWithBytes<TableRouteValue>,
    ) -> Result<ProcedureId> {
        let context = self.create_context();

@@ -183,15 +198,21 @@ impl DdlManager {
        &self,
        cluster_id: u64,
        truncate_table_task: TruncateTableTask,
        table_info_value: DeserializedValueWithBytes<TableInfoValue>,
        region_routes: Vec<RegionRoute>,
    ) -> Result<ProcedureId> {
        error!("Truncate table procedure is not supported, cluster_id = {}, truncate_table_task = {:?}, region_routes = {:?}",
            cluster_id, truncate_table_task, region_routes);
        let context = self.create_context();
        let procedure = TruncateTableProcedure::new(
            cluster_id,
            truncate_table_task,
            table_info_value,
            region_routes,
            context,
        );

        UnsupportedSnafu {
            operation: "TRUNCATE TABLE",
        }
        .fail()
        let procedure_with_id = ProcedureWithId::with_random_id(Box::new(procedure));

        self.submit_procedure(procedure_with_id).await
    }

    async fn submit_procedure(&self, procedure_with_id: ProcedureWithId) -> Result<ProcedureId> {

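In the handler below, the table info and table route are now resolved together through `get_full_table_info` before the truncate procedure is submitted with both values.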
@@ -216,32 +237,34 @@ async fn handle_truncate_table_task(
    cluster_id: u64,
    truncate_table_task: TruncateTableTask,
) -> Result<SubmitDdlTaskResponse> {
    let truncate_table = &truncate_table_task.truncate_table;
    let table_id = truncate_table
        .table_id
        .as_ref()
        .context(error::UnexpectedSnafu {
            err_msg: "expected table id ",
        })?
        .id;

    let table_id = truncate_table_task.table_id;
    let table_metadata_manager = &ddl_manager.table_metadata_manager();
    let table_ref = truncate_table_task.table_ref();

    let table_route_value = ddl_manager
        .table_metadata_manager()
        .table_route_manager()
        .get(table_id)
        .await?
        .with_context(|| error::TableRouteNotFoundSnafu {
            table_name: table_ref.to_string(),
        })?;
    let (table_info_value, table_route_value) =
        table_metadata_manager.get_full_table_info(table_id).await?;

    let table_route = table_route_value.region_routes;
    let table_info_value = table_info_value.with_context(|| error::TableInfoNotFoundSnafu {
        table_name: table_ref.to_string(),
    })?;

    let table_route_value = table_route_value.with_context(|| error::TableRouteNotFoundSnafu {
        table_name: table_ref.to_string(),
    })?;

    let table_route = table_route_value.into_inner().region_routes;

    let id = ddl_manager
        .submit_truncate_table_task(cluster_id, truncate_table_task, table_route)
        .submit_truncate_table_task(
            cluster_id,
            truncate_table_task,
            table_info_value,
            table_route,
        )
        .await?;

    info!("Table: {table_id} is truncated via procedure_id {id:?}");

    Ok(SubmitDdlTaskResponse {
        key: id.to_string().into(),
        ..Default::default()
Some files were not shown because too many files have changed in this diff.