Mirror of https://github.com/GreptimeTeam/greptimedb.git, synced 2026-01-04 12:22:55 +00:00.

Compare commits: v0.12.0-ni... poc-write- (25 commits)
| SHA1 |
|---|
| 68593ae92a |
| 91d755d9b5 |
| 2566d254ad |
| 0ec4ed804d |
| cc435234a4 |
| 9c4aa81f85 |
| bdbb5435ea |
| fd9940a253 |
| e0bafd661c |
| 99baa86b6a |
| 76d69901ea |
| 764a57b80a |
| 95b388d819 |
| c2b556e321 |
| 06ebe6b3fb |
| bec8245e75 |
| 3cb2343f7f |
| d10c207371 |
| 1a73a40bd9 |
| 713a73e9b2 |
| 65a88a63db |
| 5ad1436a8f |
| ae59206caf |
| 094d0fcdf5 |
| 7170120de6 |
@@ -47,11 +47,7 @@ runs:
password: ${{ inputs.image-registry-password }}

- name: Set up qemu for multi-platform builds
uses: docker/setup-qemu-action@v3
with:
platforms: linux/amd64,linux/arm64
# The latest version will lead to segmentation fault.
image: tonistiigi/binfmt:qemu-v7.0.0-28
uses: docker/setup-qemu-action@v2

- name: Set up buildx
uses: docker/setup-buildx-action@v2
.github/workflows/apidoc.yml (2 changed lines, vendored)

@@ -17,8 +17,6 @@ jobs:
runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@v4
with:
persist-credentials: false
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
.github/workflows/dependency-check.yml (2 changed lines, vendored)

@@ -12,8 +12,6 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
persist-credentials: false

- name: Set up Rust
uses: actions-rust-lang/setup-rust-toolchain@v1
.github/workflows/dev-build.yml (25 changed lines, vendored)
@@ -76,9 +76,15 @@ env:
|
||||
|
||||
NIGHTLY_RELEASE_PREFIX: nightly
|
||||
|
||||
# Use the different image name to avoid conflict with the release images.
|
||||
IMAGE_NAME: greptimedb-dev
|
||||
|
||||
# The source code will check out in the following path: '${WORKING_DIR}/dev/greptime'.
|
||||
CHECKOUT_GREPTIMEDB_PATH: dev/greptimedb
|
||||
|
||||
permissions:
|
||||
issues: write
|
||||
|
||||
jobs:
|
||||
allocate-runners:
|
||||
name: Allocate runners
|
||||
@@ -101,7 +107,6 @@ jobs:
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Create version
|
||||
id: create-version
|
||||
@@ -156,7 +161,6 @@ jobs:
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Checkout greptimedb
|
||||
uses: actions/checkout@v4
|
||||
@@ -164,7 +168,6 @@ jobs:
|
||||
repository: ${{ inputs.repository }}
|
||||
ref: ${{ inputs.commit }}
|
||||
path: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}
|
||||
persist-credentials: true
|
||||
|
||||
- uses: ./.github/actions/build-linux-artifacts
|
||||
with:
|
||||
@@ -189,7 +192,6 @@ jobs:
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Checkout greptimedb
|
||||
uses: actions/checkout@v4
|
||||
@@ -197,7 +199,6 @@ jobs:
|
||||
repository: ${{ inputs.repository }}
|
||||
ref: ${{ inputs.commit }}
|
||||
path: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}
|
||||
persist-credentials: true
|
||||
|
||||
- uses: ./.github/actions/build-linux-artifacts
|
||||
with:
|
||||
@@ -225,14 +226,13 @@ jobs:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Build and push images to dockerhub
|
||||
uses: ./.github/actions/build-images
|
||||
with:
|
||||
image-registry: docker.io
|
||||
image-namespace: ${{ vars.IMAGE_NAMESPACE }}
|
||||
image-name: ${{ vars.DEV_BUILD_IMAGE_NAME }}
|
||||
image-name: ${{ env.IMAGE_NAME }}
|
||||
image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
version: ${{ needs.allocate-runners.outputs.version }}
|
||||
@@ -257,14 +257,13 @@ jobs:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Release artifacts to CN region
|
||||
uses: ./.github/actions/release-cn-artifacts
|
||||
with:
|
||||
src-image-registry: docker.io
|
||||
src-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
|
||||
src-image-name: ${{ vars.DEV_BUILD_IMAGE_NAME }}
|
||||
src-image-name: ${{ env.IMAGE_NAME }}
|
||||
dst-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
|
||||
dst-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
|
||||
dst-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
|
||||
@@ -292,7 +291,6 @@ jobs:
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Stop EC2 runner
|
||||
uses: ./.github/actions/stop-runner
|
||||
@@ -318,7 +316,6 @@ jobs:
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Stop EC2 runner
|
||||
uses: ./.github/actions/stop-runner
|
||||
@@ -337,16 +334,10 @@ jobs:
|
||||
release-images-to-dockerhub
|
||||
]
|
||||
runs-on: ubuntu-20.04
|
||||
permissions:
|
||||
issues: write
|
||||
|
||||
env:
|
||||
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
- uses: ./.github/actions/setup-cyborg
|
||||
- name: Report CI status
|
||||
id: report-ci-status
|
||||
|
||||
.github/workflows/develop.yml (32 changed lines, vendored)
@@ -26,8 +26,6 @@ jobs:
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
persist-credentials: false
|
||||
- uses: crate-ci/typos@master
|
||||
- name: Check the config docs
|
||||
run: |
|
||||
@@ -40,8 +38,6 @@ jobs:
|
||||
name: Check License Header
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
persist-credentials: false
|
||||
- uses: korandoru/hawkeye@v5
|
||||
|
||||
check:
|
||||
@@ -53,8 +49,6 @@ jobs:
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
persist-credentials: false
|
||||
- uses: arduino/setup-protoc@v3
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
@@ -76,8 +70,6 @@ jobs:
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
persist-credentials: false
|
||||
- uses: actions-rust-lang/setup-rust-toolchain@v1
|
||||
- name: Install taplo
|
||||
run: cargo +stable install taplo-cli --version ^0.9 --locked --force
|
||||
@@ -93,8 +85,6 @@ jobs:
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
persist-credentials: false
|
||||
- uses: arduino/setup-protoc@v3
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
@@ -149,8 +139,6 @@ jobs:
|
||||
echo "Disk space after:"
|
||||
df -h
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
persist-credentials: false
|
||||
- uses: arduino/setup-protoc@v3
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
@@ -204,8 +192,6 @@ jobs:
|
||||
echo "Disk space after:"
|
||||
df -h
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
persist-credentials: false
|
||||
- uses: arduino/setup-protoc@v3
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
@@ -252,8 +238,6 @@ jobs:
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
persist-credentials: false
|
||||
- uses: arduino/setup-protoc@v3
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
@@ -311,8 +295,6 @@ jobs:
|
||||
echo "Disk space after:"
|
||||
df -h
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
persist-credentials: false
|
||||
- name: Setup Kind
|
||||
uses: ./.github/actions/setup-kind
|
||||
- if: matrix.mode.minio
|
||||
@@ -455,8 +437,6 @@ jobs:
|
||||
echo "Disk space after:"
|
||||
df -h
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
persist-credentials: false
|
||||
- name: Setup Kind
|
||||
uses: ./.github/actions/setup-kind
|
||||
- name: Setup Chaos Mesh
|
||||
@@ -582,8 +562,6 @@ jobs:
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
persist-credentials: false
|
||||
- if: matrix.mode.kafka
|
||||
name: Setup kafka server
|
||||
working-directory: tests-integration/fixtures
|
||||
@@ -611,8 +589,6 @@ jobs:
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
persist-credentials: false
|
||||
- uses: arduino/setup-protoc@v3
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
@@ -628,8 +604,6 @@ jobs:
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
persist-credentials: false
|
||||
- uses: arduino/setup-protoc@v3
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
@@ -652,8 +626,6 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
persist-credentials: false
|
||||
- name: Merge Conflict Finder
|
||||
uses: olivernybroe/action-conflict-finder@v4.0
|
||||
|
||||
@@ -664,8 +636,6 @@ jobs:
|
||||
needs: [conflict-check, clippy, fmt]
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
persist-credentials: false
|
||||
- uses: arduino/setup-protoc@v3
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
@@ -714,8 +684,6 @@ jobs:
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
persist-credentials: false
|
||||
- uses: arduino/setup-protoc@v3
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
.github/workflows/docbot.yml (9 changed lines, vendored)

@@ -3,17 +3,16 @@ on:
pull_request_target:
types: [opened, edited]

permissions:
pull-requests: write
contents: read

jobs:
docbot:
runs-on: ubuntu-20.04
permissions:
pull-requests: write
contents: read
timeout-minutes: 10
steps:
- uses: actions/checkout@v4
with:
persist-credentials: false
- uses: ./.github/actions/setup-cyborg
- name: Maybe Follow Up Docs Issue
working-directory: cyborg
.github/workflows/docs.yml (4 changed lines, vendored)

@@ -34,8 +34,6 @@ jobs:
runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@v4
with:
persist-credentials: false
- uses: crate-ci/typos@master

license-header-check:
@@ -43,8 +41,6 @@
name: Check License Header
steps:
- uses: actions/checkout@v4
with:
persist-credentials: false
- uses: korandoru/hawkeye@v5

check:
.github/workflows/nightly-build.yml (22 changed lines, vendored)
@@ -66,6 +66,13 @@ env:
|
||||
|
||||
NIGHTLY_RELEASE_PREFIX: nightly
|
||||
|
||||
# Use the different image name to avoid conflict with the release images.
|
||||
# The DockerHub image will be greptime/greptimedb-nightly.
|
||||
IMAGE_NAME: greptimedb-nightly
|
||||
|
||||
permissions:
|
||||
issues: write
|
||||
|
||||
jobs:
|
||||
allocate-runners:
|
||||
name: Allocate runners
|
||||
@@ -88,7 +95,6 @@ jobs:
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Create version
|
||||
id: create-version
|
||||
@@ -141,7 +147,6 @@ jobs:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- uses: ./.github/actions/build-linux-artifacts
|
||||
with:
|
||||
@@ -163,7 +168,6 @@ jobs:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- uses: ./.github/actions/build-linux-artifacts
|
||||
with:
|
||||
@@ -189,14 +193,13 @@ jobs:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Build and push images to dockerhub
|
||||
uses: ./.github/actions/build-images
|
||||
with:
|
||||
image-registry: docker.io
|
||||
image-namespace: ${{ vars.IMAGE_NAMESPACE }}
|
||||
image-name: ${{ vars.NIGHTLY_BUILD_IMAGE_NAME }}
|
||||
image-name: ${{ env.IMAGE_NAME }}
|
||||
image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
version: ${{ needs.allocate-runners.outputs.version }}
|
||||
@@ -223,14 +226,13 @@ jobs:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Release artifacts to CN region
|
||||
uses: ./.github/actions/release-cn-artifacts
|
||||
with:
|
||||
src-image-registry: docker.io
|
||||
src-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
|
||||
src-image-name: ${{ vars.NIGHTLY_BUILD_IMAGE_NAME }}
|
||||
src-image-name: ${{ env.IMAGE_NAME }}
|
||||
dst-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
|
||||
dst-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
|
||||
dst-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
|
||||
@@ -258,7 +260,6 @@ jobs:
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Stop EC2 runner
|
||||
uses: ./.github/actions/stop-runner
|
||||
@@ -284,7 +285,6 @@ jobs:
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Stop EC2 runner
|
||||
uses: ./.github/actions/stop-runner
|
||||
@@ -303,14 +303,10 @@ jobs:
|
||||
release-images-to-dockerhub
|
||||
]
|
||||
runs-on: ubuntu-20.04
|
||||
permissions:
|
||||
issues: write
|
||||
env:
|
||||
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
persist-credentials: false
|
||||
- uses: ./.github/actions/setup-cyborg
|
||||
- name: Report CI status
|
||||
id: report-ci-status
|
||||
|
||||
.github/workflows/nightly-ci.yml (19 changed lines, vendored)
@@ -9,6 +9,9 @@ concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
|
||||
cancel-in-progress: true
|
||||
|
||||
permissions:
|
||||
issues: write
|
||||
|
||||
jobs:
|
||||
sqlness-test:
|
||||
name: Run sqlness test
|
||||
@@ -19,7 +22,6 @@ jobs:
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Check install.sh
|
||||
run: ./.github/scripts/check-install-script.sh
|
||||
@@ -44,14 +46,9 @@ jobs:
|
||||
name: Sqlness tests on Windows
|
||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
||||
runs-on: windows-2022-8-cores
|
||||
permissions:
|
||||
issues: write
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
- uses: ./.github/actions/setup-cyborg
|
||||
- uses: arduino/setup-protoc@v3
|
||||
with:
|
||||
@@ -79,9 +76,6 @@ jobs:
|
||||
steps:
|
||||
- run: git config --global core.autocrlf false
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
- uses: ./.github/actions/setup-cyborg
|
||||
- uses: arduino/setup-protoc@v3
|
||||
with:
|
||||
@@ -117,13 +111,9 @@ jobs:
|
||||
cleanbuild-linux-nix:
|
||||
name: Run clean build on Linux
|
||||
runs-on: ubuntu-latest
|
||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
- uses: cachix/install-nix-action@v27
|
||||
with:
|
||||
nix_path: nixpkgs=channel:nixos-24.11
|
||||
@@ -151,9 +141,6 @@ jobs:
|
||||
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
- uses: ./.github/actions/setup-cyborg
|
||||
- name: Report CI status
|
||||
id: report-ci-status
|
||||
|
||||
@@ -37,7 +37,6 @@ jobs:
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Configure build image version
|
||||
id: set-version
|
||||
@@ -86,66 +85,48 @@ jobs:
|
||||
- name: Push dev-builder-ubuntu image
|
||||
shell: bash
|
||||
if: ${{ inputs.release_dev_builder_ubuntu_image }}
|
||||
env:
|
||||
IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
|
||||
IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
|
||||
ECR_IMAGE_REGISTRY: ${{ vars.ECR_IMAGE_REGISTRY }}
|
||||
ECR_IMAGE_NAMESPACE: ${{ vars.ECR_IMAGE_NAMESPACE }}
|
||||
run: |
|
||||
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
|
||||
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
|
||||
quay.io/skopeo/stable:latest \
|
||||
copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-ubuntu:$IMAGE_VERSION \
|
||||
docker://$ECR_IMAGE_REGISTRY/$ECR_IMAGE_NAMESPACE/dev-builder-ubuntu:$IMAGE_VERSION
|
||||
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ needs.release-dev-builder-images.outputs.version }} \
|
||||
docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ needs.release-dev-builder-images.outputs.version }}
|
||||
|
||||
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
|
||||
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
|
||||
quay.io/skopeo/stable:latest \
|
||||
copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-ubuntu:latest \
|
||||
docker://$ECR_IMAGE_REGISTRY/$ECR_IMAGE_NAMESPACE/dev-builder-ubuntu:latest
|
||||
|
||||
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:latest \
|
||||
docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-ubuntu:latest
|
||||
- name: Push dev-builder-centos image
|
||||
shell: bash
|
||||
if: ${{ inputs.release_dev_builder_centos_image }}
|
||||
env:
|
||||
IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
|
||||
IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
|
||||
ECR_IMAGE_REGISTRY: ${{ vars.ECR_IMAGE_REGISTRY }}
|
||||
ECR_IMAGE_NAMESPACE: ${{ vars.ECR_IMAGE_NAMESPACE }}
|
||||
run: |
|
||||
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
|
||||
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
|
||||
quay.io/skopeo/stable:latest \
|
||||
copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-centos:$IMAGE_VERSION \
|
||||
docker://$ECR_IMAGE_REGISTRY/$ECR_IMAGE_NAMESPACE/dev-builder-centos:$IMAGE_VERSION
|
||||
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:${{ needs.release-dev-builder-images.outputs.version }} \
|
||||
docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-centos:${{ needs.release-dev-builder-images.outputs.version }}
|
||||
|
||||
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
|
||||
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
|
||||
quay.io/skopeo/stable:latest \
|
||||
copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-centos:latest \
|
||||
docker://$ECR_IMAGE_REGISTRY/$ECR_IMAGE_NAMESPACE/dev-builder-centos:latest
|
||||
|
||||
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:latest \
|
||||
docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-centos:latest
|
||||
- name: Push dev-builder-android image
|
||||
shell: bash
|
||||
if: ${{ inputs.release_dev_builder_android_image }}
|
||||
env:
|
||||
IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
|
||||
IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
|
||||
ECR_IMAGE_REGISTRY: ${{ vars.ECR_IMAGE_REGISTRY }}
|
||||
ECR_IMAGE_NAMESPACE: ${{ vars.ECR_IMAGE_NAMESPACE }}
|
||||
run: |
|
||||
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
|
||||
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
|
||||
quay.io/skopeo/stable:latest \
|
||||
copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-android:$IMAGE_VERSION \
|
||||
docker://$ECR_IMAGE_REGISTRY/$ECR_IMAGE_NAMESPACE/dev-builder-android:$IMAGE_VERSION
|
||||
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:${{ needs.release-dev-builder-images.outputs.version }} \
|
||||
docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-android:${{ needs.release-dev-builder-images.outputs.version }}
|
||||
|
||||
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
|
||||
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
|
||||
quay.io/skopeo/stable:latest \
|
||||
copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-android:latest \
|
||||
docker://$ECR_IMAGE_REGISTRY/$ECR_IMAGE_NAMESPACE/dev-builder-android:latest
|
||||
|
||||
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:latest \
|
||||
docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-android:latest
|
||||
release-dev-builder-images-cn: # Note: Be careful issue: https://github.com/containers/skopeo/issues/1874 and we decide to use the latest stable skopeo container.
|
||||
name: Release dev builder images to CN region
|
||||
runs-on: ubuntu-20.04
|
||||
@@ -163,41 +144,29 @@ jobs:
|
||||
- name: Push dev-builder-ubuntu image
|
||||
shell: bash
|
||||
if: ${{ inputs.release_dev_builder_ubuntu_image }}
|
||||
env:
|
||||
IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
|
||||
IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
|
||||
ACR_IMAGE_REGISTRY: ${{ vars.ACR_IMAGE_REGISTRY }}
|
||||
run: |
|
||||
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
|
||||
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
|
||||
quay.io/skopeo/stable:latest \
|
||||
copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-ubuntu:$IMAGE_VERSION \
|
||||
docker://$ACR_IMAGE_REGISTRY/$IMAGE_NAMESPACE/dev-builder-ubuntu:$IMAGE_VERSION
|
||||
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ needs.release-dev-builder-images.outputs.version }} \
|
||||
docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ needs.release-dev-builder-images.outputs.version }}
|
||||
|
||||
- name: Push dev-builder-centos image
|
||||
shell: bash
|
||||
if: ${{ inputs.release_dev_builder_centos_image }}
|
||||
env:
|
||||
IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
|
||||
IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
|
||||
ACR_IMAGE_REGISTRY: ${{ vars.ACR_IMAGE_REGISTRY }}
|
||||
run: |
|
||||
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
|
||||
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
|
||||
quay.io/skopeo/stable:latest \
|
||||
copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-centos:$IMAGE_VERSION \
|
||||
docker://$ACR_IMAGE_REGISTRY/$IMAGE_NAMESPACE/dev-builder-centos:$IMAGE_VERSION
|
||||
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:${{ needs.release-dev-builder-images.outputs.version }} \
|
||||
docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:${{ needs.release-dev-builder-images.outputs.version }}
|
||||
|
||||
- name: Push dev-builder-android image
|
||||
shell: bash
|
||||
if: ${{ inputs.release_dev_builder_android_image }}
|
||||
env:
|
||||
IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
|
||||
IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
|
||||
ACR_IMAGE_REGISTRY: ${{ vars.ACR_IMAGE_REGISTRY }}
|
||||
run: |
|
||||
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
|
||||
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
|
||||
quay.io/skopeo/stable:latest \
|
||||
copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-android:$IMAGE_VERSION \
|
||||
docker://$ACR_IMAGE_REGISTRY/$IMAGE_NAMESPACE/dev-builder-android:$IMAGE_VERSION
|
||||
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:${{ needs.release-dev-builder-images.outputs.version }} \
|
||||
docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:${{ needs.release-dev-builder-images.outputs.version }}
|
||||
|
||||
.github/workflows/release.yml (32 changed lines, vendored)
@@ -93,6 +93,11 @@ env:
|
||||
# Note: The NEXT_RELEASE_VERSION should be modified manually by every formal release.
|
||||
NEXT_RELEASE_VERSION: v0.12.0
|
||||
|
||||
# Permission reference: https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs
|
||||
permissions:
|
||||
issues: write # Allows the action to create issues for cyborg.
|
||||
contents: write # Allows the action to create a release.
|
||||
|
||||
jobs:
|
||||
allocate-runners:
|
||||
name: Allocate runners
|
||||
@@ -117,7 +122,6 @@ jobs:
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Check Rust toolchain version
|
||||
shell: bash
|
||||
@@ -177,7 +181,6 @@ jobs:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- uses: ./.github/actions/build-linux-artifacts
|
||||
with:
|
||||
@@ -199,7 +202,6 @@ jobs:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- uses: ./.github/actions/build-linux-artifacts
|
||||
with:
|
||||
@@ -235,7 +237,6 @@ jobs:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- uses: ./.github/actions/build-macos-artifacts
|
||||
with:
|
||||
@@ -275,7 +276,6 @@ jobs:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- uses: ./.github/actions/build-windows-artifacts
|
||||
with:
|
||||
@@ -306,14 +306,12 @@ jobs:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Build and push images to dockerhub
|
||||
uses: ./.github/actions/build-images
|
||||
with:
|
||||
image-registry: docker.io
|
||||
image-namespace: ${{ vars.IMAGE_NAMESPACE }}
|
||||
image-name: ${{ vars.GREPTIMEDB_IMAGE_NAME }}
|
||||
image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
version: ${{ needs.allocate-runners.outputs.version }}
|
||||
@@ -343,14 +341,13 @@ jobs:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Release artifacts to CN region
|
||||
uses: ./.github/actions/release-cn-artifacts
|
||||
with:
|
||||
src-image-registry: docker.io
|
||||
src-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
|
||||
src-image-name: ${{ vars.GREPTIMEDB_IMAGE_NAME }}
|
||||
src-image-name: greptimedb
|
||||
dst-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
|
||||
dst-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
|
||||
dst-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
|
||||
@@ -380,7 +377,6 @@ jobs:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Publish GitHub release
|
||||
uses: ./.github/actions/publish-github-release
|
||||
@@ -404,7 +400,6 @@ jobs:
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Stop EC2 runner
|
||||
uses: ./.github/actions/stop-runner
|
||||
@@ -430,7 +425,6 @@ jobs:
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Stop EC2 runner
|
||||
uses: ./.github/actions/stop-runner
|
||||
@@ -447,15 +441,8 @@ jobs:
|
||||
if: ${{ github.event_name == 'push' || github.event_name == 'schedule' }}
|
||||
needs: [allocate-runners]
|
||||
runs-on: ubuntu-20.04
|
||||
# Permission reference: https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs
|
||||
permissions:
|
||||
issues: write # Allows the action to create issues for cyborg.
|
||||
contents: write # Allows the action to create a release.
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
- uses: ./.github/actions/setup-cyborg
|
||||
- name: Bump doc version
|
||||
working-directory: cyborg
|
||||
@@ -474,17 +461,10 @@ jobs:
|
||||
build-windows-artifacts,
|
||||
]
|
||||
runs-on: ubuntu-20.04
|
||||
# Permission reference: https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs
|
||||
permissions:
|
||||
issues: write # Allows the action to create issues for cyborg.
|
||||
contents: write # Allows the action to create a release.
|
||||
env:
|
||||
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
- uses: ./.github/actions/setup-cyborg
|
||||
- name: Report CI status
|
||||
id: report-ci-status
|
||||
|
||||
.github/workflows/schedule.yml (10 changed lines, vendored)

@@ -4,20 +4,18 @@ on:
- cron: '4 2 * * *'
workflow_dispatch:

permissions:
contents: read
issues: write
pull-requests: write

jobs:
maintenance:
name: Periodic Maintenance
runs-on: ubuntu-latest
permissions:
contents: read
issues: write
pull-requests: write
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
steps:
- uses: actions/checkout@v4
with:
persist-credentials: false
- uses: ./.github/actions/setup-cyborg
- name: Do Maintenance
working-directory: cyborg
.github/workflows/semantic-pull-request.yml (4 changed lines, vendored)

@@ -1,7 +1,7 @@
name: "Semantic Pull Request"

on:
pull_request:
pull_request_target:
types:
- opened
- reopened
@@ -13,8 +13,6 @@ jobs:
timeout-minutes: 10
steps:
- uses: actions/checkout@v4
with:
persist-credentials: false
- uses: ./.github/actions/setup-cyborg
- name: Check Pull Request
working-directory: cyborg
Cargo.lock (725 changed lines, generated): file diff suppressed because it is too large.
Cargo.toml (14 changed lines)

@@ -81,7 +81,6 @@ rust.unknown_lints = "deny"
rust.unexpected_cfgs = { level = "warn", check-cfg = ['cfg(tokio_unstable)'] }

[workspace.dependencies]
# DO_NOT_REMOVE_THIS: BEGIN_OF_EXTERNAL_DEPENDENCIES
# We turn off default-features for some dependencies here so the workspaces which inherit them can
# selectively turn them on if needed, since we can override default-features = true (from false)
# for the inherited dependency but cannot do the reverse (override from true to false).
@@ -107,7 +106,6 @@ bitflags = "2.4.1"
bytemuck = "1.12"
bytes = { version = "1.7", features = ["serde"] }
chrono = { version = "0.4", features = ["serde"] }
chrono-tz = "0.10.1"
clap = { version = "4.4", features = ["derive"] }
config = "0.13.0"
crossbeam-utils = "0.8"
@@ -129,7 +127,8 @@ etcd-client = "0.14"
fst = "0.4.7"
futures = "0.3"
futures-util = "0.3"
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "fc09a5696608d2a0aa718cc835d5cb9c4e8e9387" }
# branch: poc-write-path
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "1915576b113a494f5352fd61f211d899b7f87aab" }
hex = "0.4"
http = "1"
humantime = "2.1"
@@ -140,8 +139,8 @@ itertools = "0.10"
jsonb = { git = "https://github.com/databendlabs/jsonb.git", rev = "8c8d2fc294a39f3ff08909d60f718639cfba3875", default-features = false }
lazy_static = "1.4"
local-ip-address = "0.6"
loki-proto = { git = "https://github.com/GreptimeTeam/loki-proto.git", rev = "1434ecf23a2654025d86188fb5205e7a74b225d3" }
meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "5618e779cf2bb4755b499c630fba4c35e91898cb" }
loki-api = { git = "https://github.com/shuiyisong/tracing-loki", branch = "chore/prost_version" }
meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "a10facb353b41460eeb98578868ebf19c2084fac" }
mockall = "0.11.4"
moka = "0.12"
nalgebra = "0.33"
@@ -209,7 +208,6 @@ tracing-subscriber = { version = "0.3", features = ["env-filter", "json", "fmt"]
typetag = "0.2"
uuid = { version = "1.7", features = ["serde", "v4", "fast-rng"] }
zstd = "0.13"
# DO_NOT_REMOVE_THIS: END_OF_EXTERNAL_DEPENDENCIES

## workspaces members
api = { path = "src/api" }
@@ -281,10 +279,12 @@ tokio-rustls = { git = "https://github.com/GreptimeTeam/tokio-rustls", rev = "46
# This is commented, since we are not using aws-lc-sys, if we need to use it, we need to uncomment this line or use a release after this commit, or it wouldn't compile with gcc < 8.1
# see https://github.com/aws/aws-lc-rs/pull/526
# aws-lc-sys = { git ="https://github.com/aws/aws-lc-rs", rev = "556558441e3494af4b156ae95ebc07ebc2fd38aa" }
# Apply a fix for pprof for unaligned pointer access
pprof = { git = "https://github.com/GreptimeTeam/pprof-rs", rev = "1bd1e21" }

[workspace.dependencies.meter-macros]
git = "https://github.com/GreptimeTeam/greptime-meter.git"
rev = "5618e779cf2bb4755b499c630fba4c35e91898cb"
rev = "a10facb353b41460eeb98578868ebf19c2084fac"

[profile.release]
debug = 1
@@ -1,6 +1,3 @@
[target.aarch64-unknown-linux-gnu]
image = "ghcr.io/cross-rs/aarch64-unknown-linux-gnu:0.2.5"

[build]
pre-build = [
"dpkg --add-architecture $CROSS_DEB_ARCH",
@@ -8,8 +5,3 @@ pre-build = [
"curl -LO https://github.com/protocolbuffers/protobuf/releases/download/v3.15.8/protoc-3.15.8-linux-x86_64.zip && unzip protoc-3.15.8-linux-x86_64.zip -d /usr/",
"chmod a+x /usr/bin/protoc && chmod -R a+rx /usr/include/google",
]

[build.env]
passthrough = [
"JEMALLOC_SYS_WITH_LG_PAGE",
]
@@ -116,7 +116,7 @@ docker run -p 127.0.0.1:4000-4003:4000-4003 \
--name greptime --rm \
greptime/greptimedb:latest standalone start \
--http-addr 0.0.0.0:4000 \
--rpc-bind-addr 0.0.0.0:4001 \
--rpc-addr 0.0.0.0:4001 \
--mysql-addr 0.0.0.0:4002 \
--postgres-addr 0.0.0.0:4003
```
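The hunk above only renames the RPC flag in the README's `docker run` example. As a quick, hedged follow-up (not part of this diff), the mapped HTTP port can be probed once the container is up; the `/health` and `/v1/sql` routes below are assumptions about the HTTP API and may differ between versions.

```bash
# Hedged sketch: sanity-check the standalone container started by the README command.
# The /health and /v1/sql endpoint paths are assumptions, not taken from this diff.
curl -i http://127.0.0.1:4000/health

# Run a trivial query through the HTTP SQL API (path also assumed).
curl -G http://127.0.0.1:4000/v1/sql --data-urlencode "sql=SELECT 1"
```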
@@ -29,7 +29,7 @@
|
||||
| `http.enable_cors` | Bool | `true` | HTTP CORS support, it's turned on by default<br/>This allows browser to access http APIs without CORS restrictions |
|
||||
| `http.cors_allowed_origins` | Array | Unset | Customize allowed origins for HTTP CORS. |
|
||||
| `grpc` | -- | -- | The gRPC server options. |
|
||||
| `grpc.bind_addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
|
||||
| `grpc.addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
|
||||
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
|
||||
| `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
|
||||
| `grpc.tls.mode` | String | `disable` | TLS mode. |
|
||||
@@ -40,7 +40,6 @@
|
||||
| `mysql.enable` | Bool | `true` | Whether to enable. |
|
||||
| `mysql.addr` | String | `127.0.0.1:4002` | The addr to bind the MySQL server. |
|
||||
| `mysql.runtime_size` | Integer | `2` | The number of server worker threads. |
|
||||
| `mysql.keep_alive` | String | `0s` | Server-side keep-alive time.<br/>Set to 0 (default) to disable. |
|
||||
| `mysql.tls` | -- | -- | -- |
|
||||
| `mysql.tls.mode` | String | `disable` | TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html<br/>- `disable` (default value)<br/>- `prefer`<br/>- `require`<br/>- `verify-ca`<br/>- `verify-full` |
|
||||
| `mysql.tls.cert_path` | String | Unset | Certificate file path. |
|
||||
@@ -50,7 +49,6 @@
|
||||
| `postgres.enable` | Bool | `true` | Whether to enable |
|
||||
| `postgres.addr` | String | `127.0.0.1:4003` | The addr to bind the PostgresSQL server. |
|
||||
| `postgres.runtime_size` | Integer | `2` | The number of server worker threads. |
|
||||
| `postgres.keep_alive` | String | `0s` | Server-side keep-alive time.<br/>Set to 0 (default) to disable. |
|
||||
| `postgres.tls` | -- | -- | PostgresSQL server TLS options, see `mysql.tls` section. |
|
||||
| `postgres.tls.mode` | String | `disable` | TLS mode. |
|
||||
| `postgres.tls.cert_path` | String | Unset | Certificate file path. |
|
||||
@@ -60,8 +58,6 @@
|
||||
| `opentsdb.enable` | Bool | `true` | Whether to enable OpenTSDB put in HTTP API. |
|
||||
| `influxdb` | -- | -- | InfluxDB protocol options. |
|
||||
| `influxdb.enable` | Bool | `true` | Whether to enable InfluxDB protocol in HTTP API. |
|
||||
| `jaeger` | -- | -- | Jaeger protocol options. |
|
||||
| `jaeger.enable` | Bool | `true` | Whether to enable Jaeger protocol in HTTP API. |
|
||||
| `prom_store` | -- | -- | Prometheus remote storage options |
|
||||
| `prom_store.enable` | Bool | `true` | Whether to enable Prometheus remote write and read in HTTP API. |
|
||||
| `prom_store.with_metric_engine` | Bool | `true` | Whether to store the data from Prometheus remote write in metric engine. |
|
||||
@@ -69,8 +65,8 @@
|
||||
| `wal.provider` | String | `raft_engine` | The provider of the WAL.<br/>- `raft_engine`: the wal is stored in the local file system by raft-engine.<br/>- `kafka`: it's remote wal that data is stored in Kafka. |
|
||||
| `wal.dir` | String | Unset | The directory to store the WAL files.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.file_size` | String | `128MB` | The size of the WAL segment file.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.purge_threshold` | String | `1GB` | The threshold of the WAL size to trigger a purge.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.purge_interval` | String | `1m` | The interval to trigger a purge.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.purge_threshold` | String | `1GB` | The threshold of the WAL size to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.purge_interval` | String | `1m` | The interval to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.read_batch_size` | Integer | `128` | The read batch size.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.sync_write` | Bool | `false` | Whether to use sync write.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.enable_log_recycle` | Bool | `true` | Whether to reuse logically truncated log files.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
@@ -92,9 +88,8 @@
|
||||
| `wal.backoff_deadline` | String | `5mins` | The deadline of retries.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.overwrite_entry_start_id` | Bool | `false` | Ignore missing entries during read WAL.<br/>**It's only used when the provider is `kafka`**.<br/><br/>This option ensures that when Kafka messages are deleted, the system<br/>can still successfully replay memtable data without throwing an<br/>out-of-range error.<br/>However, enabling this option might lead to unexpected data loss,<br/>as the system will skip over missing entries instead of treating<br/>them as critical errors. |
|
||||
| `metadata_store` | -- | -- | Metadata storage options. |
|
||||
| `metadata_store.file_size` | String | `64MB` | The size of the metadata store log file. |
|
||||
| `metadata_store.purge_threshold` | String | `256MB` | The threshold of the metadata store size to trigger a purge. |
|
||||
| `metadata_store.purge_interval` | String | `1m` | The interval of the metadata store to trigger a purge. |
|
||||
| `metadata_store.file_size` | String | `256MB` | Kv file size in bytes. |
|
||||
| `metadata_store.purge_threshold` | String | `4GB` | Kv purge threshold. |
|
||||
| `procedure` | -- | -- | Procedure storage options. |
|
||||
| `procedure.max_retry_times` | Integer | `3` | Procedure max retry time. |
|
||||
| `procedure.retry_delay` | String | `500ms` | Initial retry delay of procedures, increases exponentially |
|
||||
@@ -226,8 +221,8 @@
|
||||
| `http.enable_cors` | Bool | `true` | HTTP CORS support, it's turned on by default<br/>This allows browser to access http APIs without CORS restrictions |
|
||||
| `http.cors_allowed_origins` | Array | Unset | Customize allowed origins for HTTP CORS. |
|
||||
| `grpc` | -- | -- | The gRPC server options. |
|
||||
| `grpc.bind_addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
|
||||
| `grpc.server_addr` | String | `127.0.0.1:4001` | The address advertised to the metasrv, and used for connections from outside the host.<br/>If left empty or unset, the server will automatically use the IP address of the first network interface<br/>on the host, with the same port number as the one specified in `grpc.bind_addr`. |
|
||||
| `grpc.addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
|
||||
| `grpc.hostname` | String | `127.0.0.1:4001` | The hostname advertised to the metasrv,<br/>and used for connections from outside the host |
|
||||
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
|
||||
| `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
|
||||
| `grpc.tls.mode` | String | `disable` | TLS mode. |
|
||||
@@ -238,7 +233,6 @@
|
||||
| `mysql.enable` | Bool | `true` | Whether to enable. |
|
||||
| `mysql.addr` | String | `127.0.0.1:4002` | The addr to bind the MySQL server. |
|
||||
| `mysql.runtime_size` | Integer | `2` | The number of server worker threads. |
|
||||
| `mysql.keep_alive` | String | `0s` | Server-side keep-alive time.<br/>Set to 0 (default) to disable. |
|
||||
| `mysql.tls` | -- | -- | -- |
|
||||
| `mysql.tls.mode` | String | `disable` | TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html<br/>- `disable` (default value)<br/>- `prefer`<br/>- `require`<br/>- `verify-ca`<br/>- `verify-full` |
|
||||
| `mysql.tls.cert_path` | String | Unset | Certificate file path. |
|
||||
@@ -248,7 +242,6 @@
|
||||
| `postgres.enable` | Bool | `true` | Whether to enable |
|
||||
| `postgres.addr` | String | `127.0.0.1:4003` | The addr to bind the PostgresSQL server. |
|
||||
| `postgres.runtime_size` | Integer | `2` | The number of server worker threads. |
|
||||
| `postgres.keep_alive` | String | `0s` | Server-side keep-alive time.<br/>Set to 0 (default) to disable. |
|
||||
| `postgres.tls` | -- | -- | PostgresSQL server TLS options, see `mysql.tls` section. |
|
||||
| `postgres.tls.mode` | String | `disable` | TLS mode. |
|
||||
| `postgres.tls.cert_path` | String | Unset | Certificate file path. |
|
||||
@@ -258,8 +251,6 @@
|
||||
| `opentsdb.enable` | Bool | `true` | Whether to enable OpenTSDB put in HTTP API. |
|
||||
| `influxdb` | -- | -- | InfluxDB protocol options. |
|
||||
| `influxdb.enable` | Bool | `true` | Whether to enable InfluxDB protocol in HTTP API. |
|
||||
| `jaeger` | -- | -- | Jaeger protocol options. |
|
||||
| `jaeger.enable` | Bool | `true` | Whether to enable Jaeger protocol in HTTP API. |
|
||||
| `prom_store` | -- | -- | Prometheus remote storage options |
|
||||
| `prom_store.enable` | Bool | `true` | Whether to enable Prometheus remote write and read in HTTP API. |
|
||||
| `prom_store.with_metric_engine` | Bool | `true` | Whether to store the data from Prometheus remote write in metric engine. |
|
||||
@@ -309,7 +300,7 @@
|
||||
| --- | -----| ------- | ----------- |
|
||||
| `data_home` | String | `/tmp/metasrv/` | The working home directory. |
|
||||
| `bind_addr` | String | `127.0.0.1:3002` | The bind address of metasrv. |
|
||||
| `server_addr` | String | `127.0.0.1:3002` | The communication server address for the frontend and datanode to connect to metasrv.<br/>If left empty or unset, the server will automatically use the IP address of the first network interface<br/>on the host, with the same port number as the one specified in `bind_addr`. |
|
||||
| `server_addr` | String | `127.0.0.1:3002` | The communication server address for frontend and datanode to connect to metasrv, "127.0.0.1:3002" by default for localhost. |
|
||||
| `store_addrs` | Array | -- | Store server address default to etcd store.<br/>For postgres store, the format is:<br/>"password=password dbname=postgres user=postgres host=localhost port=5432"<br/>For etcd store, the format is:<br/>"127.0.0.1:2379" |
|
||||
| `store_key_prefix` | String | `""` | If it's not empty, the metasrv will store all data with this key prefix. |
|
||||
| `backend` | String | `etcd_store` | The datastore for meta server.<br/>Available values:<br/>- `etcd_store` (default value)<br/>- `memory_store`<br/>- `postgres_store` |
|
||||
@@ -385,14 +376,19 @@
|
||||
| `init_regions_in_background` | Bool | `false` | Initialize all regions in the background during the startup.<br/>By default, it provides services after all regions have been initialized. |
|
||||
| `init_regions_parallelism` | Integer | `16` | Parallelism of initializing regions. |
|
||||
| `max_concurrent_queries` | Integer | `0` | The maximum current queries allowed to be executed. Zero means unlimited. |
|
||||
| `rpc_addr` | String | Unset | Deprecated, use `grpc.addr` instead. |
|
||||
| `rpc_hostname` | String | Unset | Deprecated, use `grpc.hostname` instead. |
|
||||
| `rpc_runtime_size` | Integer | Unset | Deprecated, use `grpc.runtime_size` instead. |
|
||||
| `rpc_max_recv_message_size` | String | Unset | Deprecated, use `grpc.rpc_max_recv_message_size` instead. |
|
||||
| `rpc_max_send_message_size` | String | Unset | Deprecated, use `grpc.rpc_max_send_message_size` instead. |
|
||||
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. Enabled by default. |
|
||||
| `http` | -- | -- | The HTTP server options. |
|
||||
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
|
||||
| `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |
|
||||
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
|
||||
| `grpc` | -- | -- | The gRPC server options. |
|
||||
| `grpc.bind_addr` | String | `127.0.0.1:3001` | The address to bind the gRPC server. |
|
||||
| `grpc.server_addr` | String | `127.0.0.1:3001` | The address advertised to the metasrv, and used for connections from outside the host.<br/>If left empty or unset, the server will automatically use the IP address of the first network interface<br/>on the host, with the same port number as the one specified in `grpc.bind_addr`. |
|
||||
| `grpc.addr` | String | `127.0.0.1:3001` | The address to bind the gRPC server. |
|
||||
| `grpc.hostname` | String | `127.0.0.1:3001` | The hostname advertised to the metasrv,<br/>and used for connections from outside the host |
|
||||
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
|
||||
| `grpc.max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. |
|
||||
| `grpc.max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
|
||||
@@ -553,8 +549,8 @@
|
||||
| `flow` | -- | -- | flow engine options. |
|
||||
| `flow.num_workers` | Integer | `0` | The number of flow worker in flownode.<br/>Not setting(or set to 0) this value will use the number of CPU cores divided by 2. |
|
||||
| `grpc` | -- | -- | The gRPC server options. |
|
||||
| `grpc.bind_addr` | String | `127.0.0.1:6800` | The address to bind the gRPC server. |
|
||||
| `grpc.server_addr` | String | `127.0.0.1:6800` | The address advertised to the metasrv,<br/>and used for connections from outside the host |
|
||||
| `grpc.addr` | String | `127.0.0.1:6800` | The address to bind the gRPC server. |
|
||||
| `grpc.hostname` | String | `127.0.0.1` | The hostname advertised to the metasrv,<br/>and used for connections from outside the host |
|
||||
| `grpc.runtime_size` | Integer | `2` | The number of server worker threads. |
|
||||
| `grpc.max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. |
|
||||
| `grpc.max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
|
||||
|
||||
@@ -19,6 +19,26 @@ init_regions_parallelism = 16
|
||||
## The maximum current queries allowed to be executed. Zero means unlimited.
|
||||
max_concurrent_queries = 0
|
||||
|
||||
## Deprecated, use `grpc.addr` instead.
|
||||
## @toml2docs:none-default
|
||||
rpc_addr = "127.0.0.1:3001"
|
||||
|
||||
## Deprecated, use `grpc.hostname` instead.
|
||||
## @toml2docs:none-default
|
||||
rpc_hostname = "127.0.0.1"
|
||||
|
||||
## Deprecated, use `grpc.runtime_size` instead.
|
||||
## @toml2docs:none-default
|
||||
rpc_runtime_size = 8
|
||||
|
||||
## Deprecated, use `grpc.rpc_max_recv_message_size` instead.
|
||||
## @toml2docs:none-default
|
||||
rpc_max_recv_message_size = "512MB"
|
||||
|
||||
## Deprecated, use `grpc.rpc_max_send_message_size` instead.
|
||||
## @toml2docs:none-default
|
||||
rpc_max_send_message_size = "512MB"
|
||||
|
||||
## Enable telemetry to collect anonymous usage data. Enabled by default.
|
||||
#+ enable_telemetry = true
|
||||
|
||||
@@ -36,11 +56,10 @@ body_limit = "64MB"
|
||||
## The gRPC server options.
|
||||
[grpc]
|
||||
## The address to bind the gRPC server.
|
||||
bind_addr = "127.0.0.1:3001"
|
||||
## The address advertised to the metasrv, and used for connections from outside the host.
|
||||
## If left empty or unset, the server will automatically use the IP address of the first network interface
|
||||
## on the host, with the same port number as the one specified in `grpc.bind_addr`.
|
||||
server_addr = "127.0.0.1:3001"
|
||||
addr = "127.0.0.1:3001"
|
||||
## The hostname advertised to the metasrv,
|
||||
## and used for connections from outside the host
|
||||
hostname = "127.0.0.1:3001"
|
||||
## The number of server worker threads.
|
||||
runtime_size = 8
|
||||
## The maximum receive message size for gRPC server.
|
||||
|
||||
@@ -14,10 +14,10 @@ node_id = 14
|
||||
## The gRPC server options.
|
||||
[grpc]
|
||||
## The address to bind the gRPC server.
|
||||
bind_addr = "127.0.0.1:6800"
|
||||
## The address advertised to the metasrv,
|
||||
addr = "127.0.0.1:6800"
|
||||
## The hostname advertised to the metasrv,
|
||||
## and used for connections from outside the host
|
||||
server_addr = "127.0.0.1:6800"
|
||||
hostname = "127.0.0.1"
|
||||
## The number of server worker threads.
|
||||
runtime_size = 2
|
||||
## The maximum receive message size for gRPC server.
|
||||
|
||||
@@ -41,11 +41,10 @@ cors_allowed_origins = ["https://example.com"]
|
||||
## The gRPC server options.
|
||||
[grpc]
|
||||
## The address to bind the gRPC server.
|
||||
bind_addr = "127.0.0.1:4001"
|
||||
## The address advertised to the metasrv, and used for connections from outside the host.
|
||||
## If left empty or unset, the server will automatically use the IP address of the first network interface
|
||||
## on the host, with the same port number as the one specified in `grpc.bind_addr`.
|
||||
server_addr = "127.0.0.1:4001"
|
||||
addr = "127.0.0.1:4001"
|
||||
## The hostname advertised to the metasrv,
|
||||
## and used for connections from outside the host
|
||||
hostname = "127.0.0.1:4001"
|
||||
## The number of server worker threads.
|
||||
runtime_size = 8
|
||||
|
||||
@@ -74,9 +73,6 @@ enable = true
|
||||
addr = "127.0.0.1:4002"
|
||||
## The number of server worker threads.
|
||||
runtime_size = 2
|
||||
## Server-side keep-alive time.
|
||||
## Set to 0 (default) to disable.
|
||||
keep_alive = "0s"
|
||||
|
||||
# MySQL server TLS options.
|
||||
[mysql.tls]
|
||||
@@ -108,9 +104,6 @@ enable = true
|
||||
addr = "127.0.0.1:4003"
|
||||
## The number of server worker threads.
|
||||
runtime_size = 2
|
||||
## Server-side keep-alive time.
|
||||
## Set to 0 (default) to disable.
|
||||
keep_alive = "0s"
|
||||
|
||||
## PostgresSQL server TLS options, see `mysql.tls` section.
|
||||
[postgres.tls]
|
||||
@@ -138,11 +131,6 @@ enable = true
|
||||
## Whether to enable InfluxDB protocol in HTTP API.
|
||||
enable = true
|
||||
|
||||
## Jaeger protocol options.
|
||||
[jaeger]
|
||||
## Whether to enable Jaeger protocol in HTTP API.
|
||||
enable = true
|
||||
|
||||
## Prometheus remote storage options
|
||||
[prom_store]
|
||||
## Whether to enable Prometheus remote write and read in HTTP API.
|
||||
|
||||
@@ -4,9 +4,7 @@ data_home = "/tmp/metasrv/"
|
||||
## The bind address of metasrv.
|
||||
bind_addr = "127.0.0.1:3002"
|
||||
|
||||
## The communication server address for the frontend and datanode to connect to metasrv.
|
||||
## If left empty or unset, the server will automatically use the IP address of the first network interface
|
||||
## on the host, with the same port number as the one specified in `bind_addr`.
|
||||
## The communication server address for frontend and datanode to connect to metasrv, "127.0.0.1:3002" by default for localhost.
|
||||
server_addr = "127.0.0.1:3002"
|
||||
|
||||
## Store server address default to etcd store.
|
||||
|
||||
@@ -49,7 +49,7 @@ cors_allowed_origins = ["https://example.com"]
|
||||
## The gRPC server options.
|
||||
[grpc]
|
||||
## The address to bind the gRPC server.
|
||||
bind_addr = "127.0.0.1:4001"
|
||||
addr = "127.0.0.1:4001"
|
||||
## The number of server worker threads.
|
||||
runtime_size = 8
|
||||
|
||||
@@ -78,9 +78,6 @@ enable = true
|
||||
addr = "127.0.0.1:4002"
|
||||
## The number of server worker threads.
|
||||
runtime_size = 2
|
||||
## Server-side keep-alive time.
|
||||
## Set to 0 (default) to disable.
|
||||
keep_alive = "0s"
|
||||
|
||||
# MySQL server TLS options.
|
||||
[mysql.tls]
|
||||
@@ -112,9 +109,6 @@ enable = true
|
||||
addr = "127.0.0.1:4003"
|
||||
## The number of server worker threads.
|
||||
runtime_size = 2
|
||||
## Server-side keep-alive time.
|
||||
## Set to 0 (default) to disable.
|
||||
keep_alive = "0s"
|
||||
|
||||
## PostgresSQL server TLS options, see `mysql.tls` section.
|
||||
[postgres.tls]
|
||||
@@ -142,11 +136,6 @@ enable = true
|
||||
## Whether to enable InfluxDB protocol in HTTP API.
|
||||
enable = true
|
||||
|
||||
## Jaeger protocol options.
|
||||
[jaeger]
|
||||
## Whether to enable Jaeger protocol in HTTP API.
|
||||
enable = true
|
||||
|
||||
## Prometheus remote storage options
|
||||
[prom_store]
|
||||
## Whether to enable Prometheus remote write and read in HTTP API.
|
||||
@@ -170,11 +159,11 @@ dir = "/tmp/greptimedb/wal"
|
||||
## **It's only used when the provider is `raft_engine`**.
|
||||
file_size = "128MB"
|
||||
|
||||
## The threshold of the WAL size to trigger a purge.
|
||||
## The threshold of the WAL size to trigger a flush.
|
||||
## **It's only used when the provider is `raft_engine`**.
|
||||
purge_threshold = "1GB"
|
||||
|
||||
## The interval to trigger a purge.
|
||||
## The interval to trigger a flush.
|
||||
## **It's only used when the provider is `raft_engine`**.
|
||||
purge_interval = "1m"
|
||||
|
||||
@@ -289,12 +278,10 @@ overwrite_entry_start_id = false

## Metadata storage options.
[metadata_store]
## The size of the metadata store log file.
file_size = "64MB"
## The threshold of the metadata store size to trigger a purge.
purge_threshold = "256MB"
## The interval of the metadata store to trigger a purge.
purge_interval = "1m"
## Kv file size in bytes.
file_size = "256MB"
## Kv purge threshold.
purge_threshold = "4GB"

## Procedure storage options.
[procedure]

@@ -43,8 +43,8 @@ services:
    command:
      - metasrv
      - start
      - --rpc-bind-addr=0.0.0.0:3002
      - --rpc-server-addr=metasrv:3002
      - --bind-addr=0.0.0.0:3002
      - --server-addr=metasrv:3002
      - --store-addrs=etcd0:2379
      - --http-addr=0.0.0.0:3000
    healthcheck:
@@ -68,8 +68,8 @@ services:
      - datanode
      - start
      - --node-id=0
      - --rpc-bind-addr=0.0.0.0:3001
      - --rpc-server-addr=datanode0:3001
      - --rpc-addr=0.0.0.0:3001
      - --rpc-hostname=datanode0:3001
      - --metasrv-addrs=metasrv:3002
      - --http-addr=0.0.0.0:5000
    volumes:
@@ -98,7 +98,7 @@ services:
      - start
      - --metasrv-addrs=metasrv:3002
      - --http-addr=0.0.0.0:4000
      - --rpc-bind-addr=0.0.0.0:4001
      - --rpc-addr=0.0.0.0:4001
      - --mysql-addr=0.0.0.0:4002
      - --postgres-addr=0.0.0.0:4003
    healthcheck:
@@ -123,8 +123,8 @@ services:
      - start
      - --node-id=0
      - --metasrv-addrs=metasrv:3002
      - --rpc-bind-addr=0.0.0.0:4004
      - --rpc-server-addr=flownode0:4004
      - --rpc-addr=0.0.0.0:4004
      - --rpc-hostname=flownode0:4004
      - --http-addr=0.0.0.0:4005
    depends_on:
      frontend0:

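A hypothetical usage sketch for the compose file above; the compose file name, the published host ports, and the `/health` route are assumptions rather than values taken from this diff:

```bash
# Bring the etcd + metasrv + datanode + frontend + flownode stack up in the background.
docker compose -f cluster.yml up -d

# Once the frontend reports healthy, talk to it over HTTP or MySQL (ports as configured above).
curl -fsS http://127.0.0.1:4000/health
mysql -h 127.0.0.1 -P 4002 -e "SHOW DATABASES;"
```
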
@@ -4,16 +4,6 @@ This crate provides an easy approach to dump memory profiling info.

## Prerequisites
### jemalloc
`jeprof` is already compiled in the target directory of GreptimeDB. You can find the binary and use it.
```
# find jeprof binary
find . -name 'jeprof'
# add executable permission
chmod +x <path_to_jeprof>
```
The path is usually under `./target/${PROFILE}/build/tikv-jemalloc-sys-${HASH}/out/build/bin/jeprof`.
The default version of jemalloc installed from the package manager may not have the `--collapsed` option.
You may need to check whether the `jeprof` version is >= `5.3.0` if you want to install it from the package manager.
```bash
# for macOS
brew install jemalloc
@@ -33,11 +23,7 @@ curl https://raw.githubusercontent.com/brendangregg/FlameGraph/master/flamegraph
Start a GreptimeDB instance with the profiling environment variable set:

```bash
# for Linux
MALLOC_CONF=prof:true ./target/debug/greptime standalone start

# for macOS
_RJEM_MALLOC_CONF=prof:true ./target/debug/greptime standalone start
```

Dump memory profiling data through the HTTP API:

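A hedged sketch of the dump-and-visualize step; the HTTP route below is an assumption (check this crate's docs for the exact path), while `--collapsed` and `flamegraph.pl` come from the prerequisites above:

```bash
# Dump a heap profile from the running instance (route is an assumption; 4000 is the default HTTP port).
curl -X POST "http://127.0.0.1:4000/debug/prof/mem" > greptime.hprof

# Fold the dump with jeprof (>= 5.3.0) and render a flame graph.
jeprof ./target/debug/greptime greptime.hprof --collapsed | ./flamegraph.pl > mem-prof.svg
```
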
(Binary image diffs not shown: one image changed from 25 KiB to 36 KiB; docs/logo-text-padding.png changed from a normal file to an executable file and from 21 KiB to 25 KiB.)
@@ -15,10 +15,10 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use datatypes::schema::{
|
||||
ColumnDefaultConstraint, ColumnSchema, FulltextAnalyzer, FulltextOptions, SkippingIndexType,
|
||||
COMMENT_KEY, FULLTEXT_KEY, INVERTED_INDEX_KEY, SKIPPING_INDEX_KEY,
|
||||
ColumnDefaultConstraint, ColumnSchema, FulltextAnalyzer, FulltextOptions, COMMENT_KEY,
|
||||
FULLTEXT_KEY, INVERTED_INDEX_KEY, SKIPPING_INDEX_KEY,
|
||||
};
|
||||
use greptime_proto::v1::{Analyzer, SkippingIndexType as PbSkippingIndexType};
|
||||
use greptime_proto::v1::Analyzer;
|
||||
use snafu::ResultExt;
|
||||
|
||||
use crate::error::{self, Result};
|
||||
@@ -121,13 +121,6 @@ pub fn as_fulltext_option(analyzer: Analyzer) -> FulltextAnalyzer {
|
||||
}
|
||||
}
|
||||
|
||||
/// Tries to construct a `SkippingIndexType` from the given skipping index type.
|
||||
pub fn as_skipping_index_type(skipping_index_type: PbSkippingIndexType) -> SkippingIndexType {
|
||||
match skipping_index_type {
|
||||
PbSkippingIndexType::BloomFilter => SkippingIndexType::BloomFilter,
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
|
||||
@@ -228,6 +228,12 @@ impl InformationSchemaKeyColumnUsageBuilder {
|
||||
let keys = &table_info.meta.primary_key_indices;
|
||||
let schema = table.schema();
|
||||
|
||||
// For compatibility, use primary key columns as inverted index columns.
|
||||
let pk_as_inverted_index = !schema
|
||||
.column_schemas()
|
||||
.iter()
|
||||
.any(|c| c.has_inverted_index_key());
|
||||
|
||||
for (idx, column) in schema.column_schemas().iter().enumerate() {
|
||||
let mut constraints = vec![];
|
||||
if column.is_time_index() {
|
||||
@@ -245,6 +251,10 @@ impl InformationSchemaKeyColumnUsageBuilder {
|
||||
// TODO(dimbtp): foreign key constraint not supported yet
|
||||
if keys.contains(&idx) {
|
||||
constraints.push(PRI_CONSTRAINT_NAME);
|
||||
|
||||
if pk_as_inverted_index {
|
||||
constraints.push(INVERTED_INDEX_CONSTRAINT_NAME);
|
||||
}
|
||||
}
|
||||
if column.is_inverted_indexed() {
|
||||
constraints.push(INVERTED_INDEX_CONSTRAINT_NAME);
|
||||
|
||||
@@ -24,11 +24,10 @@ use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
|
||||
use common_meta::kv_backend::etcd::EtcdStore;
|
||||
use common_meta::kv_backend::memory::MemoryKvBackend;
|
||||
#[cfg(feature = "pg_kvbackend")]
|
||||
use common_meta::kv_backend::rds::PgStore;
|
||||
use common_meta::kv_backend::postgres::PgStore;
|
||||
use common_meta::peer::Peer;
|
||||
use common_meta::rpc::router::{Region, RegionRoute};
|
||||
use common_telemetry::info;
|
||||
use common_wal::options::WalOptions;
|
||||
use datatypes::data_type::ConcreteDataType;
|
||||
use datatypes::schema::{ColumnSchema, RawSchema};
|
||||
use rand::Rng;
|
||||
@@ -185,7 +184,7 @@ fn create_region_routes(regions: Vec<RegionNumber>) -> Vec<RegionRoute> {
|
||||
region_routes
|
||||
}
|
||||
|
||||
fn create_region_wal_options(regions: Vec<RegionNumber>) -> HashMap<RegionNumber, WalOptions> {
|
||||
fn create_region_wal_options(regions: Vec<RegionNumber>) -> HashMap<RegionNumber, String> {
|
||||
// TODO(niebayes): construct region wal options for benchmark.
|
||||
let _ = regions;
|
||||
HashMap::default()
|
||||
|
||||
@@ -49,12 +49,7 @@ impl TableMetadataBencher {
|
||||
|
||||
let regions: Vec<_> = (0..64).collect();
|
||||
let region_routes = create_region_routes(regions.clone());
|
||||
let region_wal_options = create_region_wal_options(regions)
|
||||
.into_iter()
|
||||
.map(|(region_id, wal_options)| {
|
||||
(region_id, serde_json::to_string(&wal_options).unwrap())
|
||||
})
|
||||
.collect();
|
||||
let region_wal_options = create_region_wal_options(regions);
|
||||
|
||||
let start = Instant::now();
|
||||
|
||||
@@ -114,17 +109,9 @@ impl TableMetadataBencher {
|
||||
let table_info = table_info.unwrap();
|
||||
let table_route = table_route.unwrap();
|
||||
let table_id = table_info.table_info.ident.table_id;
|
||||
|
||||
let regions: Vec<_> = (0..64).collect();
|
||||
let region_wal_options = create_region_wal_options(regions);
|
||||
let _ = self
|
||||
.table_metadata_manager
|
||||
.delete_table_metadata(
|
||||
table_id,
|
||||
&table_info.table_name(),
|
||||
&table_route,
|
||||
®ion_wal_options,
|
||||
)
|
||||
.delete_table_metadata(table_id, &table_info.table_name(), &table_route)
|
||||
.await;
|
||||
start.elapsed()
|
||||
},
|
||||
|
||||
@@ -126,14 +126,10 @@ impl SubCommand {
|
||||
struct StartCommand {
|
||||
#[clap(long)]
|
||||
node_id: Option<u64>,
|
||||
/// The address to bind the gRPC server.
|
||||
#[clap(long, alias = "rpc-addr")]
|
||||
rpc_bind_addr: Option<String>,
|
||||
/// The address advertised to the metasrv, and used for connections from outside the host.
|
||||
/// If left empty or unset, the server will automatically use the IP address of the first network interface
|
||||
/// on the host, with the same port number as the one specified in `rpc_bind_addr`.
|
||||
#[clap(long, alias = "rpc-hostname")]
|
||||
rpc_server_addr: Option<String>,
|
||||
#[clap(long)]
|
||||
rpc_addr: Option<String>,
|
||||
#[clap(long)]
|
||||
rpc_hostname: Option<String>,
|
||||
#[clap(long, value_delimiter = ',', num_args = 1..)]
|
||||
metasrv_addrs: Option<Vec<String>>,
|
||||
#[clap(short, long)]
|
||||
@@ -185,18 +181,18 @@ impl StartCommand {
|
||||
tokio_console_addr: global_options.tokio_console_addr.clone(),
|
||||
};
|
||||
|
||||
if let Some(addr) = &self.rpc_bind_addr {
|
||||
opts.grpc.bind_addr.clone_from(addr);
|
||||
if let Some(addr) = &self.rpc_addr {
|
||||
opts.grpc.addr.clone_from(addr);
|
||||
} else if let Some(addr) = &opts.rpc_addr {
|
||||
warn!("Use the deprecated attribute `DatanodeOptions.rpc_addr`, please use `grpc.addr` instead.");
|
||||
opts.grpc.bind_addr.clone_from(addr);
|
||||
opts.grpc.addr.clone_from(addr);
|
||||
}
|
||||
|
||||
if let Some(server_addr) = &self.rpc_server_addr {
|
||||
opts.grpc.server_addr.clone_from(server_addr);
|
||||
} else if let Some(server_addr) = &opts.rpc_hostname {
|
||||
if let Some(hostname) = &self.rpc_hostname {
|
||||
opts.grpc.hostname.clone_from(hostname);
|
||||
} else if let Some(hostname) = &opts.rpc_hostname {
|
||||
warn!("Use the deprecated attribute `DatanodeOptions.rpc_hostname`, please use `grpc.hostname` instead.");
|
||||
opts.grpc.server_addr.clone_from(server_addr);
|
||||
opts.grpc.hostname.clone_from(hostname);
|
||||
}
|
||||
|
||||
if let Some(runtime_size) = opts.rpc_runtime_size {
|
||||
@@ -281,7 +277,7 @@ impl StartCommand {
|
||||
|
||||
let plugin_opts = opts.plugins;
|
||||
let mut opts = opts.component;
|
||||
opts.grpc.detect_server_addr();
|
||||
opts.grpc.detect_hostname();
|
||||
let mut plugins = Plugins::new();
|
||||
plugins::setup_datanode_plugins(&mut plugins, &plugin_opts, &opts)
|
||||
.await
|
||||
@@ -361,8 +357,8 @@ mod tests {
|
||||
rpc_addr = "127.0.0.1:4001"
|
||||
rpc_hostname = "192.168.0.1"
|
||||
[grpc]
|
||||
bind_addr = "127.0.0.1:3001"
|
||||
server_addr = "127.0.0.1"
|
||||
addr = "127.0.0.1:3001"
|
||||
hostname = "127.0.0.1"
|
||||
runtime_size = 8
|
||||
"#;
|
||||
write!(file, "{}", toml_str).unwrap();
|
||||
@@ -373,8 +369,8 @@ mod tests {
|
||||
};
|
||||
|
||||
let options = cmd.load_options(&Default::default()).unwrap().component;
|
||||
assert_eq!("127.0.0.1:4001".to_string(), options.grpc.bind_addr);
|
||||
assert_eq!("192.168.0.1".to_string(), options.grpc.server_addr);
|
||||
assert_eq!("127.0.0.1:4001".to_string(), options.grpc.addr);
|
||||
assert_eq!("192.168.0.1".to_string(), options.grpc.hostname);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -435,7 +431,7 @@ mod tests {
|
||||
|
||||
let options = cmd.load_options(&Default::default()).unwrap().component;
|
||||
|
||||
assert_eq!("127.0.0.1:3001".to_string(), options.grpc.bind_addr);
|
||||
assert_eq!("127.0.0.1:3001".to_string(), options.grpc.addr);
|
||||
assert_eq!(Some(42), options.node_id);
|
||||
|
||||
let DatanodeWalConfig::RaftEngine(raft_engine_config) = options.wal else {
|
||||
@@ -649,7 +645,7 @@ mod tests {
|
||||
opts.http.addr,
|
||||
DatanodeOptions::default().component.http.addr
|
||||
);
|
||||
assert_eq!(opts.grpc.server_addr, "10.103.174.219");
|
||||
assert_eq!(opts.grpc.hostname, "10.103.174.219");
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
@@ -129,13 +129,11 @@ struct StartCommand {
|
||||
#[clap(long)]
|
||||
node_id: Option<u64>,
|
||||
/// Bind address for the gRPC server.
|
||||
#[clap(long, alias = "rpc-addr")]
|
||||
rpc_bind_addr: Option<String>,
|
||||
/// The address advertised to the metasrv, and used for connections from outside the host.
|
||||
/// If left empty or unset, the server will automatically use the IP address of the first network interface
|
||||
/// on the host, with the same port number as the one specified in `rpc_bind_addr`.
|
||||
#[clap(long, alias = "rpc-hostname")]
|
||||
rpc_server_addr: Option<String>,
|
||||
#[clap(long)]
|
||||
rpc_addr: Option<String>,
|
||||
/// Hostname for the gRPC server.
|
||||
#[clap(long)]
|
||||
rpc_hostname: Option<String>,
|
||||
/// Metasrv address list;
|
||||
#[clap(long, value_delimiter = ',', num_args = 1..)]
|
||||
metasrv_addrs: Option<Vec<String>>,
|
||||
@@ -186,12 +184,12 @@ impl StartCommand {
|
||||
tokio_console_addr: global_options.tokio_console_addr.clone(),
|
||||
};
|
||||
|
||||
if let Some(addr) = &self.rpc_bind_addr {
|
||||
opts.grpc.bind_addr.clone_from(addr);
|
||||
if let Some(addr) = &self.rpc_addr {
|
||||
opts.grpc.addr.clone_from(addr);
|
||||
}
|
||||
|
||||
if let Some(server_addr) = &self.rpc_server_addr {
|
||||
opts.grpc.server_addr.clone_from(server_addr);
|
||||
if let Some(hostname) = &self.rpc_hostname {
|
||||
opts.grpc.hostname.clone_from(hostname);
|
||||
}
|
||||
|
||||
if let Some(node_id) = self.node_id {
|
||||
@@ -239,7 +237,7 @@ impl StartCommand {
|
||||
info!("Flownode options: {:#?}", opts);
|
||||
|
||||
let mut opts = opts.component;
|
||||
opts.grpc.detect_server_addr();
|
||||
opts.grpc.detect_hostname();
|
||||
|
||||
// TODO(discord9): make it not optionale after cluster id is required
|
||||
let cluster_id = opts.cluster_id.unwrap_or(0);
|
||||
|
||||
@@ -136,19 +136,13 @@ impl SubCommand {
|
||||
|
||||
#[derive(Debug, Default, Parser)]
|
||||
pub struct StartCommand {
|
||||
/// The address to bind the gRPC server.
|
||||
#[clap(long, alias = "rpc-addr")]
|
||||
rpc_bind_addr: Option<String>,
|
||||
/// The address advertised to the metasrv, and used for connections from outside the host.
|
||||
/// If left empty or unset, the server will automatically use the IP address of the first network interface
|
||||
/// on the host, with the same port number as the one specified in `rpc_bind_addr`.
|
||||
#[clap(long, alias = "rpc-hostname")]
|
||||
rpc_server_addr: Option<String>,
|
||||
#[clap(long)]
|
||||
http_addr: Option<String>,
|
||||
#[clap(long)]
|
||||
http_timeout: Option<u64>,
|
||||
#[clap(long)]
|
||||
rpc_addr: Option<String>,
|
||||
#[clap(long)]
|
||||
mysql_addr: Option<String>,
|
||||
#[clap(long)]
|
||||
postgres_addr: Option<String>,
|
||||
@@ -224,15 +218,11 @@ impl StartCommand {
|
||||
opts.http.disable_dashboard = disable_dashboard;
|
||||
}
|
||||
|
||||
if let Some(addr) = &self.rpc_bind_addr {
|
||||
opts.grpc.bind_addr.clone_from(addr);
|
||||
if let Some(addr) = &self.rpc_addr {
|
||||
opts.grpc.addr.clone_from(addr);
|
||||
opts.grpc.tls = tls_opts.clone();
|
||||
}
|
||||
|
||||
if let Some(addr) = &self.rpc_server_addr {
|
||||
opts.grpc.server_addr.clone_from(addr);
|
||||
}
|
||||
|
||||
if let Some(addr) = &self.mysql_addr {
|
||||
opts.mysql.enable = true;
|
||||
opts.mysql.addr.clone_from(addr);
|
||||
@@ -279,7 +269,7 @@ impl StartCommand {
|
||||
|
||||
let plugin_opts = opts.plugins;
|
||||
let mut opts = opts.component;
|
||||
opts.grpc.detect_server_addr();
|
||||
opts.grpc.detect_hostname();
|
||||
let mut plugins = Plugins::new();
|
||||
plugins::setup_frontend_plugins(&mut plugins, &plugin_opts, &opts)
|
||||
.await
|
||||
@@ -423,7 +413,7 @@ mod tests {
|
||||
|
||||
let default_opts = FrontendOptions::default().component;
|
||||
|
||||
assert_eq!(opts.grpc.bind_addr, default_opts.grpc.bind_addr);
|
||||
assert_eq!(opts.grpc.addr, default_opts.grpc.addr);
|
||||
assert!(opts.mysql.enable);
|
||||
assert_eq!(opts.mysql.runtime_size, default_opts.mysql.runtime_size);
|
||||
assert!(opts.postgres.enable);
|
||||
@@ -614,7 +604,7 @@ mod tests {
|
||||
assert_eq!(fe_opts.http.addr, "127.0.0.1:14000");
|
||||
|
||||
// Should be default value.
|
||||
assert_eq!(fe_opts.grpc.bind_addr, GrpcOptions::default().bind_addr);
|
||||
assert_eq!(fe_opts.grpc.addr, GrpcOptions::default().addr);
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
@@ -133,15 +133,11 @@ impl SubCommand {
|
||||
|
||||
#[derive(Debug, Default, Parser)]
|
||||
struct StartCommand {
|
||||
/// The address to bind the gRPC server.
|
||||
#[clap(long, alias = "bind-addr")]
|
||||
rpc_bind_addr: Option<String>,
|
||||
/// The communication server address for the frontend and datanode to connect to metasrv.
|
||||
/// If left empty or unset, the server will automatically use the IP address of the first network interface
|
||||
/// on the host, with the same port number as the one specified in `rpc_bind_addr`.
|
||||
#[clap(long, alias = "server-addr")]
|
||||
rpc_server_addr: Option<String>,
|
||||
#[clap(long, alias = "store-addr", value_delimiter = ',', num_args = 1..)]
|
||||
#[clap(long)]
|
||||
bind_addr: Option<String>,
|
||||
#[clap(long)]
|
||||
server_addr: Option<String>,
|
||||
#[clap(long, aliases = ["store-addr"], value_delimiter = ',', num_args = 1..)]
|
||||
store_addrs: Option<Vec<String>>,
|
||||
#[clap(short, long)]
|
||||
config_file: Option<String>,
|
||||
@@ -205,11 +201,11 @@ impl StartCommand {
|
||||
tokio_console_addr: global_options.tokio_console_addr.clone(),
|
||||
};
|
||||
|
||||
if let Some(addr) = &self.rpc_bind_addr {
|
||||
if let Some(addr) = &self.bind_addr {
|
||||
opts.bind_addr.clone_from(addr);
|
||||
}
|
||||
|
||||
if let Some(addr) = &self.rpc_server_addr {
|
||||
if let Some(addr) = &self.server_addr {
|
||||
opts.server_addr.clone_from(addr);
|
||||
}
|
||||
|
||||
@@ -273,13 +269,11 @@ impl StartCommand {
|
||||
log_versions(version(), short_version(), APP_NAME);
|
||||
|
||||
info!("Metasrv start command: {:#?}", self);
|
||||
info!("Metasrv options: {:#?}", opts);
|
||||
|
||||
let plugin_opts = opts.plugins;
|
||||
let mut opts = opts.component;
|
||||
opts.detect_server_addr();
|
||||
|
||||
info!("Metasrv options: {:#?}", opts);
|
||||
|
||||
let mut plugins = Plugins::new();
|
||||
plugins::setup_metasrv_plugins(&mut plugins, &plugin_opts, &opts)
|
||||
.await
|
||||
@@ -312,8 +306,8 @@ mod tests {
|
||||
#[test]
|
||||
fn test_read_from_cmd() {
|
||||
let cmd = StartCommand {
|
||||
rpc_bind_addr: Some("127.0.0.1:3002".to_string()),
|
||||
rpc_server_addr: Some("127.0.0.1:3002".to_string()),
|
||||
bind_addr: Some("127.0.0.1:3002".to_string()),
|
||||
server_addr: Some("127.0.0.1:3002".to_string()),
|
||||
store_addrs: Some(vec!["127.0.0.1:2380".to_string()]),
|
||||
selector: Some("LoadBased".to_string()),
|
||||
..Default::default()
|
||||
@@ -387,8 +381,8 @@ mod tests {
|
||||
#[test]
|
||||
fn test_load_log_options_from_cli() {
|
||||
let cmd = StartCommand {
|
||||
rpc_bind_addr: Some("127.0.0.1:3002".to_string()),
|
||||
rpc_server_addr: Some("127.0.0.1:3002".to_string()),
|
||||
bind_addr: Some("127.0.0.1:3002".to_string()),
|
||||
server_addr: Some("127.0.0.1:3002".to_string()),
|
||||
store_addrs: Some(vec!["127.0.0.1:2380".to_string()]),
|
||||
selector: Some("LoadBased".to_string()),
|
||||
..Default::default()
|
||||
|
||||
@@ -60,8 +60,7 @@ use frontend::instance::builder::FrontendBuilder;
|
||||
use frontend::instance::{FrontendInstance, Instance as FeInstance, StandaloneDatanodeManager};
|
||||
use frontend::server::Services;
|
||||
use frontend::service_config::{
|
||||
InfluxdbOptions, JaegerOptions, MysqlOptions, OpentsdbOptions, PostgresOptions,
|
||||
PromStoreOptions,
|
||||
InfluxdbOptions, MysqlOptions, OpentsdbOptions, PostgresOptions, PromStoreOptions,
|
||||
};
|
||||
use meta_srv::metasrv::{FLOW_ID_SEQ, TABLE_ID_SEQ};
|
||||
use mito2::config::MitoConfig;
|
||||
@@ -141,7 +140,6 @@ pub struct StandaloneOptions {
|
||||
pub postgres: PostgresOptions,
|
||||
pub opentsdb: OpentsdbOptions,
|
||||
pub influxdb: InfluxdbOptions,
|
||||
pub jaeger: JaegerOptions,
|
||||
pub prom_store: PromStoreOptions,
|
||||
pub wal: DatanodeWalConfig,
|
||||
pub storage: StorageConfig,
|
||||
@@ -171,7 +169,6 @@ impl Default for StandaloneOptions {
|
||||
postgres: PostgresOptions::default(),
|
||||
opentsdb: OpentsdbOptions::default(),
|
||||
influxdb: InfluxdbOptions::default(),
|
||||
jaeger: JaegerOptions::default(),
|
||||
prom_store: PromStoreOptions::default(),
|
||||
wal: DatanodeWalConfig::default(),
|
||||
storage: StorageConfig::default(),
|
||||
@@ -220,7 +217,6 @@ impl StandaloneOptions {
|
||||
postgres: cloned_opts.postgres,
|
||||
opentsdb: cloned_opts.opentsdb,
|
||||
influxdb: cloned_opts.influxdb,
|
||||
jaeger: cloned_opts.jaeger,
|
||||
prom_store: cloned_opts.prom_store,
|
||||
meta_client: None,
|
||||
logging: cloned_opts.logging,
|
||||
@@ -333,8 +329,8 @@ impl App for Instance {
|
||||
pub struct StartCommand {
|
||||
#[clap(long)]
|
||||
http_addr: Option<String>,
|
||||
#[clap(long, alias = "rpc-addr")]
|
||||
rpc_bind_addr: Option<String>,
|
||||
#[clap(long)]
|
||||
rpc_addr: Option<String>,
|
||||
#[clap(long)]
|
||||
mysql_addr: Option<String>,
|
||||
#[clap(long)]
|
||||
@@ -411,9 +407,9 @@ impl StartCommand {
|
||||
opts.storage.data_home.clone_from(data_home);
|
||||
}
|
||||
|
||||
if let Some(addr) = &self.rpc_bind_addr {
|
||||
if let Some(addr) = &self.rpc_addr {
|
||||
// frontend grpc addr conflict with datanode default grpc addr
|
||||
let datanode_grpc_addr = DatanodeOptions::default().grpc.bind_addr;
|
||||
let datanode_grpc_addr = DatanodeOptions::default().grpc.addr;
|
||||
if addr.eq(&datanode_grpc_addr) {
|
||||
return IllegalConfigSnafu {
|
||||
msg: format!(
|
||||
@@ -421,7 +417,7 @@ impl StartCommand {
|
||||
),
|
||||
}.fail();
|
||||
}
|
||||
opts.grpc.bind_addr.clone_from(addr)
|
||||
opts.grpc.addr.clone_from(addr)
|
||||
}
|
||||
|
||||
if let Some(addr) = &self.mysql_addr {
|
||||
@@ -468,7 +464,7 @@ impl StartCommand {
|
||||
let mut plugins = Plugins::new();
|
||||
let plugin_opts = opts.plugins;
|
||||
let mut opts = opts.component;
|
||||
opts.grpc.detect_server_addr();
|
||||
opts.grpc.detect_hostname();
|
||||
let fe_opts = opts.frontend_options();
|
||||
let dn_opts = opts.datanode_options();
|
||||
|
||||
@@ -490,8 +486,8 @@ impl StartCommand {
|
||||
let metadata_dir = metadata_store_dir(data_home);
|
||||
let (kv_backend, procedure_manager) = FeInstance::try_build_standalone_components(
|
||||
metadata_dir,
|
||||
opts.metadata_store,
|
||||
opts.procedure,
|
||||
opts.metadata_store.clone(),
|
||||
opts.procedure.clone(),
|
||||
)
|
||||
.await
|
||||
.context(StartFrontendSnafu)?;
|
||||
@@ -911,7 +907,7 @@ mod tests {
|
||||
assert_eq!("127.0.0.1:4000".to_string(), fe_opts.http.addr);
|
||||
assert_eq!(Duration::from_secs(33), fe_opts.http.timeout);
|
||||
assert_eq!(ReadableSize::mb(128), fe_opts.http.body_limit);
|
||||
assert_eq!("127.0.0.1:4001".to_string(), fe_opts.grpc.bind_addr);
|
||||
assert_eq!("127.0.0.1:4001".to_string(), fe_opts.grpc.addr);
|
||||
assert!(fe_opts.mysql.enable);
|
||||
assert_eq!("127.0.0.1:4002", fe_opts.mysql.addr);
|
||||
assert_eq!(2, fe_opts.mysql.runtime_size);
|
||||
@@ -1041,7 +1037,7 @@ mod tests {
|
||||
assert_eq!(ReadableSize::mb(64), fe_opts.http.body_limit);
|
||||
|
||||
// Should be default value.
|
||||
assert_eq!(fe_opts.grpc.bind_addr, GrpcOptions::default().bind_addr);
|
||||
assert_eq!(fe_opts.grpc.addr, GrpcOptions::default().addr);
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
@@ -63,7 +63,7 @@ mod tests {
|
||||
.args([
|
||||
"datanode",
|
||||
"start",
|
||||
"--rpc-bind-addr=0.0.0.0:4321",
|
||||
"--rpc-addr=0.0.0.0:4321",
|
||||
"--node-id=1",
|
||||
&format!("--data-home={}", data_home.path().display()),
|
||||
&format!("--wal-dir={}", wal_dir.path().display()),
|
||||
@@ -80,7 +80,7 @@ mod tests {
|
||||
"--log-level=off",
|
||||
"cli",
|
||||
"attach",
|
||||
"--grpc-bind-addr=0.0.0.0:4321",
|
||||
"--grpc-addr=0.0.0.0:4321",
|
||||
// history commands can sneaky into stdout and mess up our tests, so disable it
|
||||
"--disable-helper",
|
||||
]);
|
||||
|
||||
@@ -17,6 +17,9 @@ use std::time::Duration;
|
||||
use cmd::options::GreptimeOptions;
|
||||
use cmd::standalone::StandaloneOptions;
|
||||
use common_config::Configurable;
|
||||
use common_grpc::channel_manager::{
|
||||
DEFAULT_MAX_GRPC_RECV_MESSAGE_SIZE, DEFAULT_MAX_GRPC_SEND_MESSAGE_SIZE,
|
||||
};
|
||||
use common_options::datanode::{ClientOptions, DatanodeClientOptions};
|
||||
use common_telemetry::logging::{LoggingOptions, SlowQueryOptions, DEFAULT_OTLP_ENDPOINT};
|
||||
use common_wal::config::raft_engine::RaftEngineConfig;
|
||||
@@ -88,8 +91,13 @@ fn test_load_datanode_example_config() {
|
||||
..Default::default()
|
||||
},
|
||||
grpc: GrpcOptions::default()
|
||||
.with_bind_addr("127.0.0.1:3001")
|
||||
.with_server_addr("127.0.0.1:3001"),
|
||||
.with_addr("127.0.0.1:3001")
|
||||
.with_hostname("127.0.0.1:3001"),
|
||||
rpc_addr: Some("127.0.0.1:3001".to_string()),
|
||||
rpc_hostname: Some("127.0.0.1".to_string()),
|
||||
rpc_runtime_size: Some(8),
|
||||
rpc_max_recv_message_size: Some(DEFAULT_MAX_GRPC_RECV_MESSAGE_SIZE),
|
||||
rpc_max_send_message_size: Some(DEFAULT_MAX_GRPC_SEND_MESSAGE_SIZE),
|
||||
..Default::default()
|
||||
},
|
||||
..Default::default()
|
||||
@@ -136,9 +144,7 @@ fn test_load_frontend_example_config() {
|
||||
remote_write: Some(Default::default()),
|
||||
..Default::default()
|
||||
},
|
||||
grpc: GrpcOptions::default()
|
||||
.with_bind_addr("127.0.0.1:4001")
|
||||
.with_server_addr("127.0.0.1:4001"),
|
||||
grpc: GrpcOptions::default().with_hostname("127.0.0.1:4001"),
|
||||
http: HttpOptions {
|
||||
cors_allowed_origins: vec!["https://example.com".to_string()],
|
||||
..Default::default()
|
||||
|
||||
@@ -12,11 +12,9 @@ common-base.workspace = true
|
||||
common-error.workspace = true
|
||||
common-macro.workspace = true
|
||||
config.workspace = true
|
||||
humantime-serde.workspace = true
|
||||
num_cpus.workspace = true
|
||||
serde.workspace = true
|
||||
serde_json.workspace = true
|
||||
serde_with.workspace = true
|
||||
snafu.workspace = true
|
||||
sysinfo.workspace = true
|
||||
toml.workspace = true
|
||||
|
||||
@@ -16,8 +16,6 @@ pub mod config;
|
||||
pub mod error;
|
||||
pub mod utils;
|
||||
|
||||
use std::time::Duration;
|
||||
|
||||
use common_base::readable_size::ReadableSize;
|
||||
pub use config::*;
|
||||
use serde::{Deserialize, Serialize};
|
||||
@@ -36,27 +34,22 @@ pub enum Mode {
|
||||
Distributed,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
|
||||
#[serde(default)]
|
||||
pub struct KvBackendConfig {
|
||||
/// The size of the metadata store backend log file.
|
||||
// Kv file size in bytes
|
||||
pub file_size: ReadableSize,
|
||||
/// The threshold of the metadata store size to trigger a purge.
|
||||
// Kv purge threshold in bytes
|
||||
pub purge_threshold: ReadableSize,
|
||||
/// The interval of the metadata store to trigger a purge.
|
||||
#[serde(with = "humantime_serde")]
|
||||
pub purge_interval: Duration,
|
||||
}
|
||||
|
||||
impl Default for KvBackendConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
// The log file size 64MB
|
||||
file_size: ReadableSize::mb(64),
|
||||
// The log purge threshold 256MB
|
||||
purge_threshold: ReadableSize::mb(256),
|
||||
// The log purge interval 1m
|
||||
purge_interval: Duration::from_secs(60),
|
||||
// log file size 256MB
|
||||
file_size: ReadableSize::mb(256),
|
||||
// purge threshold 4GB
|
||||
purge_threshold: ReadableSize::gb(4),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -13,7 +13,7 @@
// limitations under the License.

use std::sync::Arc;
pub mod json_get;
mod json_get;
mod json_is;
mod json_path_exists;
mod json_path_match;

@@ -20,12 +20,11 @@ pub mod impl_conv;
|
||||
pub(crate) mod product;
|
||||
mod scalar_add;
|
||||
mod scalar_mul;
|
||||
mod sub;
|
||||
pub(crate) mod sum;
|
||||
mod vector_add;
|
||||
mod vector_div;
|
||||
mod vector_mul;
|
||||
mod vector_norm;
|
||||
mod vector_sub;
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
@@ -49,11 +48,10 @@ impl VectorFunction {
|
||||
registry.register(Arc::new(scalar_mul::ScalarMulFunction));
|
||||
|
||||
// vector calculation
|
||||
registry.register(Arc::new(vector_add::VectorAddFunction));
|
||||
registry.register(Arc::new(vector_sub::VectorSubFunction));
|
||||
registry.register(Arc::new(vector_mul::VectorMulFunction));
|
||||
registry.register(Arc::new(vector_div::VectorDivFunction));
|
||||
registry.register(Arc::new(vector_norm::VectorNormFunction));
|
||||
registry.register(Arc::new(vector_div::VectorDivFunction));
|
||||
registry.register(Arc::new(sub::SubFunction));
|
||||
registry.register(Arc::new(elem_sum::ElemSumFunction));
|
||||
registry.register(Arc::new(elem_product::ElemProductFunction));
|
||||
}
|
||||
|
||||
@@ -42,10 +42,19 @@ const NAME: &str = "vec_sub";
|
||||
/// | [0,-1] |
|
||||
/// +---------------------------------------------------------------+
|
||||
///
|
||||
/// -- Negative scalar to simulate subtraction
|
||||
/// SELECT vec_to_string(vec_sub('[-1.0, -1.0]', '[1.0, 2.0]'));
|
||||
///
|
||||
/// +-----------------------------------------------------------------+
|
||||
/// | vec_to_string(vec_sub(Utf8("[-1.0, -1.0]"),Utf8("[1.0, 2.0]"))) |
|
||||
/// +-----------------------------------------------------------------+
|
||||
/// | [-2,-3] |
|
||||
/// +-----------------------------------------------------------------+
|
||||
///
|
||||
#[derive(Debug, Clone, Default)]
|
||||
pub struct VectorSubFunction;
|
||||
pub struct SubFunction;
|
||||
|
||||
impl Function for VectorSubFunction {
|
||||
impl Function for SubFunction {
|
||||
fn name(&self) -> &str {
|
||||
NAME
|
||||
}
|
||||
@@ -133,7 +142,7 @@ impl Function for VectorSubFunction {
|
||||
}
|
||||
}
|
||||
|
||||
impl Display for VectorSubFunction {
|
||||
impl Display for SubFunction {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "{}", NAME.to_ascii_uppercase())
|
||||
}
|
||||
@@ -150,7 +159,7 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_sub() {
|
||||
let func = VectorSubFunction;
|
||||
let func = SubFunction;
|
||||
|
||||
let input0 = Arc::new(StringVector::from(vec![
|
||||
Some("[1.0,2.0,3.0]".to_string()),
|
||||
@@ -185,7 +194,7 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_sub_error() {
|
||||
let func = VectorSubFunction;
|
||||
let func = SubFunction;
|
||||
|
||||
let input0 = Arc::new(StringVector::from(vec![
|
||||
Some("[1.0,2.0,3.0]".to_string()),
|
||||
@@ -1,214 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::borrow::Cow;
|
||||
use std::fmt::Display;
|
||||
|
||||
use common_query::error::InvalidFuncArgsSnafu;
|
||||
use common_query::prelude::Signature;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::scalars::ScalarVectorBuilder;
|
||||
use datatypes::vectors::{BinaryVectorBuilder, MutableVector, VectorRef};
|
||||
use nalgebra::DVectorView;
|
||||
use snafu::ensure;
|
||||
|
||||
use crate::function::{Function, FunctionContext};
|
||||
use crate::helper;
|
||||
use crate::scalars::vector::impl_conv::{as_veclit, as_veclit_if_const, veclit_to_binlit};
|
||||
|
||||
const NAME: &str = "vec_add";
|
||||
|
||||
/// Adds corresponding elements of two vectors, returns a vector.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```sql
|
||||
/// SELECT vec_to_string(vec_add("[1.0, 1.0]", "[1.0, 2.0]")) as result;
|
||||
///
|
||||
/// +---------------------------------------------------------------+
|
||||
/// | vec_to_string(vec_add(Utf8("[1.0, 1.0]"),Utf8("[1.0, 2.0]"))) |
|
||||
/// +---------------------------------------------------------------+
|
||||
/// | [2,3] |
|
||||
/// +---------------------------------------------------------------+
|
||||
///
|
||||
#[derive(Debug, Clone, Default)]
|
||||
pub struct VectorAddFunction;
|
||||
|
||||
impl Function for VectorAddFunction {
|
||||
fn name(&self) -> &str {
|
||||
NAME
|
||||
}
|
||||
|
||||
fn return_type(
|
||||
&self,
|
||||
_input_types: &[ConcreteDataType],
|
||||
) -> common_query::error::Result<ConcreteDataType> {
|
||||
Ok(ConcreteDataType::binary_datatype())
|
||||
}
|
||||
|
||||
fn signature(&self) -> Signature {
|
||||
helper::one_of_sigs2(
|
||||
vec![
|
||||
ConcreteDataType::string_datatype(),
|
||||
ConcreteDataType::binary_datatype(),
|
||||
],
|
||||
vec![
|
||||
ConcreteDataType::string_datatype(),
|
||||
ConcreteDataType::binary_datatype(),
|
||||
],
|
||||
)
|
||||
}
|
||||
|
||||
fn eval(
|
||||
&self,
|
||||
_func_ctx: FunctionContext,
|
||||
columns: &[VectorRef],
|
||||
) -> common_query::error::Result<VectorRef> {
|
||||
ensure!(
|
||||
columns.len() == 2,
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"The length of the args is not correct, expect exactly two, have: {}",
|
||||
columns.len()
|
||||
)
|
||||
}
|
||||
);
|
||||
let arg0 = &columns[0];
|
||||
let arg1 = &columns[1];
|
||||
|
||||
ensure!(
|
||||
arg0.len() == arg1.len(),
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"The lengths of the vector are not aligned, args 0: {}, args 1: {}",
|
||||
arg0.len(),
|
||||
arg1.len(),
|
||||
)
|
||||
}
|
||||
);
|
||||
|
||||
let len = arg0.len();
|
||||
let mut result = BinaryVectorBuilder::with_capacity(len);
|
||||
if len == 0 {
|
||||
return Ok(result.to_vector());
|
||||
}
|
||||
|
||||
let arg0_const = as_veclit_if_const(arg0)?;
|
||||
let arg1_const = as_veclit_if_const(arg1)?;
|
||||
|
||||
for i in 0..len {
|
||||
let arg0 = match arg0_const.as_ref() {
|
||||
Some(arg0) => Some(Cow::Borrowed(arg0.as_ref())),
|
||||
None => as_veclit(arg0.get_ref(i))?,
|
||||
};
|
||||
let arg1 = match arg1_const.as_ref() {
|
||||
Some(arg1) => Some(Cow::Borrowed(arg1.as_ref())),
|
||||
None => as_veclit(arg1.get_ref(i))?,
|
||||
};
|
||||
let (Some(arg0), Some(arg1)) = (arg0, arg1) else {
|
||||
result.push_null();
|
||||
continue;
|
||||
};
|
||||
let vec0 = DVectorView::from_slice(&arg0, arg0.len());
|
||||
let vec1 = DVectorView::from_slice(&arg1, arg1.len());
|
||||
|
||||
let vec_res = vec0 + vec1;
|
||||
let veclit = vec_res.as_slice();
|
||||
let binlit = veclit_to_binlit(veclit);
|
||||
result.push(Some(&binlit));
|
||||
}
|
||||
|
||||
Ok(result.to_vector())
|
||||
}
|
||||
}
|
||||
|
||||
impl Display for VectorAddFunction {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "{}", NAME.to_ascii_uppercase())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_query::error::Error;
|
||||
use datatypes::vectors::StringVector;
|
||||
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_sub() {
|
||||
let func = VectorAddFunction;
|
||||
|
||||
let input0 = Arc::new(StringVector::from(vec![
|
||||
Some("[1.0,2.0,3.0]".to_string()),
|
||||
Some("[4.0,5.0,6.0]".to_string()),
|
||||
None,
|
||||
Some("[2.0,3.0,3.0]".to_string()),
|
||||
]));
|
||||
let input1 = Arc::new(StringVector::from(vec![
|
||||
Some("[1.0,1.0,1.0]".to_string()),
|
||||
Some("[6.0,5.0,4.0]".to_string()),
|
||||
Some("[3.0,2.0,2.0]".to_string()),
|
||||
None,
|
||||
]));
|
||||
|
||||
let result = func
|
||||
.eval(FunctionContext::default(), &[input0, input1])
|
||||
.unwrap();
|
||||
|
||||
let result = result.as_ref();
|
||||
assert_eq!(result.len(), 4);
|
||||
assert_eq!(
|
||||
result.get_ref(0).as_binary().unwrap(),
|
||||
Some(veclit_to_binlit(&[2.0, 3.0, 4.0]).as_slice())
|
||||
);
|
||||
assert_eq!(
|
||||
result.get_ref(1).as_binary().unwrap(),
|
||||
Some(veclit_to_binlit(&[10.0, 10.0, 10.0]).as_slice())
|
||||
);
|
||||
assert!(result.get_ref(2).is_null());
|
||||
assert!(result.get_ref(3).is_null());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_sub_error() {
|
||||
let func = VectorAddFunction;
|
||||
|
||||
let input0 = Arc::new(StringVector::from(vec![
|
||||
Some("[1.0,2.0,3.0]".to_string()),
|
||||
Some("[4.0,5.0,6.0]".to_string()),
|
||||
None,
|
||||
Some("[2.0,3.0,3.0]".to_string()),
|
||||
]));
|
||||
let input1 = Arc::new(StringVector::from(vec![
|
||||
Some("[1.0,1.0,1.0]".to_string()),
|
||||
Some("[6.0,5.0,4.0]".to_string()),
|
||||
Some("[3.0,2.0,2.0]".to_string()),
|
||||
]));
|
||||
|
||||
let result = func.eval(FunctionContext::default(), &[input0, input1]);
|
||||
|
||||
match result {
|
||||
Err(Error::InvalidFuncArgs { err_msg, .. }) => {
|
||||
assert_eq!(
|
||||
err_msg,
|
||||
"The lengths of the vector are not aligned, args 0: 4, args 1: 3"
|
||||
)
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -15,14 +15,13 @@
|
||||
use api::helper::ColumnDataTypeWrapper;
|
||||
use api::v1::add_column_location::LocationType;
|
||||
use api::v1::alter_table_expr::Kind;
|
||||
use api::v1::column_def::{as_fulltext_option, as_skipping_index_type};
|
||||
use api::v1::column_def::as_fulltext_option;
|
||||
use api::v1::{
|
||||
column_def, AddColumnLocation as Location, AlterTableExpr, Analyzer, CreateTableExpr,
|
||||
DropColumns, ModifyColumnTypes, RenameTable, SemanticType,
|
||||
SkippingIndexType as PbSkippingIndexType,
|
||||
};
|
||||
use common_query::AddColumnLocation;
|
||||
use datatypes::schema::{ColumnSchema, FulltextOptions, RawSchema, SkippingIndexOptions};
|
||||
use datatypes::schema::{ColumnSchema, FulltextOptions, RawSchema};
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
use store_api::region_request::{SetRegionOption, UnsetRegionOption};
|
||||
use table::metadata::TableId;
|
||||
@@ -32,8 +31,7 @@ use table::requests::{
|
||||
};
|
||||
|
||||
use crate::error::{
|
||||
InvalidColumnDefSnafu, InvalidSetFulltextOptionRequestSnafu,
|
||||
InvalidSetSkippingIndexOptionRequestSnafu, InvalidSetTableOptionRequestSnafu,
|
||||
InvalidColumnDefSnafu, InvalidSetFulltextOptionRequestSnafu, InvalidSetTableOptionRequestSnafu,
|
||||
InvalidUnsetTableOptionRequestSnafu, MissingAlterIndexOptionSnafu, MissingFieldSnafu,
|
||||
MissingTimestampColumnSnafu, Result, UnknownLocationTypeSnafu,
|
||||
};
|
||||
@@ -139,18 +137,6 @@ pub fn alter_expr_to_request(table_id: TableId, expr: AlterTableExpr) -> Result<
|
||||
column_name: i.column_name,
|
||||
},
|
||||
},
|
||||
api::v1::set_index::Options::Skipping(s) => AlterKind::SetIndex {
|
||||
options: SetIndexOptions::Skipping {
|
||||
column_name: s.column_name,
|
||||
options: SkippingIndexOptions {
|
||||
granularity: s.granularity as u32,
|
||||
index_type: as_skipping_index_type(
|
||||
PbSkippingIndexType::try_from(s.skipping_index_type)
|
||||
.context(InvalidSetSkippingIndexOptionRequestSnafu)?,
|
||||
),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
None => return MissingAlterIndexOptionSnafu.fail(),
|
||||
},
|
||||
@@ -166,11 +152,6 @@ pub fn alter_expr_to_request(table_id: TableId, expr: AlterTableExpr) -> Result<
|
||||
column_name: i.column_name,
|
||||
},
|
||||
},
|
||||
api::v1::unset_index::Options::Skipping(s) => AlterKind::UnsetIndex {
|
||||
options: UnsetIndexOptions::Skipping {
|
||||
column_name: s.column_name,
|
||||
},
|
||||
},
|
||||
},
|
||||
None => return MissingAlterIndexOptionSnafu.fail(),
|
||||
},
|
||||
|
||||
@@ -140,14 +140,6 @@ pub enum Error {
|
||||
error: prost::UnknownEnumValue,
|
||||
},
|
||||
|
||||
#[snafu(display("Invalid set skipping index option request"))]
|
||||
InvalidSetSkippingIndexOptionRequest {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
#[snafu(source)]
|
||||
error: prost::UnknownEnumValue,
|
||||
},
|
||||
|
||||
#[snafu(display("Missing alter index options"))]
|
||||
MissingAlterIndexOption {
|
||||
#[snafu(implicit)]
|
||||
@@ -179,7 +171,6 @@ impl ErrorExt for Error {
|
||||
Error::InvalidSetTableOptionRequest { .. }
|
||||
| Error::InvalidUnsetTableOptionRequest { .. }
|
||||
| Error::InvalidSetFulltextOptionRequest { .. }
|
||||
| Error::InvalidSetSkippingIndexOptionRequest { .. }
|
||||
| Error::MissingAlterIndexOption { .. } => StatusCode::InvalidArguments,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -14,12 +14,37 @@
|
||||
|
||||
use api::helper;
|
||||
use api::v1::column::Values;
|
||||
use api::v1::{Column, CreateTableExpr};
|
||||
use common_base::BitVec;
|
||||
use datatypes::data_type::{ConcreteDataType, DataType};
|
||||
use datatypes::prelude::VectorRef;
|
||||
use snafu::{ensure, ResultExt};
|
||||
use table::metadata::TableId;
|
||||
use table::table_reference::TableReference;
|
||||
|
||||
use crate::error::{CreateVectorSnafu, Result, UnexpectedValuesLengthSnafu};
|
||||
use crate::util;
|
||||
use crate::util::ColumnExpr;
|
||||
|
||||
/// Try to build create table request from insert data.
|
||||
pub fn build_create_expr_from_insertion(
|
||||
catalog_name: &str,
|
||||
schema_name: &str,
|
||||
table_id: Option<TableId>,
|
||||
table_name: &str,
|
||||
columns: &[Column],
|
||||
engine: &str,
|
||||
) -> Result<CreateTableExpr> {
|
||||
let table_name = TableReference::full(catalog_name, schema_name, table_name);
|
||||
let column_exprs = ColumnExpr::from_columns(columns);
|
||||
util::build_create_table_expr(
|
||||
table_id,
|
||||
&table_name,
|
||||
column_exprs,
|
||||
engine,
|
||||
"Created on insertion",
|
||||
)
|
||||
}
|
||||
|
||||
pub(crate) fn add_values_to_builder(
|
||||
data_type: ConcreteDataType,
|
||||
@@ -62,7 +87,276 @@ fn is_null(null_mask: &BitVec, idx: usize) -> Option<bool> {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
use std::{assert_eq, vec};
|
||||
|
||||
use api::helper::ColumnDataTypeWrapper;
|
||||
use api::v1::column::Values;
|
||||
use api::v1::column_data_type_extension::TypeExt;
|
||||
use api::v1::{
|
||||
Column, ColumnDataType, ColumnDataTypeExtension, Decimal128, DecimalTypeExtension,
|
||||
IntervalMonthDayNano, SemanticType,
|
||||
};
|
||||
use common_base::BitVec;
|
||||
use common_catalog::consts::MITO_ENGINE;
|
||||
use common_time::interval::IntervalUnit;
|
||||
use common_time::timestamp::TimeUnit;
|
||||
use datatypes::data_type::ConcreteDataType;
|
||||
use datatypes::schema::{ColumnSchema, SchemaBuilder};
|
||||
use snafu::ResultExt;
|
||||
|
||||
use super::*;
|
||||
use crate::error;
|
||||
use crate::error::ColumnDataTypeSnafu;
|
||||
|
||||
#[inline]
|
||||
fn build_column_schema(
|
||||
column_name: &str,
|
||||
datatype: i32,
|
||||
nullable: bool,
|
||||
) -> error::Result<ColumnSchema> {
|
||||
let datatype_wrapper =
|
||||
ColumnDataTypeWrapper::try_new(datatype, None).context(ColumnDataTypeSnafu)?;
|
||||
|
||||
Ok(ColumnSchema::new(
|
||||
column_name,
|
||||
datatype_wrapper.into(),
|
||||
nullable,
|
||||
))
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_build_create_table_request() {
|
||||
let table_id = Some(10);
|
||||
let table_name = "test_metric";
|
||||
|
||||
assert!(
|
||||
build_create_expr_from_insertion("", "", table_id, table_name, &[], MITO_ENGINE)
|
||||
.is_err()
|
||||
);
|
||||
|
||||
let insert_batch = mock_insert_batch();
|
||||
|
||||
let create_expr = build_create_expr_from_insertion(
|
||||
"",
|
||||
"",
|
||||
table_id,
|
||||
table_name,
|
||||
&insert_batch.0,
|
||||
MITO_ENGINE,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(table_id, create_expr.table_id.map(|x| x.id));
|
||||
assert_eq!(table_name, create_expr.table_name);
|
||||
assert_eq!("Created on insertion".to_string(), create_expr.desc);
|
||||
assert_eq!(
|
||||
vec![create_expr.column_defs[0].name.clone()],
|
||||
create_expr.primary_keys
|
||||
);
|
||||
|
||||
let column_defs = create_expr.column_defs;
|
||||
assert_eq!(column_defs[5].name, create_expr.time_index);
|
||||
assert_eq!(7, column_defs.len());
|
||||
|
||||
assert_eq!(
|
||||
ConcreteDataType::string_datatype(),
|
||||
ConcreteDataType::from(
|
||||
ColumnDataTypeWrapper::try_new(
|
||||
column_defs
|
||||
.iter()
|
||||
.find(|c| c.name == "host")
|
||||
.unwrap()
|
||||
.data_type,
|
||||
None
|
||||
)
|
||||
.unwrap()
|
||||
)
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
ConcreteDataType::float64_datatype(),
|
||||
ConcreteDataType::from(
|
||||
ColumnDataTypeWrapper::try_new(
|
||||
column_defs
|
||||
.iter()
|
||||
.find(|c| c.name == "cpu")
|
||||
.unwrap()
|
||||
.data_type,
|
||||
None
|
||||
)
|
||||
.unwrap()
|
||||
)
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
ConcreteDataType::float64_datatype(),
|
||||
ConcreteDataType::from(
|
||||
ColumnDataTypeWrapper::try_new(
|
||||
column_defs
|
||||
.iter()
|
||||
.find(|c| c.name == "memory")
|
||||
.unwrap()
|
||||
.data_type,
|
||||
None
|
||||
)
|
||||
.unwrap()
|
||||
)
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
ConcreteDataType::time_datatype(TimeUnit::Millisecond),
|
||||
ConcreteDataType::from(
|
||||
ColumnDataTypeWrapper::try_new(
|
||||
column_defs
|
||||
.iter()
|
||||
.find(|c| c.name == "time")
|
||||
.unwrap()
|
||||
.data_type,
|
||||
None
|
||||
)
|
||||
.unwrap()
|
||||
)
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
ConcreteDataType::interval_datatype(IntervalUnit::MonthDayNano),
|
||||
ConcreteDataType::from(
|
||||
ColumnDataTypeWrapper::try_new(
|
||||
column_defs
|
||||
.iter()
|
||||
.find(|c| c.name == "interval")
|
||||
.unwrap()
|
||||
.data_type,
|
||||
None
|
||||
)
|
||||
.unwrap()
|
||||
)
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
ConcreteDataType::timestamp_millisecond_datatype(),
|
||||
ConcreteDataType::from(
|
||||
ColumnDataTypeWrapper::try_new(
|
||||
column_defs
|
||||
.iter()
|
||||
.find(|c| c.name == "ts")
|
||||
.unwrap()
|
||||
.data_type,
|
||||
None
|
||||
)
|
||||
.unwrap()
|
||||
)
|
||||
);
|
||||
|
||||
let decimal_column = column_defs.iter().find(|c| c.name == "decimals").unwrap();
|
||||
assert_eq!(
|
||||
ConcreteDataType::decimal128_datatype(38, 10),
|
||||
ConcreteDataType::from(
|
||||
ColumnDataTypeWrapper::try_new(
|
||||
decimal_column.data_type,
|
||||
decimal_column.datatype_extension,
|
||||
)
|
||||
.unwrap()
|
||||
)
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_find_new_columns() {
|
||||
let mut columns = Vec::with_capacity(1);
|
||||
let cpu_column = build_column_schema("cpu", 10, true).unwrap();
|
||||
let ts_column = build_column_schema("ts", 15, false)
|
||||
.unwrap()
|
||||
.with_time_index(true);
|
||||
columns.push(cpu_column);
|
||||
columns.push(ts_column);
|
||||
|
||||
let schema = Arc::new(SchemaBuilder::try_from(columns).unwrap().build().unwrap());
|
||||
|
||||
assert!(
|
||||
util::extract_new_columns(&schema, ColumnExpr::from_columns(&[]))
|
||||
.unwrap()
|
||||
.is_none()
|
||||
);
|
||||
|
||||
let insert_batch = mock_insert_batch();
|
||||
|
||||
let add_columns =
|
||||
util::extract_new_columns(&schema, ColumnExpr::from_columns(&insert_batch.0))
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(5, add_columns.add_columns.len());
|
||||
let host_column = &add_columns.add_columns[0];
|
||||
assert_eq!(
|
||||
ConcreteDataType::string_datatype(),
|
||||
ConcreteDataType::from(
|
||||
ColumnDataTypeWrapper::try_new(
|
||||
host_column.column_def.as_ref().unwrap().data_type,
|
||||
None
|
||||
)
|
||||
.unwrap()
|
||||
)
|
||||
);
|
||||
assert!(host_column.add_if_not_exists);
|
||||
|
||||
let memory_column = &add_columns.add_columns[1];
|
||||
assert_eq!(
|
||||
ConcreteDataType::float64_datatype(),
|
||||
ConcreteDataType::from(
|
||||
ColumnDataTypeWrapper::try_new(
|
||||
memory_column.column_def.as_ref().unwrap().data_type,
|
||||
None
|
||||
)
|
||||
.unwrap()
|
||||
)
|
||||
);
|
||||
assert!(host_column.add_if_not_exists);
|
||||
|
||||
let time_column = &add_columns.add_columns[2];
|
||||
assert_eq!(
|
||||
ConcreteDataType::time_datatype(TimeUnit::Millisecond),
|
||||
ConcreteDataType::from(
|
||||
ColumnDataTypeWrapper::try_new(
|
||||
time_column.column_def.as_ref().unwrap().data_type,
|
||||
None
|
||||
)
|
||||
.unwrap()
|
||||
)
|
||||
);
|
||||
assert!(host_column.add_if_not_exists);
|
||||
|
||||
let interval_column = &add_columns.add_columns[3];
|
||||
assert_eq!(
|
||||
ConcreteDataType::interval_datatype(IntervalUnit::MonthDayNano),
|
||||
ConcreteDataType::from(
|
||||
ColumnDataTypeWrapper::try_new(
|
||||
interval_column.column_def.as_ref().unwrap().data_type,
|
||||
None
|
||||
)
|
||||
.unwrap()
|
||||
)
|
||||
);
|
||||
assert!(host_column.add_if_not_exists);
|
||||
|
||||
let decimal_column = &add_columns.add_columns[4];
|
||||
assert_eq!(
|
||||
ConcreteDataType::decimal128_datatype(38, 10),
|
||||
ConcreteDataType::from(
|
||||
ColumnDataTypeWrapper::try_new(
|
||||
decimal_column.column_def.as_ref().unwrap().data_type,
|
||||
decimal_column
|
||||
.column_def
|
||||
.as_ref()
|
||||
.unwrap()
|
||||
.datatype_extension
|
||||
)
|
||||
.unwrap()
|
||||
)
|
||||
);
|
||||
assert!(host_column.add_if_not_exists);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_is_null() {
|
||||
@@ -77,4 +371,127 @@ mod tests {
|
||||
assert_eq!(None, is_null(&null_mask, 16));
|
||||
assert_eq!(None, is_null(&null_mask, 99));
|
||||
}
|
||||
|
||||
fn mock_insert_batch() -> (Vec<Column>, u32) {
|
||||
let row_count = 2;
|
||||
|
||||
let host_vals = Values {
|
||||
string_values: vec!["host1".to_string(), "host2".to_string()],
|
||||
..Default::default()
|
||||
};
|
||||
let host_column = Column {
|
||||
column_name: "host".to_string(),
|
||||
semantic_type: SemanticType::Tag as i32,
|
||||
values: Some(host_vals),
|
||||
null_mask: vec![0],
|
||||
datatype: ColumnDataType::String as i32,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let cpu_vals = Values {
|
||||
f64_values: vec![0.31],
|
||||
..Default::default()
|
||||
};
|
||||
let cpu_column = Column {
|
||||
column_name: "cpu".to_string(),
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
values: Some(cpu_vals),
|
||||
null_mask: vec![2],
|
||||
datatype: ColumnDataType::Float64 as i32,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let mem_vals = Values {
|
||||
f64_values: vec![0.1],
|
||||
..Default::default()
|
||||
};
|
||||
let mem_column = Column {
|
||||
column_name: "memory".to_string(),
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
values: Some(mem_vals),
|
||||
null_mask: vec![1],
|
||||
datatype: ColumnDataType::Float64 as i32,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let time_vals = Values {
|
||||
time_millisecond_values: vec![100, 101],
|
||||
..Default::default()
|
||||
};
|
||||
let time_column = Column {
|
||||
column_name: "time".to_string(),
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
values: Some(time_vals),
|
||||
null_mask: vec![0],
|
||||
datatype: ColumnDataType::TimeMillisecond as i32,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let interval1 = IntervalMonthDayNano {
|
||||
months: 1,
|
||||
days: 2,
|
||||
nanoseconds: 3,
|
||||
};
|
||||
let interval2 = IntervalMonthDayNano {
|
||||
months: 4,
|
||||
days: 5,
|
||||
nanoseconds: 6,
|
||||
};
|
||||
let interval_vals = Values {
|
||||
interval_month_day_nano_values: vec![interval1, interval2],
|
||||
..Default::default()
|
||||
};
|
||||
let interval_column = Column {
|
||||
column_name: "interval".to_string(),
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
values: Some(interval_vals),
|
||||
null_mask: vec![0],
|
||||
datatype: ColumnDataType::IntervalMonthDayNano as i32,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let ts_vals = Values {
|
||||
timestamp_millisecond_values: vec![100, 101],
|
||||
..Default::default()
|
||||
};
|
||||
let ts_column = Column {
|
||||
column_name: "ts".to_string(),
|
||||
semantic_type: SemanticType::Timestamp as i32,
|
||||
values: Some(ts_vals),
|
||||
null_mask: vec![0],
|
||||
datatype: ColumnDataType::TimestampMillisecond as i32,
|
||||
..Default::default()
|
||||
};
|
||||
let decimal_vals = Values {
|
||||
decimal128_values: vec![Decimal128 { hi: 0, lo: 123 }, Decimal128 { hi: 0, lo: 456 }],
|
||||
..Default::default()
|
||||
};
|
||||
let decimal_column = Column {
|
||||
column_name: "decimals".to_string(),
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
values: Some(decimal_vals),
|
||||
null_mask: vec![0],
|
||||
datatype: ColumnDataType::Decimal128 as i32,
|
||||
datatype_extension: Some(ColumnDataTypeExtension {
|
||||
type_ext: Some(TypeExt::DecimalType(DecimalTypeExtension {
|
||||
precision: 38,
|
||||
scale: 10,
|
||||
})),
|
||||
}),
|
||||
options: None,
|
||||
};
|
||||
|
||||
(
|
||||
vec![
|
||||
host_column,
|
||||
cpu_column,
|
||||
mem_column,
|
||||
time_column,
|
||||
interval_column,
|
||||
ts_column,
|
||||
decimal_column,
|
||||
],
|
||||
row_count,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -19,3 +19,4 @@ pub mod insert;
pub mod util;

pub use alter::{alter_expr_to_request, create_table_schema};
pub use insert::build_create_expr_from_insertion;

@@ -236,414 +236,3 @@ pub fn extract_new_columns(
|
||||
}))
|
||||
}
|
||||
}
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
use std::{assert_eq, vec};
|
||||
|
||||
use api::helper::ColumnDataTypeWrapper;
|
||||
use api::v1::column::Values;
|
||||
use api::v1::column_data_type_extension::TypeExt;
|
||||
use api::v1::{
|
||||
Column, ColumnDataType, ColumnDataTypeExtension, Decimal128, DecimalTypeExtension,
IntervalMonthDayNano, SemanticType,
};
use common_catalog::consts::MITO_ENGINE;
use common_time::interval::IntervalUnit;
use common_time::timestamp::TimeUnit;
use datatypes::data_type::ConcreteDataType;
use datatypes::schema::{ColumnSchema, SchemaBuilder};
use snafu::ResultExt;

use super::*;
use crate::error;
use crate::error::ColumnDataTypeSnafu;

#[inline]
fn build_column_schema(
column_name: &str,
datatype: i32,
nullable: bool,
) -> error::Result<ColumnSchema> {
let datatype_wrapper =
ColumnDataTypeWrapper::try_new(datatype, None).context(ColumnDataTypeSnafu)?;

Ok(ColumnSchema::new(
column_name,
datatype_wrapper.into(),
nullable,
))
}

fn build_create_expr_from_insertion(
catalog_name: &str,
schema_name: &str,
table_id: Option<TableId>,
table_name: &str,
columns: &[Column],
engine: &str,
) -> Result<CreateTableExpr> {
let table_name = TableReference::full(catalog_name, schema_name, table_name);
let column_exprs = ColumnExpr::from_columns(columns);
build_create_table_expr(
table_id,
&table_name,
column_exprs,
engine,
"Created on insertion",
)
}

#[test]
fn test_build_create_table_request() {
let table_id = Some(10);
let table_name = "test_metric";

assert!(
build_create_expr_from_insertion("", "", table_id, table_name, &[], MITO_ENGINE)
.is_err()
);

let insert_batch = mock_insert_batch();

let create_expr = build_create_expr_from_insertion(
"",
"",
table_id,
table_name,
&insert_batch.0,
MITO_ENGINE,
)
.unwrap();

assert_eq!(table_id, create_expr.table_id.map(|x| x.id));
assert_eq!(table_name, create_expr.table_name);
assert_eq!("Created on insertion".to_string(), create_expr.desc);
assert_eq!(
vec![create_expr.column_defs[0].name.clone()],
create_expr.primary_keys
);

let column_defs = create_expr.column_defs;
assert_eq!(column_defs[5].name, create_expr.time_index);
assert_eq!(7, column_defs.len());

assert_eq!(
ConcreteDataType::string_datatype(),
ConcreteDataType::from(
ColumnDataTypeWrapper::try_new(
column_defs
.iter()
.find(|c| c.name == "host")
.unwrap()
.data_type,
None
)
.unwrap()
)
);

assert_eq!(
ConcreteDataType::float64_datatype(),
ConcreteDataType::from(
ColumnDataTypeWrapper::try_new(
column_defs
.iter()
.find(|c| c.name == "cpu")
.unwrap()
.data_type,
None
)
.unwrap()
)
);

assert_eq!(
ConcreteDataType::float64_datatype(),
ConcreteDataType::from(
ColumnDataTypeWrapper::try_new(
column_defs
.iter()
.find(|c| c.name == "memory")
.unwrap()
.data_type,
None
)
.unwrap()
)
);

assert_eq!(
ConcreteDataType::time_datatype(TimeUnit::Millisecond),
ConcreteDataType::from(
ColumnDataTypeWrapper::try_new(
column_defs
.iter()
.find(|c| c.name == "time")
.unwrap()
.data_type,
None
)
.unwrap()
)
);

assert_eq!(
ConcreteDataType::interval_datatype(IntervalUnit::MonthDayNano),
ConcreteDataType::from(
ColumnDataTypeWrapper::try_new(
column_defs
.iter()
.find(|c| c.name == "interval")
.unwrap()
.data_type,
None
)
.unwrap()
)
);

assert_eq!(
ConcreteDataType::timestamp_millisecond_datatype(),
ConcreteDataType::from(
ColumnDataTypeWrapper::try_new(
column_defs
.iter()
.find(|c| c.name == "ts")
.unwrap()
.data_type,
None
)
.unwrap()
)
);

let decimal_column = column_defs.iter().find(|c| c.name == "decimals").unwrap();
assert_eq!(
ConcreteDataType::decimal128_datatype(38, 10),
ConcreteDataType::from(
ColumnDataTypeWrapper::try_new(
decimal_column.data_type,
decimal_column.datatype_extension,
)
.unwrap()
)
);
}

#[test]
fn test_find_new_columns() {
let mut columns = Vec::with_capacity(1);
let cpu_column = build_column_schema("cpu", 10, true).unwrap();
let ts_column = build_column_schema("ts", 15, false)
.unwrap()
.with_time_index(true);
columns.push(cpu_column);
columns.push(ts_column);

let schema = Arc::new(SchemaBuilder::try_from(columns).unwrap().build().unwrap());

assert!(extract_new_columns(&schema, ColumnExpr::from_columns(&[]))
.unwrap()
.is_none());

let insert_batch = mock_insert_batch();

let add_columns = extract_new_columns(&schema, ColumnExpr::from_columns(&insert_batch.0))
.unwrap()
.unwrap();

assert_eq!(5, add_columns.add_columns.len());
let host_column = &add_columns.add_columns[0];
assert_eq!(
ConcreteDataType::string_datatype(),
ConcreteDataType::from(
ColumnDataTypeWrapper::try_new(
host_column.column_def.as_ref().unwrap().data_type,
None
)
.unwrap()
)
);
assert!(host_column.add_if_not_exists);

let memory_column = &add_columns.add_columns[1];
assert_eq!(
ConcreteDataType::float64_datatype(),
ConcreteDataType::from(
ColumnDataTypeWrapper::try_new(
memory_column.column_def.as_ref().unwrap().data_type,
None
)
.unwrap()
)
);
assert!(host_column.add_if_not_exists);

let time_column = &add_columns.add_columns[2];
assert_eq!(
ConcreteDataType::time_datatype(TimeUnit::Millisecond),
ConcreteDataType::from(
ColumnDataTypeWrapper::try_new(
time_column.column_def.as_ref().unwrap().data_type,
None
)
.unwrap()
)
);
assert!(host_column.add_if_not_exists);

let interval_column = &add_columns.add_columns[3];
assert_eq!(
ConcreteDataType::interval_datatype(IntervalUnit::MonthDayNano),
ConcreteDataType::from(
ColumnDataTypeWrapper::try_new(
interval_column.column_def.as_ref().unwrap().data_type,
None
)
.unwrap()
)
);
assert!(host_column.add_if_not_exists);

let decimal_column = &add_columns.add_columns[4];
assert_eq!(
ConcreteDataType::decimal128_datatype(38, 10),
ConcreteDataType::from(
ColumnDataTypeWrapper::try_new(
decimal_column.column_def.as_ref().unwrap().data_type,
decimal_column
.column_def
.as_ref()
.unwrap()
.datatype_extension
)
.unwrap()
)
);
assert!(host_column.add_if_not_exists);
}

fn mock_insert_batch() -> (Vec<Column>, u32) {
let row_count = 2;

let host_vals = Values {
string_values: vec!["host1".to_string(), "host2".to_string()],
..Default::default()
};
let host_column = Column {
column_name: "host".to_string(),
semantic_type: SemanticType::Tag as i32,
values: Some(host_vals),
null_mask: vec![0],
datatype: ColumnDataType::String as i32,
..Default::default()
};

let cpu_vals = Values {
f64_values: vec![0.31],
..Default::default()
};
let cpu_column = Column {
column_name: "cpu".to_string(),
semantic_type: SemanticType::Field as i32,
values: Some(cpu_vals),
null_mask: vec![2],
datatype: ColumnDataType::Float64 as i32,
..Default::default()
};

let mem_vals = Values {
f64_values: vec![0.1],
..Default::default()
};
let mem_column = Column {
column_name: "memory".to_string(),
semantic_type: SemanticType::Field as i32,
values: Some(mem_vals),
null_mask: vec![1],
datatype: ColumnDataType::Float64 as i32,
..Default::default()
};

let time_vals = Values {
time_millisecond_values: vec![100, 101],
..Default::default()
};
let time_column = Column {
column_name: "time".to_string(),
semantic_type: SemanticType::Field as i32,
values: Some(time_vals),
null_mask: vec![0],
datatype: ColumnDataType::TimeMillisecond as i32,
..Default::default()
};

let interval1 = IntervalMonthDayNano {
months: 1,
days: 2,
nanoseconds: 3,
};
let interval2 = IntervalMonthDayNano {
months: 4,
days: 5,
nanoseconds: 6,
};
let interval_vals = Values {
interval_month_day_nano_values: vec![interval1, interval2],
..Default::default()
};
let interval_column = Column {
column_name: "interval".to_string(),
semantic_type: SemanticType::Field as i32,
values: Some(interval_vals),
null_mask: vec![0],
datatype: ColumnDataType::IntervalMonthDayNano as i32,
..Default::default()
};

let ts_vals = Values {
timestamp_millisecond_values: vec![100, 101],
..Default::default()
};
let ts_column = Column {
column_name: "ts".to_string(),
semantic_type: SemanticType::Timestamp as i32,
values: Some(ts_vals),
null_mask: vec![0],
datatype: ColumnDataType::TimestampMillisecond as i32,
..Default::default()
};
let decimal_vals = Values {
decimal128_values: vec![Decimal128 { hi: 0, lo: 123 }, Decimal128 { hi: 0, lo: 456 }],
..Default::default()
};
let decimal_column = Column {
column_name: "decimals".to_string(),
semantic_type: SemanticType::Field as i32,
values: Some(decimal_vals),
null_mask: vec![0],
datatype: ColumnDataType::Decimal128 as i32,
datatype_extension: Some(ColumnDataTypeExtension {
type_ext: Some(TypeExt::DecimalType(DecimalTypeExtension {
precision: 38,
scale: 10,
})),
}),
options: None,
};

(
vec![
host_column,
cpu_column,
mem_column,
time_column,
interval_column,
ts_column,
decimal_column,
],
row_count,
)
}
}

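Side note on the mock above (illustrative, not part of the diff): the `null_mask` bytes appear to be a per-row bitmap with the lowest bit mapping to the first row, so with `row_count = 2`, `vec![2]` (0b10) marks the second row null and `vec![1]` (0b01) marks the first, which is why `cpu` and `memory` each carry a single value. A minimal sketch of that reading, under that assumption:

// Hypothetical helper, not an API from this diff; shows the assumed bit layout only.
fn is_row_null(null_mask: &[u8], row: usize) -> bool {
    (null_mask[row / 8] >> (row % 8)) & 1 == 1
}
// With the masks above: is_row_null(&[2], 1) == true, is_row_null(&[1], 0) == true.
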
@@ -6,7 +6,7 @@ license.workspace = true

[features]
testing = []
pg_kvbackend = ["dep:tokio-postgres", "dep:backon", "dep:deadpool-postgres", "dep:deadpool"]
pg_kvbackend = ["dep:tokio-postgres", "dep:backon"]

[lints]
workspace = true
@@ -36,8 +36,8 @@ common-wal.workspace = true
datafusion-common.workspace = true
datafusion-expr.workspace = true
datatypes.workspace = true
deadpool = { workspace = true, optional = true }
deadpool-postgres = { workspace = true, optional = true }
deadpool.workspace = true
deadpool-postgres.workspace = true
derive_builder.workspace = true
etcd-client.workspace = true
futures.workspace = true

@@ -13,7 +13,6 @@
// limitations under the License.

use std::any::Any;
use std::collections::HashMap;

use common_procedure::Status;
use common_telemetry::info;
@@ -26,7 +25,6 @@ use super::cursor::DropDatabaseCursor;
use super::{DropDatabaseContext, DropTableTarget};
use crate::ddl::drop_database::State;
use crate::ddl::drop_table::executor::DropTableExecutor;
use crate::ddl::utils::extract_region_wal_options;
use crate::ddl::DdlContext;
use crate::error::{self, Result};
use crate::key::table_route::TableRouteValue;
@@ -109,22 +107,8 @@ impl State for DropDatabaseExecutor {
self.physical_table_id,
self.physical_region_routes.clone(),
);

// Deletes topic-region mapping if dropping physical table
let region_wal_options =
if let TableRouteValue::Physical(table_route_value) = &table_route_value {
let datanode_table_values = ddl_ctx
.table_metadata_manager
.datanode_table_manager()
.regions(self.physical_table_id, table_route_value)
.await?;
extract_region_wal_options(&datanode_table_values)?
} else {
HashMap::new()
};

executor
.on_destroy_metadata(ddl_ctx, &table_route_value, &region_wal_options)
.on_destroy_metadata(ddl_ctx, &table_route_value)
.await?;
executor.invalidate_table_cache(ddl_ctx).await?;
executor

@@ -15,8 +15,6 @@
pub(crate) mod executor;
mod metadata;

use std::collections::HashMap;

use async_trait::async_trait;
use common_error::ext::BoxedError;
use common_procedure::error::{ExternalSnafu, FromJsonSnafu, ToJsonSnafu};
@@ -26,10 +24,8 @@ use common_procedure::{
};
use common_telemetry::info;
use common_telemetry::tracing::warn;
use common_wal::options::WalOptions;
use serde::{Deserialize, Serialize};
use snafu::{OptionExt, ResultExt};
use store_api::storage::RegionNumber;
use strum::AsRefStr;
use table::metadata::TableId;
use table::table_reference::TableReference;
@@ -135,11 +131,7 @@ impl DropTableProcedure {
);
// Deletes table metadata logically.
self.executor
.on_delete_metadata(
&self.context,
table_route_value,
&self.data.region_wal_options,
)
.on_delete_metadata(&self.context, table_route_value)
.await?;
info!("Deleted table metadata for table {table_id}");
self.data.state = DropTableState::InvalidateTableCache;
@@ -171,11 +163,7 @@ impl DropTableProcedure {
self.data.physical_region_routes.clone(),
);
self.executor
.on_delete_metadata_tombstone(
&self.context,
table_route_value,
&self.data.region_wal_options,
)
.on_delete_metadata_tombstone(&self.context, table_route_value)
.await?;

self.dropping_regions.clear();
@@ -255,11 +243,7 @@ impl Procedure for DropTableProcedure {
self.data.physical_region_routes.clone(),
);
self.executor
.on_restore_metadata(
&self.context,
table_route_value,
&self.data.region_wal_options,
)
.on_restore_metadata(&self.context, table_route_value)
.await
.map_err(ProcedureError::external)
}
@@ -273,8 +257,6 @@ pub struct DropTableData {
pub physical_region_routes: Vec<RegionRoute>,
pub physical_table_id: Option<TableId>,
#[serde(default)]
pub region_wal_options: HashMap<RegionNumber, WalOptions>,
#[serde(default)]
pub allow_rollback: bool,
}

@@ -286,7 +268,6 @@ impl DropTableData {
task,
physical_region_routes: vec![],
physical_table_id: None,
region_wal_options: HashMap::new(),
allow_rollback: false,
}
}

@@ -12,8 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::collections::HashMap;

use api::v1::region::{
region_request, DropRequest as PbDropRegionRequest, RegionRequest, RegionRequestHeader,
};
@@ -21,10 +19,9 @@ use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
use common_telemetry::debug;
use common_telemetry::tracing_context::TracingContext;
use common_wal::options::WalOptions;
use futures::future::join_all;
use snafu::ensure;
use store_api::storage::{RegionId, RegionNumber};
use store_api::storage::RegionId;
use table::metadata::TableId;
use table::table_name::TableName;

@@ -116,15 +113,9 @@ impl DropTableExecutor {
&self,
ctx: &DdlContext,
table_route_value: &TableRouteValue,
region_wal_options: &HashMap<RegionNumber, WalOptions>,
) -> Result<()> {
ctx.table_metadata_manager
.delete_table_metadata(
self.table_id,
&self.table,
table_route_value,
region_wal_options,
)
.delete_table_metadata(self.table_id, &self.table, table_route_value)
.await
}

@@ -133,15 +124,9 @@ impl DropTableExecutor {
&self,
ctx: &DdlContext,
table_route_value: &TableRouteValue,
region_wal_options: &HashMap<u32, WalOptions>,
) -> Result<()> {
ctx.table_metadata_manager
.delete_table_metadata_tombstone(
self.table_id,
&self.table,
table_route_value,
region_wal_options,
)
.delete_table_metadata_tombstone(self.table_id, &self.table, table_route_value)
.await
}

@@ -150,15 +135,9 @@ impl DropTableExecutor {
&self,
ctx: &DdlContext,
table_route_value: &TableRouteValue,
region_wal_options: &HashMap<u32, WalOptions>,
) -> Result<()> {
ctx.table_metadata_manager
.destroy_table_metadata(
self.table_id,
&self.table,
table_route_value,
region_wal_options,
)
.destroy_table_metadata(self.table_id, &self.table, table_route_value)
.await?;

let detecting_regions = if table_route_value.is_physical() {
@@ -177,15 +156,9 @@ impl DropTableExecutor {
&self,
ctx: &DdlContext,
table_route_value: &TableRouteValue,
region_wal_options: &HashMap<u32, WalOptions>,
) -> Result<()> {
ctx.table_metadata_manager
.restore_table_metadata(
self.table_id,
&self.table,
table_route_value,
region_wal_options,
)
.restore_table_metadata(self.table_id, &self.table, table_route_value)
.await
}

@@ -17,7 +17,6 @@ use snafu::OptionExt;
use store_api::metric_engine_consts::METRIC_ENGINE_NAME;

use crate::ddl::drop_table::DropTableProcedure;
use crate::ddl::utils::extract_region_wal_options;
use crate::error::{self, Result};

impl DropTableProcedure {
@@ -31,6 +30,9 @@ impl DropTableProcedure {
.get_physical_table_route(task.table_id)
.await?;

self.data.physical_region_routes = physical_table_route_value.region_routes;
self.data.physical_table_id = Some(physical_table_id);

if physical_table_id == self.data.table_id() {
let table_info_value = self
.context
@@ -45,21 +47,9 @@ impl DropTableProcedure {

let engine = table_info_value.table_info.meta.engine;
// rollback only if dropping the metric physical table fails
self.data.allow_rollback = engine.as_str() == METRIC_ENGINE_NAME;

// Deletes topic-region mapping if dropping physical table
let datanode_table_values = self
.context
.table_metadata_manager
.datanode_table_manager()
.regions(physical_table_id, &physical_table_route_value)
.await?;
self.data.region_wal_options = extract_region_wal_options(&datanode_table_values)?;
self.data.allow_rollback = engine.as_str() == METRIC_ENGINE_NAME
}

self.data.physical_region_routes = physical_table_route_value.region_routes;
self.data.physical_table_id = Some(physical_table_id);

Ok(())
}
}

@@ -219,7 +219,7 @@ async fn test_replace_view_metadata() {
assert_eq!(err.status_code(), StatusCode::TableAlreadyExists);
}

// Set `or_replace` to be `true` and try again
// Set `or_replce` to be `true` and try again
task.create_view.or_replace = true;
task.create_view.logical_plan = vec![4, 5, 6];
task.create_view.definition = "new_definition".to_string();

@@ -12,23 +12,16 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::collections::HashMap;

use common_catalog::consts::METRIC_ENGINE;
use common_error::ext::BoxedError;
use common_procedure::error::Error as ProcedureError;
use common_wal::options::WalOptions;
use snafu::{ensure, OptionExt, ResultExt};
use store_api::metric_engine_consts::LOGICAL_TABLE_METADATA_KEY;
use store_api::storage::RegionNumber;
use table::metadata::TableId;
use table::table_reference::TableReference;

use crate::ddl::DetectingRegion;
use crate::error::{
Error, OperateDatanodeSnafu, ParseWalOptionsSnafu, Result, TableNotFoundSnafu, UnsupportedSnafu,
};
use crate::key::datanode_table::DatanodeTableValue;
use crate::error::{Error, OperateDatanodeSnafu, Result, TableNotFoundSnafu, UnsupportedSnafu};
use crate::key::table_name::TableNameKey;
use crate::key::TableMetadataManagerRef;
use crate::peer::Peer;
@@ -158,32 +151,6 @@ pub fn convert_region_routes_to_detecting_regions(
.collect::<Vec<_>>()
}

/// Parses [WalOptions] from serialized strings in hashmap.
pub fn parse_region_wal_options(
serialized_options: &HashMap<RegionNumber, String>,
) -> Result<HashMap<RegionNumber, WalOptions>> {
let mut region_wal_options = HashMap::with_capacity(serialized_options.len());
for (region_number, wal_options) in serialized_options {
let wal_option = serde_json::from_str::<WalOptions>(wal_options)
.context(ParseWalOptionsSnafu { wal_options })?;
region_wal_options.insert(*region_number, wal_option);
}
Ok(region_wal_options)
}

/// Extracts region wal options from [DatanodeTableValue]s.
pub fn extract_region_wal_options(
datanode_table_values: &Vec<DatanodeTableValue>,
) -> Result<HashMap<RegionNumber, WalOptions>> {
let mut region_wal_options = HashMap::new();
for value in datanode_table_values {
let serialized_options = &value.region_info.region_wal_options;
let parsed_options = parse_region_wal_options(serialized_options)?;
region_wal_options.extend(parsed_options);
}
Ok(region_wal_options)
}

#[cfg(test)]
mod tests {
use super::*;

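Illustrative sketch, not part of the diff: the helpers removed above consume WAL options stored as JSON strings keyed by region number. A minimal example of the expected input, assuming the `WalOptions`/`KafkaWalOptions` types referenced in this hunk:

// Hypothetical usage sketch for `parse_region_wal_options`; anything not named in the hunk above is an assumption.
use std::collections::HashMap;
use common_wal::options::{KafkaWalOptions, WalOptions};

fn sample_serialized_options() -> HashMap<u32, String> {
    // Region 0 writes its WAL to a Kafka topic, region 1 uses the local raft engine.
    HashMap::from([
        (
            0u32,
            serde_json::to_string(&WalOptions::Kafka(KafkaWalOptions {
                topic: "greptimedb_topic0".to_string(),
            }))
            .unwrap(),
        ),
        (1u32, serde_json::to_string(&WalOptions::RaftEngine).unwrap()),
    ])
}
// Feeding this map to `parse_region_wal_options` would return the same keys with the JSON strings
// deserialized into `WalOptions`, which `extract_region_wal_options` then merges across datanodes.
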
@@ -686,8 +686,8 @@ pub enum Error {
},

#[cfg(feature = "pg_kvbackend")]
#[snafu(display("Rds transaction retry failed"))]
RdsTransactionRetryFailed {
#[snafu(display("Postgres transaction retry failed"))]
PostgresTransactionRetryFailed {
#[snafu(implicit)]
location: Location,
},
@@ -710,15 +710,6 @@ pub enum Error {
#[snafu(implicit)]
location: Location,
},

#[snafu(display("Failed to parse wal options: {}", wal_options))]
ParseWalOptions {
wal_options: String,
#[snafu(implicit)]
location: Location,
#[snafu(source)]
error: serde_json::Error,
},
}

pub type Result<T> = std::result::Result<T, Error>;
@@ -771,8 +762,7 @@ impl ErrorExt for Error {
| UnexpectedLogicalRouteTable { .. }
| ProcedureOutput { .. }
| FromUtf8 { .. }
| MetadataCorruption { .. }
| ParseWalOptions { .. } => StatusCode::Unexpected,
| MetadataCorruption { .. } => StatusCode::Unexpected,

SendMessage { .. } | GetKvCache { .. } | CacheNotGet { .. } => StatusCode::Internal,

@@ -824,7 +814,7 @@ impl ErrorExt for Error {
| CreatePostgresPool { .. }
| GetPostgresConnection { .. }
| PostgresTransaction { .. }
| RdsTransactionRetryFailed { .. } => StatusCode::Internal,
| PostgresTransactionRetryFailed { .. } => StatusCode::Internal,
Error::DatanodeTableInfoNotFound { .. } => StatusCode::Internal,
}
}

@@ -57,7 +57,7 @@
//! - This key is mainly used in constructing the view in Datanode and Frontend.
//!
//! 12. Kafka topic key: `__topic_name/kafka/{topic_name}`
//! - The key is used to mark existing topics in kafka for WAL.
//! - The key is used to mark existing topics in kafka for WAL.
//!
//! 13. Topic name to region map key `__topic_region/{topic_name}/{region_id}`
//! - Mapping {topic_name} to {region_id}
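Illustrative sketch, not part of the diff: a concrete instance of key 13 documented above, assuming `RegionId` packs the table id into the high 32 bits of its u64 form (the topic name and ids are made up for the example).

// Hypothetical example of `__topic_region/{topic_name}/{region_id}` for table 1025, region 0.
let region_id_u64: u64 = (1025u64 << 32) | 0;
let key = format!("__topic_region/{}/{}", "greptimedb_topic0", region_id_u64);
assert_eq!(key, "__topic_region/greptimedb_topic0/4402341478400");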
@@ -122,7 +122,6 @@ use common_catalog::consts::{
|
||||
DEFAULT_CATALOG_NAME, DEFAULT_PRIVATE_SCHEMA_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME,
|
||||
};
|
||||
use common_telemetry::warn;
|
||||
use common_wal::options::WalOptions;
|
||||
use datanode_table::{DatanodeTableKey, DatanodeTableManager, DatanodeTableValue};
|
||||
use flow::flow_route::FlowRouteValue;
|
||||
use flow::table_flow::TableFlowValue;
|
||||
@@ -137,7 +136,6 @@ use table::metadata::{RawTableInfo, TableId};
|
||||
use table::table_name::TableName;
|
||||
use table_info::{TableInfoKey, TableInfoManager, TableInfoValue};
|
||||
use table_name::{TableNameKey, TableNameManager, TableNameValue};
|
||||
use topic_region::{TopicRegionKey, TopicRegionManager};
|
||||
use view_info::{ViewInfoKey, ViewInfoManager, ViewInfoValue};
|
||||
|
||||
use self::catalog_name::{CatalogManager, CatalogNameKey, CatalogNameValue};
|
||||
@@ -308,7 +306,6 @@ pub struct TableMetadataManager {
|
||||
schema_manager: SchemaManager,
|
||||
table_route_manager: TableRouteManager,
|
||||
tombstone_manager: TombstoneManager,
|
||||
topic_region_manager: TopicRegionManager,
|
||||
kv_backend: KvBackendRef,
|
||||
}
|
||||
|
||||
@@ -459,7 +456,6 @@ impl TableMetadataManager {
|
||||
schema_manager: SchemaManager::new(kv_backend.clone()),
|
||||
table_route_manager: TableRouteManager::new(kv_backend.clone()),
|
||||
tombstone_manager: TombstoneManager::new(kv_backend.clone()),
|
||||
topic_region_manager: TopicRegionManager::new(kv_backend.clone()),
|
||||
kv_backend,
|
||||
}
|
||||
}
|
||||
@@ -652,15 +648,10 @@ impl TableMetadataManager {
|
||||
.table_route_storage()
|
||||
.build_create_txn(table_id, &table_route_value)?;
|
||||
|
||||
let create_topic_region_txn = self
|
||||
.topic_region_manager
|
||||
.build_create_txn(table_id, ®ion_wal_options)?;
|
||||
|
||||
let mut txn = Txn::merge_all(vec![
|
||||
create_table_name_txn,
|
||||
create_table_info_txn,
|
||||
create_table_route_txn,
|
||||
create_topic_region_txn,
|
||||
]);
|
||||
|
||||
if let TableRouteValue::Physical(x) = &table_route_value {
|
||||
@@ -794,7 +785,6 @@ impl TableMetadataManager {
|
||||
table_id: TableId,
|
||||
table_name: &TableName,
|
||||
table_route_value: &TableRouteValue,
|
||||
region_wal_options: &HashMap<RegionNumber, WalOptions>,
|
||||
) -> Result<Vec<Vec<u8>>> {
|
||||
// Builds keys
|
||||
let datanode_ids = if table_route_value.is_physical() {
|
||||
@@ -816,22 +806,13 @@ impl TableMetadataManager {
|
||||
.into_iter()
|
||||
.map(|datanode_id| DatanodeTableKey::new(datanode_id, table_id))
|
||||
.collect::<HashSet<_>>();
|
||||
let topic_region_map = self
|
||||
.topic_region_manager
|
||||
.get_topic_region_mapping(table_id, region_wal_options);
|
||||
let topic_region_keys = topic_region_map
|
||||
.iter()
|
||||
.map(|(region_id, topic)| TopicRegionKey::new(*region_id, topic))
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
keys.push(table_name.to_bytes());
|
||||
keys.push(table_info_key.to_bytes());
|
||||
keys.push(table_route_key.to_bytes());
|
||||
for key in &datanode_table_keys {
|
||||
keys.push(key.to_bytes());
|
||||
}
|
||||
for key in topic_region_keys {
|
||||
keys.push(key.to_bytes());
|
||||
}
|
||||
Ok(keys)
|
||||
}
|
||||
|
||||
@@ -842,10 +823,8 @@ impl TableMetadataManager {
|
||||
table_id: TableId,
|
||||
table_name: &TableName,
|
||||
table_route_value: &TableRouteValue,
|
||||
region_wal_options: &HashMap<RegionNumber, WalOptions>,
|
||||
) -> Result<()> {
|
||||
let keys =
|
||||
self.table_metadata_keys(table_id, table_name, table_route_value, region_wal_options)?;
|
||||
let keys = self.table_metadata_keys(table_id, table_name, table_route_value)?;
|
||||
self.tombstone_manager.create(keys).await
|
||||
}
|
||||
|
||||
@@ -856,11 +835,9 @@ impl TableMetadataManager {
|
||||
table_id: TableId,
|
||||
table_name: &TableName,
|
||||
table_route_value: &TableRouteValue,
|
||||
region_wal_options: &HashMap<RegionNumber, WalOptions>,
|
||||
) -> Result<()> {
|
||||
let table_metadata_keys =
|
||||
self.table_metadata_keys(table_id, table_name, table_route_value, region_wal_options)?;
|
||||
self.tombstone_manager.delete(table_metadata_keys).await
|
||||
let keys = self.table_metadata_keys(table_id, table_name, table_route_value)?;
|
||||
self.tombstone_manager.delete(keys).await
|
||||
}
|
||||
|
||||
/// Restores metadata for table.
|
||||
@@ -870,10 +847,8 @@ impl TableMetadataManager {
|
||||
table_id: TableId,
|
||||
table_name: &TableName,
|
||||
table_route_value: &TableRouteValue,
|
||||
region_wal_options: &HashMap<RegionNumber, WalOptions>,
|
||||
) -> Result<()> {
|
||||
let keys =
|
||||
self.table_metadata_keys(table_id, table_name, table_route_value, region_wal_options)?;
|
||||
let keys = self.table_metadata_keys(table_id, table_name, table_route_value)?;
|
||||
self.tombstone_manager.restore(keys).await
|
||||
}
|
||||
|
||||
@@ -884,10 +859,8 @@ impl TableMetadataManager {
|
||||
table_id: TableId,
|
||||
table_name: &TableName,
|
||||
table_route_value: &TableRouteValue,
|
||||
region_wal_options: &HashMap<RegionNumber, WalOptions>,
|
||||
) -> Result<()> {
|
||||
let keys =
|
||||
self.table_metadata_keys(table_id, table_name, table_route_value, region_wal_options)?;
|
||||
let keys = self.table_metadata_keys(table_id, table_name, table_route_value)?;
|
||||
let _ = self
|
||||
.kv_backend
|
||||
.batch_delete(BatchDeleteRequest::new().with_keys(keys))
|
||||
@@ -1336,9 +1309,8 @@ mod tests {
|
||||
use bytes::Bytes;
|
||||
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use common_time::util::current_time_millis;
|
||||
use common_wal::options::{KafkaWalOptions, WalOptions};
|
||||
use futures::TryStreamExt;
|
||||
use store_api::storage::{RegionId, RegionNumber};
|
||||
use store_api::storage::RegionId;
|
||||
use table::metadata::{RawTableInfo, TableInfo};
|
||||
use table::table_name::TableName;
|
||||
|
||||
@@ -1351,15 +1323,10 @@ mod tests {
|
||||
use crate::key::table_info::TableInfoValue;
|
||||
use crate::key::table_name::TableNameKey;
|
||||
use crate::key::table_route::TableRouteValue;
|
||||
use crate::key::{
|
||||
DeserializedValueWithBytes, TableMetadataManager, ViewInfoValue, TOPIC_REGION_PREFIX,
|
||||
};
|
||||
use crate::key::{DeserializedValueWithBytes, TableMetadataManager, ViewInfoValue};
|
||||
use crate::kv_backend::memory::MemoryKvBackend;
|
||||
use crate::kv_backend::KvBackend;
|
||||
use crate::peer::Peer;
|
||||
use crate::rpc::router::{region_distribution, LeaderState, Region, RegionRoute};
|
||||
use crate::rpc::store::RangeRequest;
|
||||
use crate::wal_options_allocator::{allocate_region_wal_options, WalOptionsAllocator};
|
||||
|
||||
#[test]
|
||||
fn test_deserialized_value_with_bytes() {
|
||||
@@ -1431,63 +1398,16 @@ mod tests {
|
||||
table_metadata_manager: &TableMetadataManager,
|
||||
table_info: RawTableInfo,
|
||||
region_routes: Vec<RegionRoute>,
|
||||
region_wal_options: HashMap<RegionNumber, String>,
|
||||
) -> Result<()> {
|
||||
table_metadata_manager
|
||||
.create_table_metadata(
|
||||
table_info,
|
||||
TableRouteValue::physical(region_routes),
|
||||
region_wal_options,
|
||||
HashMap::default(),
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
fn create_mock_region_wal_options() -> HashMap<RegionNumber, WalOptions> {
|
||||
let topics = (0..2)
|
||||
.map(|i| format!("greptimedb_topic{}", i))
|
||||
.collect::<Vec<_>>();
|
||||
let wal_options = topics
|
||||
.iter()
|
||||
.map(|topic| {
|
||||
WalOptions::Kafka(KafkaWalOptions {
|
||||
topic: topic.clone(),
|
||||
})
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
(0..16)
|
||||
.enumerate()
|
||||
.map(|(i, region_number)| (region_number, wal_options[i % wal_options.len()].clone()))
|
||||
.collect()
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_raft_engine_topic_region_map() {
|
||||
let mem_kv = Arc::new(MemoryKvBackend::default());
|
||||
let table_metadata_manager = TableMetadataManager::new(mem_kv.clone());
|
||||
let region_route = new_test_region_route();
|
||||
let region_routes = &vec![region_route.clone()];
|
||||
let table_info: RawTableInfo =
|
||||
new_test_table_info(region_routes.iter().map(|r| r.region.id.region_number())).into();
|
||||
let wal_allocator = WalOptionsAllocator::RaftEngine;
|
||||
let regions = (0..16).collect();
|
||||
let region_wal_options = allocate_region_wal_options(regions, &wal_allocator).unwrap();
|
||||
create_physical_table_metadata(
|
||||
&table_metadata_manager,
|
||||
table_info.clone(),
|
||||
region_routes.clone(),
|
||||
region_wal_options.clone(),
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let topic_region_key = TOPIC_REGION_PREFIX.to_string();
|
||||
let range_req = RangeRequest::new().with_prefix(topic_region_key);
|
||||
let resp = mem_kv.range(range_req).await.unwrap();
|
||||
// Should be empty because the topic region map is empty for raft engine.
|
||||
assert!(resp.kvs.is_empty());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_create_table_metadata() {
|
||||
let mem_kv = Arc::new(MemoryKvBackend::default());
|
||||
@@ -1496,17 +1416,11 @@ mod tests {
|
||||
let region_routes = &vec![region_route.clone()];
|
||||
let table_info: RawTableInfo =
|
||||
new_test_table_info(region_routes.iter().map(|r| r.region.id.region_number())).into();
|
||||
let region_wal_options = create_mock_region_wal_options()
|
||||
.into_iter()
|
||||
.map(|(k, v)| (k, serde_json::to_string(&v).unwrap()))
|
||||
.collect::<HashMap<_, _>>();
|
||||
|
||||
// creates metadata.
|
||||
create_physical_table_metadata(
|
||||
&table_metadata_manager,
|
||||
table_info.clone(),
|
||||
region_routes.clone(),
|
||||
region_wal_options.clone(),
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
@@ -1516,7 +1430,6 @@ mod tests {
|
||||
&table_metadata_manager,
|
||||
table_info.clone(),
|
||||
region_routes.clone(),
|
||||
region_wal_options.clone(),
|
||||
)
|
||||
.await
|
||||
.is_ok());
|
||||
@@ -1527,8 +1440,7 @@ mod tests {
|
||||
assert!(create_physical_table_metadata(
|
||||
&table_metadata_manager,
|
||||
table_info.clone(),
|
||||
modified_region_routes,
|
||||
region_wal_options.clone(),
|
||||
modified_region_routes
|
||||
)
|
||||
.await
|
||||
.is_err());
|
||||
@@ -1550,19 +1462,6 @@ mod tests {
|
||||
.unwrap(),
|
||||
region_routes
|
||||
);
|
||||
|
||||
for i in 0..2 {
|
||||
let region_number = i as u32;
|
||||
let region_id = RegionId::new(table_info.ident.table_id, region_number);
|
||||
let topic = format!("greptimedb_topic{}", i);
|
||||
let regions = table_metadata_manager
|
||||
.topic_region_manager
|
||||
.regions(&topic)
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!(regions.len(), 8);
|
||||
assert_eq!(regions[0], region_id);
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
@@ -1658,18 +1557,12 @@ mod tests {
|
||||
new_test_table_info(region_routes.iter().map(|r| r.region.id.region_number())).into();
|
||||
let table_id = table_info.ident.table_id;
|
||||
let datanode_id = 2;
|
||||
let region_wal_options = create_mock_region_wal_options();
|
||||
let serialized_region_wal_options = region_wal_options
|
||||
.iter()
|
||||
.map(|(k, v)| (*k, serde_json::to_string(v).unwrap()))
|
||||
.collect::<HashMap<_, _>>();
|
||||
|
||||
// creates metadata.
|
||||
create_physical_table_metadata(
|
||||
&table_metadata_manager,
|
||||
table_info.clone(),
|
||||
region_routes.clone(),
|
||||
serialized_region_wal_options,
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
@@ -1682,22 +1575,12 @@ mod tests {
|
||||
let table_route_value = &TableRouteValue::physical(region_routes.clone());
|
||||
// deletes metadata.
|
||||
table_metadata_manager
|
||||
.delete_table_metadata(
|
||||
table_id,
|
||||
&table_name,
|
||||
table_route_value,
|
||||
®ion_wal_options,
|
||||
)
|
||||
.delete_table_metadata(table_id, &table_name, table_route_value)
|
||||
.await
|
||||
.unwrap();
|
||||
// Should be ignored.
|
||||
table_metadata_manager
|
||||
.delete_table_metadata(
|
||||
table_id,
|
||||
&table_name,
|
||||
table_route_value,
|
||||
®ion_wal_options,
|
||||
)
|
||||
.delete_table_metadata(table_id, &table_name, table_route_value)
|
||||
.await
|
||||
.unwrap();
|
||||
assert!(table_metadata_manager
|
||||
@@ -1734,19 +1617,6 @@ mod tests {
|
||||
.await
|
||||
.unwrap();
|
||||
assert!(table_route.is_none());
|
||||
// Logical delete removes the topic region mapping as well.
|
||||
let regions = table_metadata_manager
|
||||
.topic_region_manager
|
||||
.regions("greptimedb_topic0")
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!(regions.len(), 0);
|
||||
let regions = table_metadata_manager
|
||||
.topic_region_manager
|
||||
.regions("greptimedb_topic1")
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!(regions.len(), 0);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
@@ -1763,7 +1633,6 @@ mod tests {
|
||||
&table_metadata_manager,
|
||||
table_info.clone(),
|
||||
region_routes.clone(),
|
||||
HashMap::new(),
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
@@ -1836,7 +1705,6 @@ mod tests {
|
||||
&table_metadata_manager,
|
||||
table_info.clone(),
|
||||
region_routes.clone(),
|
||||
HashMap::new(),
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
@@ -1922,7 +1790,6 @@ mod tests {
|
||||
&table_metadata_manager,
|
||||
table_info.clone(),
|
||||
region_routes.clone(),
|
||||
HashMap::new(),
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
@@ -2003,7 +1870,6 @@ mod tests {
|
||||
&table_metadata_manager,
|
||||
table_info.clone(),
|
||||
region_routes.clone(),
|
||||
HashMap::new(),
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
@@ -2114,11 +1980,7 @@ mod tests {
|
||||
let table_id = 1025;
|
||||
let table_name = "foo";
|
||||
let task = test_create_table_task(table_name, table_id);
|
||||
let options = create_mock_region_wal_options();
|
||||
let serialized_options = options
|
||||
.iter()
|
||||
.map(|(k, v)| (*k, serde_json::to_string(v).unwrap()))
|
||||
.collect::<HashMap<_, _>>();
|
||||
let options = [(0, "test".to_string())].into();
|
||||
table_metadata_manager
|
||||
.create_table_metadata(
|
||||
task.table_info,
|
||||
@@ -2145,7 +2007,7 @@ mod tests {
|
||||
leader_down_since: None,
|
||||
},
|
||||
]),
|
||||
serialized_options,
|
||||
options,
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
@@ -2158,7 +2020,7 @@ mod tests {
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
table_metadata_manager
|
||||
.destroy_table_metadata(table_id, &table_name, &table_route_value, &options)
|
||||
.destroy_table_metadata(table_id, &table_name, &table_route_value)
|
||||
.await
|
||||
.unwrap();
|
||||
assert!(mem_kv.is_empty());
|
||||
@@ -2171,11 +2033,7 @@ mod tests {
|
||||
let table_id = 1025;
|
||||
let table_name = "foo";
|
||||
let task = test_create_table_task(table_name, table_id);
|
||||
let options = create_mock_region_wal_options();
|
||||
let serialized_options = options
|
||||
.iter()
|
||||
.map(|(k, v)| (*k, serde_json::to_string(v).unwrap()))
|
||||
.collect::<HashMap<_, _>>();
|
||||
let options = [(0, "test".to_string())].into();
|
||||
table_metadata_manager
|
||||
.create_table_metadata(
|
||||
task.table_info,
|
||||
@@ -2202,7 +2060,7 @@ mod tests {
|
||||
leader_down_since: None,
|
||||
},
|
||||
]),
|
||||
serialized_options,
|
||||
options,
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
@@ -2218,18 +2076,18 @@ mod tests {
|
||||
let table_name = TableName::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, table_name);
|
||||
let table_route_value = TableRouteValue::physical(region_routes.clone());
|
||||
table_metadata_manager
|
||||
.delete_table_metadata(table_id, &table_name, &table_route_value, &options)
|
||||
.delete_table_metadata(table_id, &table_name, &table_route_value)
|
||||
.await
|
||||
.unwrap();
|
||||
table_metadata_manager
|
||||
.restore_table_metadata(table_id, &table_name, &table_route_value, &options)
|
||||
.restore_table_metadata(table_id, &table_name, &table_route_value)
|
||||
.await
|
||||
.unwrap();
|
||||
let kvs = mem_kv.dump();
|
||||
assert_eq!(kvs, expected_result);
|
||||
// Should be ignored.
|
||||
table_metadata_manager
|
||||
.restore_table_metadata(table_id, &table_name, &table_route_value, &options)
|
||||
.restore_table_metadata(table_id, &table_name, &table_route_value)
|
||||
.await
|
||||
.unwrap();
|
||||
let kvs = mem_kv.dump();
|
||||
|
||||
@@ -21,7 +21,6 @@ use snafu::OptionExt;
|
||||
use store_api::storage::RegionNumber;
|
||||
use table::metadata::TableId;
|
||||
|
||||
use super::table_route::PhysicalTableRouteValue;
|
||||
use super::MetadataKey;
|
||||
use crate::error::{DatanodeTableInfoNotFoundSnafu, InvalidMetadataSnafu, Result};
|
||||
use crate::key::{
|
||||
@@ -30,8 +29,7 @@ use crate::key::{
|
||||
use crate::kv_backend::txn::{Txn, TxnOp};
|
||||
use crate::kv_backend::KvBackendRef;
|
||||
use crate::range_stream::{PaginationStream, DEFAULT_PAGE_SIZE};
|
||||
use crate::rpc::router::region_distribution;
|
||||
use crate::rpc::store::{BatchGetRequest, RangeRequest};
|
||||
use crate::rpc::store::RangeRequest;
|
||||
use crate::rpc::KeyValue;
|
||||
use crate::DatanodeId;
|
||||
|
||||
@@ -174,26 +172,6 @@ impl DatanodeTableManager {
|
||||
Box::pin(stream)
|
||||
}
|
||||
|
||||
/// Find the [DatanodeTableValue]s for the given [TableId] and [PhysicalTableRouteValue].
|
||||
pub async fn regions(
|
||||
&self,
|
||||
table_id: TableId,
|
||||
table_routes: &PhysicalTableRouteValue,
|
||||
) -> Result<Vec<DatanodeTableValue>> {
|
||||
let keys = region_distribution(&table_routes.region_routes)
|
||||
.into_keys()
|
||||
.map(|datanode_id| DatanodeTableKey::new(datanode_id, table_id))
|
||||
.collect::<Vec<_>>();
|
||||
let req = BatchGetRequest {
|
||||
keys: keys.iter().map(|k| k.to_bytes()).collect(),
|
||||
};
|
||||
let resp = self.kv_backend.batch_get(req).await?;
|
||||
resp.kvs
|
||||
.into_iter()
|
||||
.map(datanode_table_value_decoder)
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// Builds the create datanode table transactions. It only executes while the primary keys comparing successes.
|
||||
pub fn build_create_txn(
|
||||
&self,
|
||||
|
||||
@@ -12,7 +12,6 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use common_telemetry::warn;
|
||||
use futures::stream::BoxStream;
|
||||
use lazy_static::lazy_static;
|
||||
use regex::Regex;
|
||||
@@ -38,12 +37,6 @@ lazy_static! {
|
||||
"^{FLOW_NAME_KEY_PREFIX}/({NAME_PATTERN})/({NAME_PATTERN})$"
|
||||
))
|
||||
.unwrap();
|
||||
|
||||
/// for compatibility with older flow name with less strict name pattern
|
||||
static ref COMPAT_FLOW_NAME_KEY_PATTERN: Regex = Regex::new(&format!(
|
||||
"^{FLOW_NAME_KEY_PREFIX}/({NAME_PATTERN})/(.*)$"
|
||||
))
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
/// The key of mapping {flow_name} to [FlowId].
|
||||
@@ -121,18 +114,12 @@ impl<'a> MetadataKey<'a, FlowNameKeyInner<'a>> for FlowNameKeyInner<'_> {
|
||||
}
|
||||
.build()
|
||||
})?;
|
||||
let captures = FLOW_NAME_KEY_PATTERN
|
||||
.captures(key)
|
||||
.or_else(|| {
|
||||
warn!(
|
||||
"FlowNameKeyInner '{}' is not a valid flow name in newer version.",
|
||||
key
|
||||
);
|
||||
COMPAT_FLOW_NAME_KEY_PATTERN.captures(key)
|
||||
})
|
||||
.context(error::InvalidMetadataSnafu {
|
||||
err_msg: format!("Invalid FlowNameKeyInner '{key}'"),
|
||||
})?;
|
||||
let captures =
|
||||
FLOW_NAME_KEY_PATTERN
|
||||
.captures(key)
|
||||
.context(error::InvalidMetadataSnafu {
|
||||
err_msg: format!("Invalid FlowNameKeyInner '{key}'"),
|
||||
})?;
|
||||
// Safety: pass the regex check above
|
||||
let catalog_name = captures.get(1).unwrap().as_str();
|
||||
let flow_name = captures.get(2).unwrap().as_str();
|
||||
@@ -297,12 +284,6 @@ mod tests {
|
||||
let key = FlowNameKey::from_bytes(&bytes).unwrap();
|
||||
assert_eq!(key.catalog(), "my_catalog");
|
||||
assert_eq!(key.flow_name(), "my_task");
|
||||
|
||||
// compatibility with older version
|
||||
let bytes = b"__flow/name/my_catalog/a/`b`".to_vec();
|
||||
let key = FlowNameKey::from_bytes(&bytes).unwrap();
|
||||
assert_eq!(key.catalog(), "my_catalog");
|
||||
assert_eq!(key.flow_name(), "a/`b`");
|
||||
}
|
||||
#[test]
|
||||
fn test_key_start_range() {
|
||||
|
||||
@@ -26,25 +26,18 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::fmt::{self, Display};
|
||||
|
||||
use common_wal::options::WalOptions;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use snafu::OptionExt;
|
||||
use store_api::storage::{RegionId, RegionNumber};
|
||||
use table::metadata::TableId;
|
||||
use store_api::storage::RegionId;
|
||||
|
||||
use crate::ddl::utils::parse_region_wal_options;
|
||||
use crate::error::{Error, InvalidMetadataSnafu, Result};
|
||||
use crate::key::{MetadataKey, TOPIC_REGION_PATTERN, TOPIC_REGION_PREFIX};
|
||||
use crate::kv_backend::txn::{Txn, TxnOp};
|
||||
use crate::kv_backend::KvBackendRef;
|
||||
use crate::rpc::store::{BatchDeleteRequest, BatchPutRequest, PutRequest, RangeRequest};
|
||||
use crate::rpc::store::{BatchPutRequest, PutRequest, RangeRequest};
|
||||
use crate::rpc::KeyValue;
|
||||
|
||||
// The TopicRegionKey is a key for the topic-region mapping in the kvbackend.
|
||||
// The layout of the key is `__topic_region/{topic_name}/{region_id}`.
|
||||
#[derive(Debug, Clone, PartialEq)]
|
||||
pub struct TopicRegionKey<'a> {
|
||||
pub region_id: RegionId,
|
||||
@@ -60,7 +53,7 @@ impl<'a> TopicRegionKey<'a> {
|
||||
}
|
||||
|
||||
pub fn range_topic_key(topic: &str) -> String {
|
||||
format!("{}/{}/", TOPIC_REGION_PREFIX, topic)
|
||||
format!("{}/{}", TOPIC_REGION_PREFIX, topic)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -87,7 +80,7 @@ impl Display for TopicRegionKey<'_> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(
|
||||
f,
|
||||
"{}{}",
|
||||
"{}/{}",
|
||||
Self::range_topic_key(self.topic),
|
||||
self.region_id.as_u64()
|
||||
)
|
||||
@@ -158,24 +151,6 @@ impl TopicRegionManager {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn build_create_txn(
|
||||
&self,
|
||||
table_id: TableId,
|
||||
region_wal_options: &HashMap<RegionNumber, String>,
|
||||
) -> Result<Txn> {
|
||||
let region_wal_options = parse_region_wal_options(region_wal_options)?;
|
||||
let topic_region_mapping = self.get_topic_region_mapping(table_id, ®ion_wal_options);
|
||||
let topic_region_keys = topic_region_mapping
|
||||
.iter()
|
||||
.map(|(topic, region_id)| TopicRegionKey::new(*topic, region_id))
|
||||
.collect::<Vec<_>>();
|
||||
let operations = topic_region_keys
|
||||
.into_iter()
|
||||
.map(|key| TxnOp::Put(key.to_bytes(), vec![]))
|
||||
.collect::<Vec<_>>();
|
||||
Ok(Txn::new().and_then(operations))
|
||||
}
|
||||
|
||||
/// Returns the list of region ids using specified topic.
|
||||
pub async fn regions(&self, topic: &str) -> Result<Vec<RegionId>> {
|
||||
let prefix = TopicRegionKey::range_topic_key(topic);
|
||||
@@ -194,49 +169,12 @@ impl TopicRegionManager {
|
||||
self.kv_backend.delete(&raw_key, false).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn batch_delete(&self, keys: Vec<TopicRegionKey<'_>>) -> Result<()> {
|
||||
let raw_keys = keys.iter().map(|key| key.to_bytes()).collect::<Vec<_>>();
|
||||
let req = BatchDeleteRequest {
|
||||
keys: raw_keys,
|
||||
prev_kv: false,
|
||||
};
|
||||
self.kv_backend.batch_delete(req).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Retrieves a mapping of [`RegionId`]s to their corresponding topics name
|
||||
/// based on the provided table ID and WAL options.
|
||||
///
|
||||
/// # Returns
|
||||
/// A vector of tuples, where each tuple contains a [`RegionId`] and its corresponding topic name.
|
||||
pub fn get_topic_region_mapping<'a>(
|
||||
&self,
|
||||
table_id: TableId,
|
||||
region_wal_options: &'a HashMap<RegionNumber, WalOptions>,
|
||||
) -> Vec<(RegionId, &'a str)> {
|
||||
region_wal_options
|
||||
.keys()
|
||||
.filter_map(
|
||||
|region_number| match region_wal_options.get(region_number) {
|
||||
Some(WalOptions::Kafka(kafka)) => {
|
||||
let region_id = RegionId::new(table_id, *region_number);
|
||||
Some((region_id, kafka.topic.as_str()))
|
||||
}
|
||||
Some(WalOptions::RaftEngine) => None,
|
||||
None => None,
|
||||
},
|
||||
)
|
||||
.collect::<Vec<_>>()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_wal::options::KafkaWalOptions;
|
||||
|
||||
use super::*;
|
||||
use crate::kv_backend::memory::MemoryKvBackend;
|
||||
|
||||
@@ -282,45 +220,4 @@ mod tests {
|
||||
key_values.sort_by_key(|id| id.as_u64());
|
||||
assert_eq!(key_values, expected);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_topic_region_map() {
|
||||
let kv_backend = Arc::new(MemoryKvBackend::default());
|
||||
let manager = TopicRegionManager::new(kv_backend.clone());
|
||||
|
||||
let table_id = 1;
|
||||
let region_wal_options = (0..64)
|
||||
.map(|i| {
|
||||
let region_number = i;
|
||||
let wal_options = if i % 2 == 0 {
|
||||
WalOptions::Kafka(KafkaWalOptions {
|
||||
topic: format!("topic_{}", i),
|
||||
})
|
||||
} else {
|
||||
WalOptions::RaftEngine
|
||||
};
|
||||
(region_number, serde_json::to_string(&wal_options).unwrap())
|
||||
})
|
||||
.collect::<HashMap<_, _>>();
|
||||
|
||||
let region_wal_options = parse_region_wal_options(®ion_wal_options).unwrap();
|
||||
let mut topic_region_mapping =
|
||||
manager.get_topic_region_mapping(table_id, ®ion_wal_options);
|
||||
let mut expected = (0..64)
|
||||
.filter_map(|i| {
|
||||
if i % 2 == 0 {
|
||||
Some((RegionId::new(table_id, i), format!("topic_{}", i)))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
topic_region_mapping.sort_by_key(|(region_id, _)| region_id.as_u64());
|
||||
let topic_region_map = topic_region_mapping
|
||||
.iter()
|
||||
.map(|(region_id, topic)| (*region_id, topic.to_string()))
|
||||
.collect::<Vec<_>>();
|
||||
expected.sort_by_key(|(region_id, _)| region_id.as_u64());
|
||||
assert_eq!(topic_region_map, expected);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -32,7 +32,7 @@ pub mod chroot;
pub mod etcd;
pub mod memory;
#[cfg(feature = "pg_kvbackend")]
pub mod rds;
pub mod postgres;
pub mod test;
pub mod txn;

src/common/meta/src/kv_backend/postgres.rs (new file): diff suppressed because it is too large.
@@ -1,548 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::any::Any;
|
||||
use std::collections::HashMap;
|
||||
use std::marker::PhantomData;
|
||||
use std::time::Duration;
|
||||
|
||||
use backon::{BackoffBuilder, ExponentialBuilder};
|
||||
use common_telemetry::debug;
|
||||
|
||||
use crate::error::{Error, RdsTransactionRetryFailedSnafu, Result};
|
||||
use crate::kv_backend::txn::{
|
||||
Compare, Txn as KvTxn, TxnOp, TxnOpResponse, TxnResponse as KvTxnResponse,
|
||||
};
|
||||
use crate::kv_backend::{KvBackend, TxnService};
|
||||
use crate::metrics::METRIC_META_TXN_REQUEST;
|
||||
use crate::rpc::store::{
|
||||
BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse, BatchPutRequest,
|
||||
BatchPutResponse, DeleteRangeRequest, DeleteRangeResponse, PutRequest, PutResponse,
|
||||
RangeRequest, RangeResponse,
|
||||
};
|
||||
use crate::rpc::KeyValue;
|
||||
|
||||
mod postgres;
|
||||
|
||||
pub use postgres::PgStore;
|
||||
|
||||
const RDS_STORE_TXN_RETRY_COUNT: usize = 3;
|
||||
|
||||
/// Query executor for rds. It can execute queries or generate a transaction executor.
|
||||
#[async_trait::async_trait]
|
||||
pub trait Executor: Send + Sync {
|
||||
type Transaction<'a>: 'a + Transaction<'a>
|
||||
where
|
||||
Self: 'a;
|
||||
|
||||
fn name() -> &'static str;
|
||||
|
||||
async fn query(&mut self, query: &str, params: &[&Vec<u8>]) -> Result<Vec<KeyValue>>;
|
||||
|
||||
/// Some queries don't need to return any result, such as `DELETE`.
|
||||
async fn execute(&mut self, query: &str, params: &[&Vec<u8>]) -> Result<()> {
|
||||
self.query(query, params).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn txn_executor<'a>(&'a mut self) -> Result<Self::Transaction<'a>>;
|
||||
}
|
||||
|
||||
/// Transaction query executor for rds. It can execute queries in transaction or commit the transaction.
|
||||
#[async_trait::async_trait]
|
||||
pub trait Transaction<'a>: Send + Sync {
|
||||
async fn query(&mut self, query: &str, params: &[&Vec<u8>]) -> Result<Vec<KeyValue>>;
|
||||
|
||||
async fn execute(&mut self, query: &str, params: &[&Vec<u8>]) -> Result<()> {
|
||||
self.query(query, params).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn commit(self) -> Result<()>;
|
||||
}
|
||||
|
||||
/// Factory for creating default and transaction query executors.
|
||||
#[async_trait::async_trait]
|
||||
pub trait ExecutorFactory<T: Executor>: Send + Sync {
|
||||
async fn default_executor(&self) -> Result<T>;
|
||||
|
||||
async fn txn_executor<'a>(&self, default_executor: &'a mut T) -> Result<T::Transaction<'a>>;
|
||||
}
|
||||
|
||||
/// Rds backed store for metsrv
|
||||
pub struct RdsStore<T, S, R>
|
||||
where
|
||||
T: Executor + Send + Sync,
|
||||
S: ExecutorFactory<T> + Send + Sync,
|
||||
{
|
||||
max_txn_ops: usize,
|
||||
txn_retry_count: usize,
|
||||
executor_factory: S,
|
||||
sql_template_set: R,
|
||||
_phantom: PhantomData<T>,
|
||||
}
|
||||
|
||||
pub enum ExecutorImpl<'a, T: Executor + 'a> {
|
||||
Default(T),
|
||||
Txn(T::Transaction<'a>),
|
||||
}
|
||||
|
||||
impl<T: Executor> ExecutorImpl<'_, T> {
|
||||
async fn query(&mut self, query: &str, params: &Vec<&Vec<u8>>) -> Result<Vec<KeyValue>> {
|
||||
match self {
|
||||
Self::Default(executor) => executor.query(query, params).await,
|
||||
Self::Txn(executor) => executor.query(query, params).await,
|
||||
}
|
||||
}
|
||||
|
||||
async fn commit(self) -> Result<()> {
|
||||
match self {
|
||||
Self::Txn(executor) => executor.commit().await,
|
||||
_ => Ok(()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
pub trait KvQueryExecutor<T: Executor> {
|
||||
async fn range_with_query_executor(
|
||||
&self,
|
||||
query_executor: &mut ExecutorImpl<'_, T>,
|
||||
req: RangeRequest,
|
||||
) -> Result<RangeResponse>;
|
||||
|
||||
async fn put_with_query_executor(
|
||||
&self,
|
||||
query_executor: &mut ExecutorImpl<'_, T>,
|
||||
req: PutRequest,
|
||||
) -> Result<PutResponse> {
|
||||
let kv = KeyValue {
|
||||
key: req.key,
|
||||
value: req.value,
|
||||
};
|
||||
let mut res = self
|
||||
.batch_put_with_query_executor(
|
||||
query_executor,
|
||||
BatchPutRequest {
|
||||
kvs: vec![kv],
|
||||
prev_kv: req.prev_kv,
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
|
||||
if !res.prev_kvs.is_empty() {
|
||||
debug_assert!(req.prev_kv);
|
||||
return Ok(PutResponse {
|
||||
prev_kv: Some(res.prev_kvs.remove(0)),
|
||||
});
|
||||
}
|
||||
Ok(PutResponse::default())
|
||||
}
|
||||
|
||||
async fn batch_put_with_query_executor(
|
||||
&self,
|
||||
query_executor: &mut ExecutorImpl<'_, T>,
|
||||
req: BatchPutRequest,
|
||||
) -> Result<BatchPutResponse>;
|
||||
|
||||
/// Batch get with certain client. It's needed for a client with transaction.
|
||||
async fn batch_get_with_query_executor(
|
||||
&self,
|
||||
query_executor: &mut ExecutorImpl<'_, T>,
|
||||
req: BatchGetRequest,
|
||||
) -> Result<BatchGetResponse>;
|
||||
|
||||
async fn delete_range_with_query_executor(
|
||||
&self,
|
||||
query_executor: &mut ExecutorImpl<'_, T>,
|
||||
req: DeleteRangeRequest,
|
||||
) -> Result<DeleteRangeResponse>;
|
||||
|
||||
async fn batch_delete_with_query_executor(
|
||||
&self,
|
||||
query_executor: &mut ExecutorImpl<'_, T>,
|
||||
req: BatchDeleteRequest,
|
||||
) -> Result<BatchDeleteResponse>;
|
||||
}
|
||||
|
||||
impl<T, S, R> RdsStore<T, S, R>
|
||||
where
|
||||
Self: KvQueryExecutor<T> + Send + Sync,
|
||||
T: Executor + Send + Sync,
|
||||
S: ExecutorFactory<T> + Send + Sync,
|
||||
{
|
||||
async fn execute_txn_cmp(
|
||||
&self,
|
||||
query_executor: &mut ExecutorImpl<'_, T>,
|
||||
cmp: &[Compare],
|
||||
) -> Result<bool> {
|
||||
let batch_get_req = BatchGetRequest {
|
||||
keys: cmp.iter().map(|c| c.key.clone()).collect(),
|
||||
};
|
||||
let res = self
|
||||
.batch_get_with_query_executor(query_executor, batch_get_req)
|
||||
.await?;
|
||||
debug!("batch get res: {:?}", res);
|
||||
let res_map = res
|
||||
.kvs
|
||||
.into_iter()
|
||||
.map(|kv| (kv.key, kv.value))
|
||||
.collect::<HashMap<Vec<u8>, Vec<u8>>>();
|
||||
for c in cmp {
|
||||
let value = res_map.get(&c.key);
|
||||
if !c.compare_value(value) {
|
||||
return Ok(false);
|
||||
}
|
||||
}
|
||||
Ok(true)
|
||||
}
|
||||
|
||||
/// Execute a batch of transaction operations. This function is only used for transactions with the same operation type.
|
||||
async fn try_batch_txn(
|
||||
&self,
|
||||
query_executor: &mut ExecutorImpl<'_, T>,
|
||||
txn_ops: &[TxnOp],
|
||||
) -> Result<Option<Vec<TxnOpResponse>>> {
|
||||
if !check_txn_ops(txn_ops)? {
|
||||
return Ok(None);
|
||||
}
|
||||
// Safety: txn_ops is not empty
|
||||
match txn_ops.first().unwrap() {
|
||||
TxnOp::Delete(_) => self.handle_batch_delete(query_executor, txn_ops).await,
|
||||
TxnOp::Put(_, _) => self.handle_batch_put(query_executor, txn_ops).await,
|
||||
TxnOp::Get(_) => self.handle_batch_get(query_executor, txn_ops).await,
|
||||
}
|
||||
}
|
||||
|
||||
async fn handle_batch_delete(
|
||||
&self,
|
||||
query_executor: &mut ExecutorImpl<'_, T>,
|
||||
txn_ops: &[TxnOp],
|
||||
) -> Result<Option<Vec<TxnOpResponse>>> {
|
||||
let mut batch_del_req = BatchDeleteRequest {
|
||||
keys: vec![],
|
||||
prev_kv: true,
|
||||
};
|
||||
for op in txn_ops {
|
||||
if let TxnOp::Delete(key) = op {
|
||||
batch_del_req.keys.push(key.clone());
|
||||
}
|
||||
}
|
||||
let res = self
|
||||
.batch_delete_with_query_executor(query_executor, batch_del_req)
|
||||
.await?;
|
||||
let res_map = res
|
||||
.prev_kvs
|
||||
.into_iter()
|
||||
.map(|kv| (kv.key, kv.value))
|
||||
.collect::<HashMap<Vec<u8>, Vec<u8>>>();
|
||||
let mut resps = Vec::with_capacity(txn_ops.len());
|
||||
for op in txn_ops {
|
||||
if let TxnOp::Delete(key) = op {
|
||||
let value = res_map.get(key);
|
||||
resps.push(TxnOpResponse::ResponseDelete(DeleteRangeResponse {
|
||||
deleted: if value.is_some() { 1 } else { 0 },
|
||||
prev_kvs: vec![],
|
||||
}));
|
||||
}
|
||||
}
|
||||
Ok(Some(resps))
|
||||
}
|
||||
|
||||
async fn handle_batch_put(
|
||||
&self,
|
||||
query_executor: &mut ExecutorImpl<'_, T>,
|
||||
txn_ops: &[TxnOp],
|
||||
) -> Result<Option<Vec<TxnOpResponse>>> {
|
||||
let mut batch_put_req = BatchPutRequest {
|
||||
kvs: vec![],
|
||||
prev_kv: false,
|
||||
};
|
||||
for op in txn_ops {
|
||||
if let TxnOp::Put(key, value) = op {
|
||||
batch_put_req.kvs.push(KeyValue {
|
||||
key: key.clone(),
|
||||
value: value.clone(),
|
||||
});
|
||||
}
|
||||
}
|
||||
let _ = self
|
||||
.batch_put_with_query_executor(query_executor, batch_put_req)
|
||||
.await?;
|
||||
let mut resps = Vec::with_capacity(txn_ops.len());
|
||||
for op in txn_ops {
|
||||
if let TxnOp::Put(_, _) = op {
|
||||
resps.push(TxnOpResponse::ResponsePut(PutResponse { prev_kv: None }));
|
||||
}
|
||||
}
|
||||
Ok(Some(resps))
|
||||
}
|
||||
|
||||
async fn handle_batch_get(
|
||||
&self,
|
||||
query_executor: &mut ExecutorImpl<'_, T>,
|
||||
txn_ops: &[TxnOp],
|
||||
) -> Result<Option<Vec<TxnOpResponse>>> {
|
||||
let mut batch_get_req = BatchGetRequest { keys: vec![] };
|
||||
for op in txn_ops {
|
||||
if let TxnOp::Get(key) = op {
|
||||
batch_get_req.keys.push(key.clone());
|
||||
}
|
||||
}
|
||||
let res = self
|
||||
.batch_get_with_query_executor(query_executor, batch_get_req)
|
||||
.await?;
|
||||
let res_map = res
|
||||
.kvs
|
||||
.into_iter()
|
||||
.map(|kv| (kv.key, kv.value))
|
||||
.collect::<HashMap<Vec<u8>, Vec<u8>>>();
|
||||
let mut resps = Vec::with_capacity(txn_ops.len());
|
||||
for op in txn_ops {
|
||||
if let TxnOp::Get(key) = op {
|
||||
let value = res_map.get(key);
|
||||
resps.push(TxnOpResponse::ResponseGet(RangeResponse {
|
||||
kvs: value
|
||||
.map(|v| {
|
||||
vec![KeyValue {
|
||||
key: key.clone(),
|
||||
value: v.clone(),
|
||||
}]
|
||||
})
|
||||
.unwrap_or_default(),
|
||||
more: false,
|
||||
}));
|
||||
}
|
||||
}
|
||||
Ok(Some(resps))
|
||||
}
|
||||
|
||||
async fn execute_txn_op(
|
||||
&self,
|
||||
query_executor: &mut ExecutorImpl<'_, T>,
|
||||
op: &TxnOp,
|
||||
) -> Result<TxnOpResponse> {
|
||||
match op {
|
||||
TxnOp::Put(key, value) => {
|
||||
let res = self
|
||||
.put_with_query_executor(
|
||||
query_executor,
|
||||
PutRequest {
|
||||
key: key.clone(),
|
||||
value: value.clone(),
|
||||
prev_kv: false,
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
Ok(TxnOpResponse::ResponsePut(res))
|
||||
}
|
||||
TxnOp::Get(key) => {
|
||||
let res = self
|
||||
.range_with_query_executor(
|
||||
query_executor,
|
||||
RangeRequest {
|
||||
key: key.clone(),
|
||||
range_end: vec![],
|
||||
limit: 1,
|
||||
keys_only: false,
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
Ok(TxnOpResponse::ResponseGet(res))
|
||||
}
|
||||
TxnOp::Delete(key) => {
|
||||
let res = self
|
||||
.delete_range_with_query_executor(
|
||||
query_executor,
|
||||
DeleteRangeRequest {
|
||||
key: key.clone(),
|
||||
range_end: vec![],
|
||||
prev_kv: false,
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
Ok(TxnOpResponse::ResponseDelete(res))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn txn_inner(&self, txn: &KvTxn) -> Result<KvTxnResponse> {
|
||||
let mut default_executor = self.executor_factory.default_executor().await?;
|
||||
let mut txn_executor = ExecutorImpl::Txn(
|
||||
self.executor_factory
|
||||
.txn_executor(&mut default_executor)
|
||||
.await?,
|
||||
);
|
||||
let mut success = true;
|
||||
if txn.c_when {
|
||||
success = self
|
||||
.execute_txn_cmp(&mut txn_executor, &txn.req.compare)
|
||||
.await?;
|
||||
}
|
||||
let mut responses = vec![];
|
||||
if success && txn.c_then {
|
||||
match self
|
||||
.try_batch_txn(&mut txn_executor, &txn.req.success)
|
||||
.await?
|
||||
{
|
||||
Some(res) => responses.extend(res),
|
||||
None => {
|
||||
for txnop in &txn.req.success {
|
||||
let res = self.execute_txn_op(&mut txn_executor, txnop).await?;
|
||||
responses.push(res);
|
||||
}
|
||||
}
|
||||
}
|
||||
} else if !success && txn.c_else {
|
||||
match self
|
||||
.try_batch_txn(&mut txn_executor, &txn.req.failure)
|
||||
.await?
|
||||
{
|
||||
Some(res) => responses.extend(res),
|
||||
None => {
|
||||
for txnop in &txn.req.failure {
|
||||
let res = self.execute_txn_op(&mut txn_executor, txnop).await?;
|
||||
responses.push(res);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
txn_executor.commit().await?;
|
||||
Ok(KvTxnResponse {
|
||||
responses,
|
||||
succeeded: success,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl<T, S, R> KvBackend for RdsStore<T, S, R>
|
||||
where
|
||||
R: 'static,
|
||||
Self: KvQueryExecutor<T> + Send + Sync,
|
||||
T: Executor + 'static,
|
||||
S: ExecutorFactory<T> + 'static,
|
||||
{
|
||||
fn name(&self) -> &str {
|
||||
T::name()
|
||||
}
|
||||
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
|
||||
async fn range(&self, req: RangeRequest) -> Result<RangeResponse> {
|
||||
let client = self.executor_factory.default_executor().await?;
|
||||
let mut query_executor = ExecutorImpl::Default(client);
|
||||
self.range_with_query_executor(&mut query_executor, req)
|
||||
.await
|
||||
}
|
||||
|
||||
async fn put(&self, req: PutRequest) -> Result<PutResponse> {
|
||||
let client = self.executor_factory.default_executor().await?;
|
||||
let mut query_executor = ExecutorImpl::Default(client);
|
||||
self.put_with_query_executor(&mut query_executor, req).await
|
||||
}
|
||||
|
||||
async fn batch_put(&self, req: BatchPutRequest) -> Result<BatchPutResponse> {
|
||||
let client = self.executor_factory.default_executor().await?;
|
||||
let mut query_executor = ExecutorImpl::Default(client);
|
||||
self.batch_put_with_query_executor(&mut query_executor, req)
|
||||
.await
|
||||
}
|
||||
|
||||
async fn batch_get(&self, req: BatchGetRequest) -> Result<BatchGetResponse> {
|
||||
let client = self.executor_factory.default_executor().await?;
|
||||
let mut query_executor = ExecutorImpl::Default(client);
|
||||
self.batch_get_with_query_executor(&mut query_executor, req)
|
||||
.await
|
||||
}
|
||||
|
||||
async fn delete_range(&self, req: DeleteRangeRequest) -> Result<DeleteRangeResponse> {
|
||||
let client = self.executor_factory.default_executor().await?;
|
||||
let mut query_executor = ExecutorImpl::Default(client);
|
||||
self.delete_range_with_query_executor(&mut query_executor, req)
|
||||
.await
|
||||
}
|
||||
|
||||
async fn batch_delete(&self, req: BatchDeleteRequest) -> Result<BatchDeleteResponse> {
|
||||
let client = self.executor_factory.default_executor().await?;
|
||||
let mut query_executor = ExecutorImpl::Default(client);
|
||||
self.batch_delete_with_query_executor(&mut query_executor, req)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl<T, S, R> TxnService for RdsStore<T, S, R>
|
||||
where
|
||||
Self: KvQueryExecutor<T> + Send + Sync,
|
||||
T: Executor + 'static,
|
||||
S: ExecutorFactory<T> + 'static,
|
||||
{
|
||||
type Error = Error;
|
||||
|
||||
async fn txn(&self, txn: KvTxn) -> Result<KvTxnResponse> {
|
||||
let _timer = METRIC_META_TXN_REQUEST
|
||||
.with_label_values(&[T::name(), "txn"])
|
||||
.start_timer();
|
||||
|
||||
let mut backoff = ExponentialBuilder::default()
|
||||
.with_min_delay(Duration::from_millis(10))
|
||||
.with_max_delay(Duration::from_millis(200))
|
||||
.with_max_times(self.txn_retry_count)
|
||||
.build();
|
||||
|
||||
loop {
|
||||
match self.txn_inner(&txn).await {
|
||||
Ok(res) => return Ok(res),
|
||||
Err(e) => {
|
||||
if e.is_serialization_error() {
|
||||
let d = backoff.next();
|
||||
if let Some(d) = d {
|
||||
tokio::time::sleep(d).await;
|
||||
continue;
|
||||
}
|
||||
break;
|
||||
} else {
|
||||
return Err(e);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
RdsTransactionRetryFailedSnafu {}.fail()
|
||||
}
|
||||
|
||||
fn max_txn_ops(&self) -> usize {
|
||||
self.max_txn_ops
|
||||
}
|
||||
}
|
||||
|
||||
/// Checks if the transaction operations are the same type.
|
||||
fn check_txn_ops(txn_ops: &[TxnOp]) -> Result<bool> {
|
||||
if txn_ops.is_empty() {
|
||||
return Ok(false);
|
||||
}
|
||||
let same = txn_ops.windows(2).all(|a| {
|
||||
matches!(
|
||||
(&a[0], &a[1]),
|
||||
(TxnOp::Put(_, _), TxnOp::Put(_, _))
|
||||
| (TxnOp::Get(_), TxnOp::Get(_))
|
||||
| (TxnOp::Delete(_), TxnOp::Delete(_))
|
||||
)
|
||||
});
|
||||
Ok(same)
|
||||
}
|
||||
@@ -1,624 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::marker::PhantomData;
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_telemetry::debug;
|
||||
use deadpool_postgres::{Config, Pool, Runtime};
|
||||
use snafu::ResultExt;
|
||||
use tokio_postgres::types::ToSql;
|
||||
use tokio_postgres::{IsolationLevel, NoTls, Row};
|
||||
|
||||
use crate::error::{
|
||||
CreatePostgresPoolSnafu, GetPostgresConnectionSnafu, PostgresExecutionSnafu,
|
||||
PostgresTransactionSnafu, Result,
|
||||
};
|
||||
use crate::kv_backend::rds::{
|
||||
Executor, ExecutorFactory, ExecutorImpl, KvQueryExecutor, RdsStore, Transaction,
|
||||
RDS_STORE_TXN_RETRY_COUNT,
|
||||
};
|
||||
use crate::kv_backend::KvBackendRef;
|
||||
use crate::rpc::store::{
|
||||
BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse, BatchPutRequest,
|
||||
BatchPutResponse, DeleteRangeRequest, DeleteRangeResponse, RangeRequest, RangeResponse,
|
||||
};
|
||||
use crate::rpc::KeyValue;
|
||||
|
||||
pub struct PgClient(deadpool::managed::Object<deadpool_postgres::Manager>);
|
||||
pub struct PgTxnClient<'a>(deadpool_postgres::Transaction<'a>);
|
||||
|
||||
/// Converts a row to a [`KeyValue`].
|
||||
fn key_value_from_row(r: Row) -> KeyValue {
|
||||
KeyValue {
|
||||
key: r.get(0),
|
||||
value: r.get(1),
|
||||
}
|
||||
}
|
||||
|
||||
const EMPTY: &[u8] = &[0];
|
||||
|
||||
/// Type of range template.
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
enum RangeTemplateType {
|
||||
Point,
|
||||
Range,
|
||||
Full,
|
||||
LeftBounded,
|
||||
Prefix,
|
||||
}
|
||||
|
||||
/// Builds params for the given range template type.
|
||||
impl RangeTemplateType {
|
||||
fn build_params(&self, mut key: Vec<u8>, range_end: Vec<u8>) -> Vec<Vec<u8>> {
|
||||
match self {
|
||||
RangeTemplateType::Point => vec![key],
|
||||
RangeTemplateType::Range => vec![key, range_end],
|
||||
RangeTemplateType::Full => vec![],
|
||||
RangeTemplateType::LeftBounded => vec![key],
|
||||
RangeTemplateType::Prefix => {
|
||||
key.push(b'%');
|
||||
vec![key]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Templates for range request.
|
||||
#[derive(Debug, Clone)]
|
||||
struct RangeTemplate {
|
||||
point: String,
|
||||
range: String,
|
||||
full: String,
|
||||
left_bounded: String,
|
||||
prefix: String,
|
||||
}
|
||||
|
||||
impl RangeTemplate {
|
||||
/// Gets the template for the given type.
|
||||
fn get(&self, typ: RangeTemplateType) -> &str {
|
||||
match typ {
|
||||
RangeTemplateType::Point => &self.point,
|
||||
RangeTemplateType::Range => &self.range,
|
||||
RangeTemplateType::Full => &self.full,
|
||||
RangeTemplateType::LeftBounded => &self.left_bounded,
|
||||
RangeTemplateType::Prefix => &self.prefix,
|
||||
}
|
||||
}
|
||||
|
||||
/// Adds limit to the template.
|
||||
fn with_limit(template: &str, limit: i64) -> String {
|
||||
if limit == 0 {
|
||||
return format!("{};", template);
|
||||
}
|
||||
format!("{} LIMIT {};", template, limit)
|
||||
}
|
||||
}
|
||||
|
||||
fn is_prefix_range(start: &[u8], end: &[u8]) -> bool {
|
||||
if start.len() != end.len() {
|
||||
return false;
|
||||
}
|
||||
let l = start.len();
|
||||
let same_prefix = start[0..l - 1] == end[0..l - 1];
|
||||
if let (Some(rhs), Some(lhs)) = (start.last(), end.last()) {
|
||||
return same_prefix && (*rhs + 1) == *lhs;
|
||||
}
|
||||
false
|
||||
}
|
||||
|
||||
/// Determine the template type for range request.
|
||||
fn range_template(key: &[u8], range_end: &[u8]) -> RangeTemplateType {
|
||||
match (key, range_end) {
|
||||
(_, &[]) => RangeTemplateType::Point,
|
||||
(EMPTY, EMPTY) => RangeTemplateType::Full,
|
||||
(_, EMPTY) => RangeTemplateType::LeftBounded,
|
||||
(start, end) => {
|
||||
if is_prefix_range(start, end) {
|
||||
RangeTemplateType::Prefix
|
||||
} else {
|
||||
RangeTemplateType::Range
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Generate in placeholders for PostgreSQL.
|
||||
fn pg_generate_in_placeholders(from: usize, to: usize) -> Vec<String> {
|
||||
(from..=to).map(|i| format!("${}", i)).collect()
|
||||
}
|
||||
|
||||
/// Factory for building sql templates.
|
||||
struct PgSqlTemplateFactory<'a> {
|
||||
table_name: &'a str,
|
||||
}
|
||||
|
||||
impl<'a> PgSqlTemplateFactory<'a> {
|
||||
/// Creates a new [`SqlTemplateFactory`] with the given table name.
|
||||
fn new(table_name: &'a str) -> Self {
|
||||
Self { table_name }
|
||||
}
|
||||
|
||||
/// Builds the template set for the given table name.
|
||||
fn build(&self) -> PgSqlTemplateSet {
|
||||
let table_name = self.table_name;
|
||||
PgSqlTemplateSet {
|
||||
table_name: table_name.to_string(),
|
||||
create_table_statement: format!(
|
||||
"CREATE TABLE IF NOT EXISTS {table_name}(k bytea PRIMARY KEY, v bytea)",
|
||||
),
|
||||
range_template: RangeTemplate {
|
||||
point: format!("SELECT k, v FROM {table_name} WHERE k = $1"),
|
||||
range: format!("SELECT k, v FROM {table_name} WHERE k >= $1 AND k < $2 ORDER BY k"),
|
||||
full: format!("SELECT k, v FROM {table_name} $1 ORDER BY k"),
|
||||
left_bounded: format!("SELECT k, v FROM {table_name} WHERE k >= $1 ORDER BY k"),
|
||||
prefix: format!("SELECT k, v FROM {table_name} WHERE k LIKE $1 ORDER BY k"),
|
||||
},
|
||||
delete_template: RangeTemplate {
|
||||
point: format!("DELETE FROM {table_name} WHERE k = $1 RETURNING k,v;"),
|
||||
range: format!("DELETE FROM {table_name} WHERE k >= $1 AND k < $2 RETURNING k,v;"),
|
||||
full: format!("DELETE FROM {table_name} RETURNING k,v"),
|
||||
left_bounded: format!("DELETE FROM {table_name} WHERE k >= $1 RETURNING k,v;"),
|
||||
prefix: format!("DELETE FROM {table_name} WHERE k LIKE $1 RETURNING k,v;"),
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Templates for the given table name.
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct PgSqlTemplateSet {
|
||||
table_name: String,
|
||||
create_table_statement: String,
|
||||
range_template: RangeTemplate,
|
||||
delete_template: RangeTemplate,
|
||||
}
|
||||
|
||||
impl PgSqlTemplateSet {
|
||||
/// Generates the sql for batch get.
|
||||
fn generate_batch_get_query(&self, key_len: usize) -> String {
|
||||
let table_name = &self.table_name;
|
||||
let in_clause = pg_generate_in_placeholders(1, key_len).join(", ");
|
||||
format!("SELECT k, v FROM {table_name} WHERE k in ({});", in_clause)
|
||||
}
|
||||
|
||||
/// Generates the sql for batch delete.
|
||||
fn generate_batch_delete_query(&self, key_len: usize) -> String {
|
||||
let table_name = &self.table_name;
|
||||
let in_clause = pg_generate_in_placeholders(1, key_len).join(", ");
|
||||
format!(
|
||||
"DELETE FROM {table_name} WHERE k in ({}) RETURNING k,v;",
|
||||
in_clause
|
||||
)
|
||||
}
|
||||
|
||||
/// Generates the sql for batch upsert.
|
||||
fn generate_batch_upsert_query(&self, kv_len: usize) -> String {
|
||||
let table_name = &self.table_name;
|
||||
let in_placeholders: Vec<String> = (1..=kv_len).map(|i| format!("${}", i)).collect();
|
||||
let in_clause = in_placeholders.join(", ");
|
||||
let mut param_index = kv_len + 1;
|
||||
let mut values_placeholders = Vec::new();
|
||||
for _ in 0..kv_len {
|
||||
values_placeholders.push(format!("(${0}, ${1})", param_index, param_index + 1));
|
||||
param_index += 2;
|
||||
}
|
||||
let values_clause = values_placeholders.join(", ");
|
||||
|
||||
format!(
|
||||
r#"
|
||||
WITH prev AS (
|
||||
SELECT k,v FROM {table_name} WHERE k IN ({in_clause})
|
||||
), update AS (
|
||||
INSERT INTO {table_name} (k, v) VALUES
|
||||
{values_clause}
|
||||
ON CONFLICT (
|
||||
k
|
||||
) DO UPDATE SET
|
||||
v = excluded.v
|
||||
)
|
||||
|
||||
SELECT k, v FROM prev;
|
||||
"#
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl Executor for PgClient {
|
||||
type Transaction<'a>
|
||||
= PgTxnClient<'a>
|
||||
where
|
||||
Self: 'a;
|
||||
|
||||
fn name() -> &'static str {
|
||||
"Postgres"
|
||||
}
|
||||
|
||||
async fn query(&mut self, query: &str, params: &[&Vec<u8>]) -> Result<Vec<KeyValue>> {
|
||||
let params: Vec<&(dyn ToSql + Sync)> = params.iter().map(|p| p as _).collect();
|
||||
let stmt = self
|
||||
.0
|
||||
.prepare_cached(query)
|
||||
.await
|
||||
.context(PostgresExecutionSnafu { sql: query })?;
|
||||
let rows = self
|
||||
.0
|
||||
.query(&stmt, ¶ms)
|
||||
.await
|
||||
.context(PostgresExecutionSnafu { sql: query })?;
|
||||
Ok(rows.into_iter().map(key_value_from_row).collect())
|
||||
}
|
||||
|
||||
async fn txn_executor<'a>(&'a mut self) -> Result<Self::Transaction<'a>> {
|
||||
let txn = self
|
||||
.0
|
||||
.build_transaction()
|
||||
.isolation_level(IsolationLevel::Serializable)
|
||||
.start()
|
||||
.await
|
||||
.context(PostgresTransactionSnafu {
|
||||
operation: "begin".to_string(),
|
||||
})?;
|
||||
Ok(PgTxnClient(txn))
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl<'a> Transaction<'a> for PgTxnClient<'a> {
|
||||
async fn query(&mut self, query: &str, params: &[&Vec<u8>]) -> Result<Vec<KeyValue>> {
|
||||
let params: Vec<&(dyn ToSql + Sync)> = params.iter().map(|p| p as _).collect();
|
||||
let stmt = self
|
||||
.0
|
||||
.prepare_cached(query)
|
||||
.await
|
||||
.context(PostgresExecutionSnafu { sql: query })?;
|
||||
let rows = self
|
||||
.0
|
||||
.query(&stmt, ¶ms)
|
||||
.await
|
||||
.context(PostgresExecutionSnafu { sql: query })?;
|
||||
Ok(rows.into_iter().map(key_value_from_row).collect())
|
||||
}
|
||||
|
||||
async fn commit(self) -> Result<()> {
|
||||
self.0.commit().await.context(PostgresTransactionSnafu {
|
||||
operation: "commit",
|
||||
})?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
pub struct PgExecutorFactory {
|
||||
pool: Pool,
|
||||
}
|
||||
|
||||
impl PgExecutorFactory {
|
||||
async fn client(&self) -> Result<PgClient> {
|
||||
match self.pool.get().await {
|
||||
Ok(client) => Ok(PgClient(client)),
|
||||
Err(e) => GetPostgresConnectionSnafu {
|
||||
reason: e.to_string(),
|
||||
}
|
||||
.fail(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl ExecutorFactory<PgClient> for PgExecutorFactory {
|
||||
async fn default_executor(&self) -> Result<PgClient> {
|
||||
self.client().await
|
||||
}
|
||||
|
||||
async fn txn_executor<'a>(
|
||||
&self,
|
||||
default_executor: &'a mut PgClient,
|
||||
) -> Result<PgTxnClient<'a>> {
|
||||
default_executor.txn_executor().await
|
||||
}
|
||||
}
|
||||
|
||||
/// A PostgreSQL-backed key-value store for metasrv.
|
||||
/// It uses [deadpool_postgres::Pool] as the connection pool for [RdsStore].
|
||||
pub type PgStore = RdsStore<PgClient, PgExecutorFactory, PgSqlTemplateSet>;
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl KvQueryExecutor<PgClient> for PgStore {
|
||||
async fn range_with_query_executor(
|
||||
&self,
|
||||
query_executor: &mut ExecutorImpl<'_, PgClient>,
|
||||
req: RangeRequest,
|
||||
) -> Result<RangeResponse> {
|
||||
let template_type = range_template(&req.key, &req.range_end);
|
||||
let template = self.sql_template_set.range_template.get(template_type);
|
||||
let params = template_type.build_params(req.key, req.range_end);
|
||||
let params_ref = params.iter().collect::<Vec<_>>();
|
||||
// Always add 1 to limit to check if there is more data
|
||||
let query =
|
||||
RangeTemplate::with_limit(template, if req.limit == 0 { 0 } else { req.limit + 1 });
|
||||
let limit = req.limit as usize;
|
||||
debug!("query: {:?}, params: {:?}", query, params);
|
||||
let mut kvs = query_executor.query(&query, ¶ms_ref).await?;
|
||||
if req.keys_only {
|
||||
kvs.iter_mut().for_each(|kv| kv.value = vec![]);
|
||||
}
|
||||
// If limit is 0, we always return all data
|
||||
if limit == 0 || kvs.len() <= limit {
|
||||
return Ok(RangeResponse { kvs, more: false });
|
||||
}
|
||||
// If limit is greater than the number of rows, we remove the last row and set more to true
|
||||
let removed = kvs.pop();
|
||||
debug_assert!(removed.is_some());
|
||||
Ok(RangeResponse { kvs, more: true })
|
||||
}
|
||||
|
||||
async fn batch_put_with_query_executor(
|
||||
&self,
|
||||
query_executor: &mut ExecutorImpl<'_, PgClient>,
|
||||
req: BatchPutRequest,
|
||||
) -> Result<BatchPutResponse> {
|
||||
let mut in_params = Vec::with_capacity(req.kvs.len() * 3);
|
||||
let mut values_params = Vec::with_capacity(req.kvs.len() * 2);
|
||||
|
||||
for kv in &req.kvs {
|
||||
let processed_key = &kv.key;
|
||||
in_params.push(processed_key);
|
||||
|
||||
let processed_value = &kv.value;
|
||||
values_params.push(processed_key);
|
||||
values_params.push(processed_value);
|
||||
}
|
||||
in_params.extend(values_params);
|
||||
let params = in_params.iter().map(|x| x as _).collect::<Vec<_>>();
|
||||
let query = self
|
||||
.sql_template_set
|
||||
.generate_batch_upsert_query(req.kvs.len());
|
||||
let kvs = query_executor.query(&query, ¶ms).await?;
|
||||
if req.prev_kv {
|
||||
Ok(BatchPutResponse { prev_kvs: kvs })
|
||||
} else {
|
||||
Ok(BatchPutResponse::default())
|
||||
}
|
||||
}
|
||||
|
||||
/// Batch get with certain client. It's needed for a client with transaction.
|
||||
async fn batch_get_with_query_executor(
|
||||
&self,
|
||||
query_executor: &mut ExecutorImpl<'_, PgClient>,
|
||||
req: BatchGetRequest,
|
||||
) -> Result<BatchGetResponse> {
|
||||
if req.keys.is_empty() {
|
||||
return Ok(BatchGetResponse { kvs: vec![] });
|
||||
}
|
||||
let query = self
|
||||
.sql_template_set
|
||||
.generate_batch_get_query(req.keys.len());
|
||||
let params = req.keys.iter().map(|x| x as _).collect::<Vec<_>>();
|
||||
let kvs = query_executor.query(&query, ¶ms).await?;
|
||||
Ok(BatchGetResponse { kvs })
|
||||
}
|
||||
|
||||
async fn delete_range_with_query_executor(
|
||||
&self,
|
||||
query_executor: &mut ExecutorImpl<'_, PgClient>,
|
||||
req: DeleteRangeRequest,
|
||||
) -> Result<DeleteRangeResponse> {
|
||||
let template_type = range_template(&req.key, &req.range_end);
|
||||
let template = self.sql_template_set.delete_template.get(template_type);
|
||||
let params = template_type.build_params(req.key, req.range_end);
|
||||
let params_ref = params.iter().map(|x| x as _).collect::<Vec<_>>();
|
||||
let kvs = query_executor.query(template, ¶ms_ref).await?;
|
||||
let mut resp = DeleteRangeResponse::new(kvs.len() as i64);
|
||||
if req.prev_kv {
|
||||
resp.with_prev_kvs(kvs);
|
||||
}
|
||||
Ok(resp)
|
||||
}
|
||||
|
||||
async fn batch_delete_with_query_executor(
|
||||
&self,
|
||||
query_executor: &mut ExecutorImpl<'_, PgClient>,
|
||||
req: BatchDeleteRequest,
|
||||
) -> Result<BatchDeleteResponse> {
|
||||
if req.keys.is_empty() {
|
||||
return Ok(BatchDeleteResponse::default());
|
||||
}
|
||||
let query = self
|
||||
.sql_template_set
|
||||
.generate_batch_delete_query(req.keys.len());
|
||||
let params = req.keys.iter().map(|x| x as _).collect::<Vec<_>>();
|
||||
let kvs = query_executor.query(&query, ¶ms).await?;
|
||||
if req.prev_kv {
|
||||
Ok(BatchDeleteResponse { prev_kvs: kvs })
|
||||
} else {
|
||||
Ok(BatchDeleteResponse::default())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl PgStore {
|
||||
/// Create [PgStore] impl of [KvBackendRef] from url.
|
||||
pub async fn with_url(url: &str, table_name: &str, max_txn_ops: usize) -> Result<KvBackendRef> {
|
||||
let mut cfg = Config::new();
|
||||
cfg.url = Some(url.to_string());
|
||||
// TODO(weny, CookiePie): add tls support
|
||||
let pool = cfg
|
||||
.create_pool(Some(Runtime::Tokio1), NoTls)
|
||||
.context(CreatePostgresPoolSnafu)?;
|
||||
Self::with_pg_pool(pool, table_name, max_txn_ops).await
|
||||
}
|
||||
|
||||
/// Create [PgStore] impl of [KvBackendRef] from [deadpool_postgres::Pool].
|
||||
pub async fn with_pg_pool(
|
||||
pool: Pool,
|
||||
table_name: &str,
|
||||
max_txn_ops: usize,
|
||||
) -> Result<KvBackendRef> {
|
||||
// This step ensures the postgres metadata backend is ready to use.
|
||||
// We check if greptime_metakv table exists, and we will create a new table
|
||||
// if it does not exist.
|
||||
let client = match pool.get().await {
|
||||
Ok(client) => client,
|
||||
Err(e) => {
|
||||
return GetPostgresConnectionSnafu {
|
||||
reason: e.to_string(),
|
||||
}
|
||||
.fail();
|
||||
}
|
||||
};
|
||||
let template_factory = PgSqlTemplateFactory::new(table_name);
|
||||
let sql_template_set = template_factory.build();
|
||||
client
|
||||
.execute(&sql_template_set.create_table_statement, &[])
|
||||
.await
|
||||
.with_context(|_| PostgresExecutionSnafu {
|
||||
sql: sql_template_set.create_table_statement.to_string(),
|
||||
})?;
|
||||
Ok(Arc::new(Self {
|
||||
max_txn_ops,
|
||||
sql_template_set,
|
||||
txn_retry_count: RDS_STORE_TXN_RETRY_COUNT,
|
||||
executor_factory: PgExecutorFactory { pool },
|
||||
_phantom: PhantomData,
|
||||
}))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::kv_backend::test::{
|
||||
prepare_kv_with_prefix, test_kv_batch_delete_with_prefix, test_kv_batch_get_with_prefix,
|
||||
test_kv_compare_and_put_with_prefix, test_kv_delete_range_with_prefix,
|
||||
test_kv_put_with_prefix, test_kv_range_2_with_prefix, test_kv_range_with_prefix,
|
||||
test_txn_compare_equal, test_txn_compare_greater, test_txn_compare_less,
|
||||
test_txn_compare_not_equal, test_txn_one_compare_op, text_txn_multi_compare_op,
|
||||
unprepare_kv,
|
||||
};
|
||||
|
||||
async fn build_pg_kv_backend(table_name: &str) -> Option<PgStore> {
|
||||
let endpoints = std::env::var("GT_POSTGRES_ENDPOINTS").unwrap_or_default();
|
||||
if endpoints.is_empty() {
|
||||
return None;
|
||||
}
|
||||
|
||||
let mut cfg = Config::new();
|
||||
cfg.url = Some(endpoints);
|
||||
let pool = cfg
|
||||
.create_pool(Some(Runtime::Tokio1), NoTls)
|
||||
.context(CreatePostgresPoolSnafu)
|
||||
.unwrap();
|
||||
let client = pool.get().await.unwrap();
|
||||
let template_factory = PgSqlTemplateFactory::new(table_name);
|
||||
let sql_templates = template_factory.build();
|
||||
client
|
||||
.execute(&sql_templates.create_table_statement, &[])
|
||||
.await
|
||||
.context(PostgresExecutionSnafu {
|
||||
sql: sql_templates.create_table_statement.to_string(),
|
||||
})
|
||||
.unwrap();
|
||||
Some(PgStore {
|
||||
max_txn_ops: 128,
|
||||
sql_template_set: sql_templates,
|
||||
txn_retry_count: RDS_STORE_TXN_RETRY_COUNT,
|
||||
executor_factory: PgExecutorFactory { pool },
|
||||
_phantom: PhantomData,
|
||||
})
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_pg_put() {
|
||||
let kv_backend = build_pg_kv_backend("put_test").await.unwrap();
|
||||
let prefix = b"put/";
|
||||
prepare_kv_with_prefix(&kv_backend, prefix.to_vec()).await;
|
||||
test_kv_put_with_prefix(&kv_backend, prefix.to_vec()).await;
|
||||
unprepare_kv(&kv_backend, prefix).await;
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_pg_range() {
|
||||
let kv_backend = build_pg_kv_backend("range_test").await.unwrap();
|
||||
let prefix = b"range/";
|
||||
prepare_kv_with_prefix(&kv_backend, prefix.to_vec()).await;
|
||||
test_kv_range_with_prefix(&kv_backend, prefix.to_vec()).await;
|
||||
unprepare_kv(&kv_backend, prefix).await;
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_pg_range_2() {
|
||||
let kv_backend = build_pg_kv_backend("range2_test").await.unwrap();
|
||||
let prefix = b"range2/";
|
||||
test_kv_range_2_with_prefix(&kv_backend, prefix.to_vec()).await;
|
||||
unprepare_kv(&kv_backend, prefix).await;
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_pg_batch_get() {
|
||||
let kv_backend = build_pg_kv_backend("batch_get_test").await.unwrap();
|
||||
let prefix = b"batch_get/";
|
||||
prepare_kv_with_prefix(&kv_backend, prefix.to_vec()).await;
|
||||
test_kv_batch_get_with_prefix(&kv_backend, prefix.to_vec()).await;
|
||||
unprepare_kv(&kv_backend, prefix).await;
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_pg_batch_delete() {
|
||||
let kv_backend = build_pg_kv_backend("batch_delete_test").await.unwrap();
|
||||
let prefix = b"batch_delete/";
|
||||
prepare_kv_with_prefix(&kv_backend, prefix.to_vec()).await;
|
||||
test_kv_delete_range_with_prefix(&kv_backend, prefix.to_vec()).await;
|
||||
unprepare_kv(&kv_backend, prefix).await;
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_pg_batch_delete_with_prefix() {
|
||||
let kv_backend = build_pg_kv_backend("batch_delete_with_prefix_test")
|
||||
.await
|
||||
.unwrap();
|
||||
let prefix = b"batch_delete/";
|
||||
prepare_kv_with_prefix(&kv_backend, prefix.to_vec()).await;
|
||||
test_kv_batch_delete_with_prefix(&kv_backend, prefix.to_vec()).await;
|
||||
unprepare_kv(&kv_backend, prefix).await;
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_pg_delete_range() {
|
||||
let kv_backend = build_pg_kv_backend("delete_range_test").await.unwrap();
|
||||
let prefix = b"delete_range/";
|
||||
prepare_kv_with_prefix(&kv_backend, prefix.to_vec()).await;
|
||||
test_kv_delete_range_with_prefix(&kv_backend, prefix.to_vec()).await;
|
||||
unprepare_kv(&kv_backend, prefix).await;
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_pg_compare_and_put() {
|
||||
let kv_backend = build_pg_kv_backend("compare_and_put_test").await.unwrap();
|
||||
let prefix = b"compare_and_put/";
|
||||
let kv_backend = Arc::new(kv_backend);
|
||||
test_kv_compare_and_put_with_prefix(kv_backend.clone(), prefix.to_vec()).await;
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_pg_txn() {
|
||||
let kv_backend = build_pg_kv_backend("txn_test").await.unwrap();
|
||||
test_txn_one_compare_op(&kv_backend).await;
|
||||
text_txn_multi_compare_op(&kv_backend).await;
|
||||
test_txn_compare_equal(&kv_backend).await;
|
||||
test_txn_compare_greater(&kv_backend).await;
|
||||
test_txn_compare_less(&kv_backend).await;
|
||||
test_txn_compare_not_equal(&kv_backend).await;
|
||||
}
|
||||
}
|
||||
@@ -1239,7 +1239,6 @@ impl From<QueryContext> for PbQueryContext {
|
||||
timezone,
|
||||
extensions,
|
||||
channel: channel as u32,
|
||||
snapshot_seqs: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -13,9 +13,9 @@
|
||||
// limitations under the License.
|
||||
|
||||
mod selector;
|
||||
pub(crate) mod topic_creator;
|
||||
mod topic_creator;
|
||||
mod topic_manager;
|
||||
pub(crate) mod topic_pool;
|
||||
mod topic_pool;
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
|
||||
@@ -19,7 +19,7 @@ use std::time::Duration;
|
||||
use common_base::readable_size::ReadableSize;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
#[serde(default)]
|
||||
pub struct ProcedureConfig {
|
||||
/// Max retry times of procedure.
|
||||
|
||||
@@ -10,7 +10,7 @@ workspace = true
|
||||
[dependencies]
|
||||
arrow.workspace = true
|
||||
chrono.workspace = true
|
||||
chrono-tz.workspace = true
|
||||
chrono-tz = "0.8"
|
||||
common-error.workspace = true
|
||||
common-macro.workspace = true
|
||||
humantime.workspace = true
|
||||
|
||||
@@ -394,7 +394,7 @@ impl Default for DatanodeOptions {
|
||||
require_lease_before_startup: false,
|
||||
init_regions_in_background: false,
|
||||
init_regions_parallelism: 16,
|
||||
grpc: GrpcOptions::default().with_bind_addr("127.0.0.1:3001"),
|
||||
grpc: GrpcOptions::default().with_addr("127.0.0.1:3001"),
|
||||
http: HttpOptions::default(),
|
||||
meta_client: None,
|
||||
wal: DatanodeWalConfig::default(),
|
||||
|
||||
@@ -260,12 +260,11 @@ pub enum Error {
|
||||
source: BoxedError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to handle batch ddl request, ddl_type: {}", ddl_type))]
|
||||
HandleBatchDdlRequest {
|
||||
#[snafu(display("Failed to handle batch request"))]
|
||||
HandleBatchRequest {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: BoxedError,
|
||||
ddl_type: String,
|
||||
},
|
||||
|
||||
#[snafu(display("RegionId {} not found", region_id))]
|
||||
@@ -447,7 +446,7 @@ impl ErrorExt for Error {
|
||||
HandleRegionRequest { source, .. }
|
||||
| GetRegionMetadata { source, .. }
|
||||
| HandleBatchOpenRequest { source, .. }
|
||||
| HandleBatchDdlRequest { source, .. } => source.status_code(),
|
||||
| HandleBatchRequest { source, .. } => source.status_code(),
|
||||
StopRegionEngine { source, .. } => source.status_code(),
|
||||
|
||||
FindLogicalRegions { source, .. } => source.status_code(),
|
||||
|
||||
@@ -89,7 +89,7 @@ impl HeartbeatTask {
|
||||
node_id: opts.node_id.unwrap_or(0),
|
||||
// We use datanode's start time millis as the node's epoch.
|
||||
node_epoch: common_time::util::current_time_millis() as u64,
|
||||
peer_addr: addrs::resolve_addr(&opts.grpc.bind_addr, Some(&opts.grpc.server_addr)),
|
||||
peer_addr: addrs::resolve_addr(&opts.grpc.addr, Some(&opts.grpc.hostname)),
|
||||
running: Arc::new(AtomicBool::new(false)),
|
||||
meta_client,
|
||||
region_server,
|
||||
|
||||
@@ -38,7 +38,7 @@ use datafusion::datasource::{provider_as_source, TableProvider};
|
||||
use datafusion::error::Result as DfResult;
|
||||
use datafusion_common::tree_node::{Transformed, TreeNode, TreeNodeRewriter};
|
||||
use datafusion_expr::{LogicalPlan, TableSource};
|
||||
use futures_util::future::try_join_all;
|
||||
use futures::future::try_join_all;
|
||||
use metric_engine::engine::MetricEngine;
|
||||
use mito2::engine::MITO_ENGINE_NAME;
|
||||
use prost::Message;
|
||||
@@ -59,7 +59,8 @@ use store_api::region_engine::{
|
||||
SettableRegionRoleState,
|
||||
};
|
||||
use store_api::region_request::{
|
||||
AffectedRows, BatchRegionDdlRequest, RegionCloseRequest, RegionOpenRequest, RegionRequest,
|
||||
convert_body_to_requests, AffectedRows, BatchRegionRequest, RegionCloseRequest,
|
||||
RegionOpenRequest, RegionPutRequest, RegionRequest, RegionRequestBundle,
|
||||
};
|
||||
use store_api::storage::RegionId;
|
||||
use tokio::sync::{Semaphore, SemaphorePermit};
|
||||
@@ -69,8 +70,8 @@ use tonic::{Request, Response, Result as TonicResult};
|
||||
use crate::error::{
|
||||
self, BuildRegionRequestsSnafu, ConcurrentQueryLimiterClosedSnafu,
|
||||
ConcurrentQueryLimiterTimeoutSnafu, DataFusionSnafu, DecodeLogicalPlanSnafu,
|
||||
ExecuteLogicalPlanSnafu, FindLogicalRegionsSnafu, HandleBatchDdlRequestSnafu,
|
||||
HandleBatchOpenRequestSnafu, HandleRegionRequestSnafu, NewPlanDecoderSnafu,
|
||||
ExecuteLogicalPlanSnafu, FindLogicalRegionsSnafu, HandleBatchOpenRequestSnafu,
|
||||
HandleBatchRequestSnafu, HandleRegionRequestSnafu, NewPlanDecoderSnafu,
|
||||
RegionEngineNotFoundSnafu, RegionNotFoundSnafu, RegionNotReadySnafu, Result,
|
||||
StopRegionEngineSnafu, UnexpectedSnafu, UnsupportedOutputSnafu,
|
||||
};
|
||||
@@ -159,12 +160,19 @@ impl RegionServer {
|
||||
self.inner.handle_request(region_id, request).await
|
||||
}
|
||||
|
||||
/// Returns a table provider for the region. Will set snapshot sequence if available in the context.
|
||||
async fn table_provider(
|
||||
#[tracing::instrument(skip_all, fields(request_type = "Put"))]
|
||||
pub async fn handle_batch_body(&self, body: region_request::Body) -> Result<RegionResponse> {
|
||||
self.inner.handle_batch_body(body).await
|
||||
}
|
||||
|
||||
pub async fn handle_batch_request(
|
||||
&self,
|
||||
region_id: RegionId,
|
||||
ctx: Option<&session::context::QueryContext>,
|
||||
) -> Result<Arc<dyn TableProvider>> {
|
||||
batch_request: BatchRegionRequest,
|
||||
) -> Result<RegionResponse> {
|
||||
self.inner.handle_batch_request(batch_request).await
|
||||
}
|
||||
|
||||
async fn table_provider(&self, region_id: RegionId) -> Result<Arc<dyn TableProvider>> {
|
||||
let status = self
|
||||
.inner
|
||||
.region_map
|
||||
@@ -178,7 +186,7 @@ impl RegionServer {
|
||||
|
||||
self.inner
|
||||
.table_provider_factory
|
||||
.create(region_id, status.into_engine(), ctx)
|
||||
.create(region_id, status.into_engine())
|
||||
.await
|
||||
.context(ExecuteLogicalPlanSnafu)
|
||||
}
|
||||
@@ -193,6 +201,9 @@ impl RegionServer {
|
||||
} else {
|
||||
None
|
||||
};
|
||||
let region_id = RegionId::from_u64(request.region_id);
|
||||
let provider = self.table_provider(region_id).await?;
|
||||
let catalog_list = Arc::new(DummyCatalogList::with_table_provider(provider));
|
||||
|
||||
let query_ctx: QueryContextRef = request
|
||||
.header
|
||||
@@ -200,10 +211,6 @@ impl RegionServer {
|
||||
.map(|h| Arc::new(h.into()))
|
||||
.unwrap_or_else(|| Arc::new(QueryContextBuilder::default().build()));
|
||||
|
||||
let region_id = RegionId::from_u64(request.region_id);
|
||||
let provider = self.table_provider(region_id, Some(&query_ctx)).await?;
|
||||
let catalog_list = Arc::new(DummyCatalogList::with_table_provider(provider));
|
||||
|
||||
let decoder = self
|
||||
.inner
|
||||
.query_engine
|
||||
@@ -232,10 +239,7 @@ impl RegionServer {
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let ctx: Option<session::context::QueryContext> = request.header.as_ref().map(|h| h.into());
|
||||
|
||||
let provider = self.table_provider(request.region_id, ctx.as_ref()).await?;
|
||||
let provider = self.table_provider(request.region_id).await?;
|
||||
|
||||
struct RegionDataSourceInjector {
|
||||
source: Arc<dyn TableSource>,
|
||||
@@ -355,33 +359,27 @@ impl RegionServer {
|
||||
.insert(region_id, RegionEngineWithStatus::Ready(engine));
|
||||
}
|
||||
|
||||
async fn handle_batch_ddl_requests(
|
||||
async fn handle_single_request(
|
||||
&self,
|
||||
request: region_request::Body,
|
||||
region_id: RegionId,
|
||||
request: RegionRequest,
|
||||
) -> Result<RegionResponse> {
|
||||
// Safety: we have already checked the request type in `RegionServer::handle()`.
|
||||
let batch_request = BatchRegionDdlRequest::try_from_request_body(request)
|
||||
.context(BuildRegionRequestsSnafu)?
|
||||
.unwrap();
|
||||
let tracing_context = TracingContext::from_current_span();
|
||||
|
||||
let span = tracing_context.attach(info_span!("RegionServer::handle_batch_ddl_requests"));
|
||||
self.inner
|
||||
.handle_batch_request(batch_request)
|
||||
.trace(span)
|
||||
.await
|
||||
let span = tracing_context.attach(info_span!(
|
||||
"RegionServer::handle_region_request",
|
||||
region_id = region_id.to_string()
|
||||
));
|
||||
self.handle_request(region_id, request).trace(span).await
|
||||
}
|
||||
|
||||
async fn handle_requests_in_parallel(
|
||||
async fn handle_vector_request(
|
||||
&self,
|
||||
request: region_request::Body,
|
||||
requests: Vec<(RegionId, RegionRequest)>,
|
||||
) -> Result<RegionResponse> {
|
||||
let requests =
|
||||
RegionRequest::try_from_request_body(request).context(BuildRegionRequestsSnafu)?;
|
||||
let tracing_context = TracingContext::from_current_span();
|
||||
|
||||
let join_tasks = requests.into_iter().map(|(region_id, req)| {
|
||||
let self_to_move = self;
|
||||
let self_to_move = self.clone();
|
||||
let span = tracing_context.attach(info_span!(
|
||||
"RegionServer::handle_region_request",
|
||||
region_id = region_id.to_string()
|
||||
@@ -407,51 +405,51 @@ impl RegionServer {
|
||||
extensions,
|
||||
})
|
||||
}
|
||||
|
||||
async fn handle_requests_in_serial(
|
||||
&self,
|
||||
request: region_request::Body,
|
||||
) -> Result<RegionResponse> {
|
||||
let requests =
|
||||
RegionRequest::try_from_request_body(request).context(BuildRegionRequestsSnafu)?;
|
||||
let tracing_context = TracingContext::from_current_span();
|
||||
|
||||
let mut affected_rows = 0;
|
||||
let mut extensions = HashMap::new();
|
||||
// FIXME(jeremy, ruihang): Once the engine supports merged calls, we should immediately
|
||||
// modify this part to avoid inefficient serial loop calls.
|
||||
for (region_id, req) in requests {
|
||||
let span = tracing_context.attach(info_span!(
|
||||
"RegionServer::handle_region_request",
|
||||
region_id = region_id.to_string()
|
||||
));
|
||||
let result = self.handle_request(region_id, req).trace(span).await?;
|
||||
|
||||
affected_rows += result.affected_rows;
|
||||
extensions.extend(result.extensions);
|
||||
}
|
||||
|
||||
Ok(RegionResponse {
|
||||
affected_rows,
|
||||
extensions,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl RegionServerHandler for RegionServer {
|
||||
async fn handle(&self, request: region_request::Body) -> ServerResult<RegionResponseV1> {
|
||||
let response = match &request {
|
||||
region_request::Body::Creates(_)
|
||||
| region_request::Body::Drops(_)
|
||||
| region_request::Body::Alters(_) => self.handle_batch_ddl_requests(request).await,
|
||||
region_request::Body::Inserts(_) | region_request::Body::Deletes(_) => {
|
||||
self.handle_requests_in_parallel(request).await
|
||||
}
|
||||
_ => self.handle_requests_in_serial(request).await,
|
||||
if matches!(request, region_request::Body::Inserts(_)) {
|
||||
let resp = self
|
||||
.handle_batch_body(request)
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(ExecuteGrpcRequestSnafu)?;
|
||||
return Ok(RegionResponseV1 {
|
||||
header: Some(ResponseHeader {
|
||||
status: Some(Status {
|
||||
status_code: StatusCode::Success as _,
|
||||
..Default::default()
|
||||
}),
|
||||
}),
|
||||
affected_rows: resp.affected_rows as _,
|
||||
extensions: resp.extensions,
|
||||
});
|
||||
}
|
||||
.map_err(BoxedError::new)
|
||||
.context(ExecuteGrpcRequestSnafu)?;
|
||||
|
||||
let bundle = convert_body_to_requests(request)
|
||||
.context(BuildRegionRequestsSnafu)
|
||||
.map_err(BoxedError::new)
|
||||
.context(ExecuteGrpcRequestSnafu)?;
|
||||
|
||||
let result = match bundle {
|
||||
RegionRequestBundle::Single((region_id, request)) => self
|
||||
.handle_single_request(region_id, request)
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(ExecuteGrpcRequestSnafu)?,
|
||||
RegionRequestBundle::Vector(requests) => self
|
||||
.handle_vector_request(requests)
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(ExecuteGrpcRequestSnafu)?,
|
||||
RegionRequestBundle::Batch(requests) => self
|
||||
.handle_batch_request(requests)
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(ExecuteGrpcRequestSnafu)?,
|
||||
};
|
||||
|
||||
Ok(RegionResponseV1 {
|
||||
header: Some(ResponseHeader {
|
||||
@@ -460,8 +458,8 @@ impl RegionServerHandler for RegionServer {
|
||||
..Default::default()
|
||||
}),
|
||||
}),
|
||||
affected_rows: response.affected_rows as _,
|
||||
extensions: response.extensions,
|
||||
affected_rows: result.affected_rows as _,
|
||||
extensions: result.extensions,
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -707,7 +705,7 @@ impl RegionServerInner {
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
self.unset_region_status(region_id, &engine, *region_change);
|
||||
self.unset_region_status(region_id, *region_change);
|
||||
error!(e; "Failed to open region: {}", region_id);
|
||||
errors.push(e);
|
||||
}
|
||||
@@ -716,7 +714,7 @@ impl RegionServerInner {
|
||||
}
|
||||
Err(e) => {
|
||||
for (®ion_id, region_change) in ®ion_changes {
|
||||
self.unset_region_status(region_id, &engine, *region_change);
|
||||
self.unset_region_status(region_id, *region_change);
|
||||
}
|
||||
error!(e; "Failed to open batch regions");
|
||||
errors.push(BoxedError::new(e));
|
||||
@@ -778,28 +776,30 @@ impl RegionServerInner {
|
||||
// limitation: all create requests must be in the same engine.
|
||||
pub async fn handle_batch_request(
|
||||
&self,
|
||||
batch_request: BatchRegionDdlRequest,
|
||||
batch_request: BatchRegionRequest,
|
||||
) -> Result<RegionResponse> {
|
||||
let region_changes = match &batch_request {
|
||||
BatchRegionDdlRequest::Create(requests) => requests
|
||||
BatchRegionRequest::Create(requests) => requests
|
||||
.iter()
|
||||
.map(|(region_id, create)| {
|
||||
let attribute = parse_region_attribute(&create.engine, &create.options)?;
|
||||
Ok((*region_id, RegionChange::Register(attribute)))
|
||||
})
|
||||
.collect::<Result<Vec<_>>>()?,
|
||||
BatchRegionDdlRequest::Drop(requests) => requests
|
||||
BatchRegionRequest::Drop(requests) => requests
|
||||
.iter()
|
||||
.map(|(region_id, _)| (*region_id, RegionChange::Deregisters))
|
||||
.collect::<Vec<_>>(),
|
||||
BatchRegionDdlRequest::Alter(requests) => requests
|
||||
BatchRegionRequest::Alter(requests) => requests
|
||||
.iter()
|
||||
.map(|(region_id, _)| (*region_id, RegionChange::None))
|
||||
.collect::<Vec<_>>(),
|
||||
BatchRegionRequest::Put(requests) => requests
|
||||
.iter()
|
||||
.map(|(region_id, _)| (*region_id, RegionChange::None))
|
||||
.collect::<Vec<_>>(),
|
||||
};
|
||||
|
||||
// The ddl procedure will ensure all requests are in the same engine.
|
||||
// Therefore, we can get the engine from the first request.
|
||||
let (first_region_id, first_region_change) = region_changes.first().unwrap();
|
||||
let engine = match self.get_engine(*first_region_id, first_region_change)? {
|
||||
CurrentEngine::Engine(engine) => engine,
|
||||
@@ -810,11 +810,10 @@ impl RegionServerInner {
|
||||
self.set_region_status_not_ready(*region_id, &engine, region_change);
|
||||
}
|
||||
|
||||
let ddl_type = batch_request.request_type();
|
||||
let result = engine
|
||||
.handle_batch_ddl_requests(batch_request)
|
||||
.handle_batch_request(batch_request)
|
||||
.await
|
||||
.context(HandleBatchDdlRequestSnafu { ddl_type });
|
||||
.context(HandleBatchRequestSnafu {});
|
||||
|
||||
match result {
|
||||
Ok(result) => {
|
||||
@@ -830,7 +829,7 @@ impl RegionServerInner {
|
||||
}
|
||||
Err(err) => {
|
||||
for (region_id, region_change) in region_changes {
|
||||
self.unset_region_status(region_id, &engine, region_change);
|
||||
self.unset_region_status(region_id, region_change);
|
||||
}
|
||||
|
||||
Err(err)
|
||||
@@ -891,12 +890,77 @@ impl RegionServerInner {
|
||||
}
|
||||
Err(err) => {
|
||||
// Removes the region status if the operation fails.
|
||||
self.unset_region_status(region_id, &engine, region_change);
|
||||
self.unset_region_status(region_id, region_change);
|
||||
Err(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn handle_batch_body(&self, body: region_request::Body) -> Result<RegionResponse> {
|
||||
let _timer = crate::metrics::HANDLE_REGION_REQUEST_ELAPSED
|
||||
.with_label_values(&["Put"])
|
||||
.start_timer();
|
||||
|
||||
// Group requests by engine.
|
||||
let mut engine_requests: HashMap<
|
||||
String,
|
||||
(RegionEngineRef, Vec<(RegionId, RegionPutRequest)>),
|
||||
> = HashMap::with_capacity(1);
|
||||
match body {
|
||||
region_request::Body::Inserts(inserts) => {
|
||||
let num_requests = inserts.requests.len();
|
||||
for request in inserts.requests {
|
||||
let region_id = RegionId::from_u64(request.region_id);
|
||||
let CurrentEngine::Engine(engine) =
|
||||
self.get_engine(region_id, &RegionChange::None)?
|
||||
else {
|
||||
continue;
|
||||
};
|
||||
let Some(rows) = request.rows else {
|
||||
continue;
|
||||
};
|
||||
|
||||
match engine_requests.get_mut(engine.name()) {
|
||||
Some((_, requests)) => {
|
||||
requests.push((region_id, RegionPutRequest { rows, hint: None }))
|
||||
}
|
||||
None => {
|
||||
let mut requests = Vec::with_capacity(num_requests);
|
||||
requests.push((region_id, RegionPutRequest { rows, hint: None }));
|
||||
engine_requests.insert(engine.name().to_string(), (engine, requests));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
|
||||
for (_, (engine, request)) in engine_requests {
|
||||
engine
|
||||
.handle_batch_request(BatchRegionRequest::Put(request))
|
||||
.await
|
||||
.context(HandleBatchRequestSnafu)?;
|
||||
}
|
||||
|
||||
// match engine
|
||||
// .handle_request(region_id, request)
|
||||
// .await
|
||||
// .with_context(|_| HandleRegionRequestSnafu { region_id })
|
||||
// {
|
||||
// Ok(result) => {
|
||||
// Ok(RegionResponse {
|
||||
// affected_rows: result.affected_rows,
|
||||
// extensions: result.extensions,
|
||||
// })
|
||||
// }
|
||||
// Err(err) => {
|
||||
// Err(err)
|
||||
// }
|
||||
// }
|
||||
|
||||
Ok(RegionResponse::new(0))
|
||||
}
|
||||
|
||||
fn set_region_status_not_ready(
|
||||
&self,
|
||||
region_id: RegionId,
|
||||
@@ -920,21 +984,12 @@ impl RegionServerInner {
|
||||
}
|
||||
}
|
||||
|
||||
fn unset_region_status(
|
||||
&self,
|
||||
region_id: RegionId,
|
||||
engine: &RegionEngineRef,
|
||||
region_change: RegionChange,
|
||||
) {
|
||||
fn unset_region_status(&self, region_id: RegionId, region_change: RegionChange) {
|
||||
match region_change {
|
||||
RegionChange::None => {}
|
||||
RegionChange::Register(_) => {
|
||||
RegionChange::Register(_) | RegionChange::Deregisters => {
|
||||
self.region_map.remove(®ion_id);
|
||||
}
|
||||
RegionChange::Deregisters => {
|
||||
self.region_map
|
||||
.insert(region_id, RegionEngineWithStatus::Ready(engine.clone()));
|
||||
}
|
||||
RegionChange::Catchup => {}
|
||||
}
|
||||
}
|
||||
@@ -1315,7 +1370,7 @@ mod tests {
|
||||
.unwrap_err();
|
||||
|
||||
let status = mock_region_server.inner.region_map.get(®ion_id);
|
||||
assert!(status.is_some());
|
||||
assert!(status.is_none());
|
||||
}
|
||||
|
||||
struct CurrentEngineTest {
|
||||
|
||||
@@ -66,8 +66,8 @@ impl<'a> DatanodeServiceBuilder<'a> {
|
||||
let handlers = ServerHandlers::default();
|
||||
|
||||
if let Some(grpc_server) = self.grpc_server.take() {
|
||||
let addr: SocketAddr = self.opts.grpc.bind_addr.parse().context(ParseAddrSnafu {
|
||||
addr: &self.opts.grpc.bind_addr,
|
||||
let addr: SocketAddr = self.opts.grpc.addr.parse().context(ParseAddrSnafu {
|
||||
addr: &self.opts.grpc.addr,
|
||||
})?;
|
||||
let handler: ServerHandler = (Box::new(grpc_server), addr);
|
||||
handlers.insert(handler).await;
|
||||
|
||||
@@ -37,7 +37,7 @@ use store_api::region_engine::{
|
||||
SettableRegionRoleState,
|
||||
};
|
||||
use store_api::region_request::{AffectedRows, RegionRequest};
|
||||
use store_api::storage::{RegionId, ScanRequest, SequenceNumber};
|
||||
use store_api::storage::{RegionId, ScanRequest};
|
||||
use table::TableRef;
|
||||
use tokio::sync::mpsc::{Receiver, Sender};
|
||||
|
||||
@@ -218,10 +218,6 @@ impl RegionEngine for MockRegionEngine {
|
||||
unimplemented!()
|
||||
}
|
||||
|
||||
async fn get_last_seq_num(&self, _: RegionId) -> Result<Option<SequenceNumber>, BoxedError> {
|
||||
unimplemented!()
|
||||
}
|
||||
|
||||
async fn stop(&self) -> Result<(), BoxedError> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -183,6 +183,12 @@ impl ColumnSchema {
|
||||
self
|
||||
}
|
||||
|
||||
// Put a placeholder to invalidate schemas.all(!has_inverted_index_key).
|
||||
pub fn insert_inverted_index_placeholder(&mut self) {
|
||||
self.metadata
|
||||
.insert(INVERTED_INDEX_KEY.to_string(), "".to_string());
|
||||
}
|
||||
|
||||
pub fn is_inverted_indexed(&self) -> bool {
|
||||
self.metadata
|
||||
.get(INVERTED_INDEX_KEY)
|
||||
@@ -380,11 +386,6 @@ impl ColumnSchema {
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn unset_skipping_options(&mut self) -> Result<()> {
|
||||
self.metadata.remove(SKIPPING_INDEX_KEY);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// Column extended type set in column schema's metadata.
|
||||
|
||||
@@ -77,32 +77,27 @@ impl BinaryVector {
|
||||
.unwrap()
|
||||
.iter()
|
||||
{
|
||||
let Some(binary) = binary else {
|
||||
vector.push(None);
|
||||
continue;
|
||||
};
|
||||
|
||||
if let Ok(s) = String::from_utf8(binary.to_vec()) {
|
||||
if let Ok(v) = parse_string_to_vector_type_value(&s, Some(dim)) {
|
||||
vector.push(Some(v));
|
||||
continue;
|
||||
let v = if let Some(binary) = binary {
|
||||
let bytes_size = dim as usize * std::mem::size_of::<f32>();
|
||||
if let Ok(s) = String::from_utf8(binary.to_vec()) {
|
||||
let v = parse_string_to_vector_type_value(&s, Some(dim))?;
|
||||
Some(v)
|
||||
} else if binary.len() == dim as usize * std::mem::size_of::<f32>() {
|
||||
Some(binary.to_vec())
|
||||
} else {
|
||||
return InvalidVectorSnafu {
|
||||
msg: format!(
|
||||
"Unexpected bytes size for vector value, expected {}, got {}",
|
||||
bytes_size,
|
||||
binary.len()
|
||||
),
|
||||
}
|
||||
.fail();
|
||||
}
|
||||
}
|
||||
|
||||
let expected_bytes_size = dim as usize * std::mem::size_of::<f32>();
|
||||
if binary.len() == expected_bytes_size {
|
||||
vector.push(Some(binary.to_vec()));
|
||||
continue;
|
||||
} else {
|
||||
return InvalidVectorSnafu {
|
||||
msg: format!(
|
||||
"Unexpected bytes size for vector value, expected {}, got {}",
|
||||
expected_bytes_size,
|
||||
binary.len()
|
||||
),
|
||||
}
|
||||
.fail();
|
||||
}
|
||||
None
|
||||
};
|
||||
vector.push(v);
|
||||
}
|
||||
Ok(BinaryVector::from(vector))
|
||||
}
|
||||
|
||||
@@ -387,43 +387,6 @@ impl Decimal128VectorBuilder {
|
||||
|
||||
vectors::impl_try_from_arrow_array_for_vector!(Decimal128Array, Decimal128Vector);
|
||||
|
||||
pub(crate) fn replicate_decimal128(
|
||||
vector: &Decimal128Vector,
|
||||
offsets: &[usize],
|
||||
) -> Decimal128Vector {
|
||||
assert_eq!(offsets.len(), vector.len());
|
||||
|
||||
if offsets.is_empty() {
|
||||
return vector.get_slice(0, 0);
|
||||
}
|
||||
|
||||
// Safety: safe to unwrap because we the vector ensures precision and scale are valid.
|
||||
let mut builder = Decimal128VectorBuilder::with_capacity(*offsets.last().unwrap())
|
||||
.with_precision_and_scale(vector.precision(), vector.scale())
|
||||
.unwrap();
|
||||
|
||||
let mut previous_offset = 0;
|
||||
|
||||
for (offset, value) in offsets.iter().zip(vector.array.iter()) {
|
||||
let repeat_times = *offset - previous_offset;
|
||||
match value {
|
||||
Some(data) => {
|
||||
unsafe {
|
||||
// Safety: std::iter::Repeat and std::iter::Take implement TrustedLen.
|
||||
builder
|
||||
.mutable_array
|
||||
.append_trusted_len_iter(std::iter::repeat(data).take(repeat_times));
|
||||
}
|
||||
}
|
||||
None => {
|
||||
builder.mutable_array.append_nulls(repeat_times);
|
||||
}
|
||||
}
|
||||
previous_offset = *offset;
|
||||
}
|
||||
builder.finish()
|
||||
}
|
||||
|
||||
#[cfg(test)]
pub mod tests {
use arrow_array::Decimal128Array;

@@ -114,30 +114,13 @@ macro_rules! impl_scalar_vector_op {
)+};
}

impl_scalar_vector_op!(BinaryVector, BooleanVector, ListVector, StringVector);

impl VectorOp for Decimal128Vector {
fn replicate(&self, offsets: &[usize]) -> VectorRef {
std::sync::Arc::new(replicate::replicate_decimal128(self, offsets))
}

fn find_unique(&self, selected: &mut BitVec, prev_vector: Option<&dyn Vector>) {
let prev_vector = prev_vector.and_then(|pv| pv.as_any().downcast_ref::<Decimal128Vector>());
find_unique::find_unique_scalar(self, selected, prev_vector);
}

fn filter(&self, filter: &BooleanVector) -> Result<VectorRef> {
filter::filter_non_constant!(self, Decimal128Vector, filter)
}

fn cast(&self, to_type: &ConcreteDataType) -> Result<VectorRef> {
cast::cast_non_constant!(self, to_type)
}

fn take(&self, indices: &UInt32Vector) -> Result<VectorRef> {
take::take_indices!(self, Decimal128Vector, indices)
}
}
impl_scalar_vector_op!(
BinaryVector,
BooleanVector,
ListVector,
StringVector,
Decimal128Vector
);

impl<T: LogicalPrimitiveType> VectorOp for PrimitiveVector<T> {
fn replicate(&self, offsets: &[usize]) -> VectorRef {

@@ -13,7 +13,6 @@
// limitations under the License.

use crate::prelude::*;
pub(crate) use crate::vectors::decimal::replicate_decimal128;
pub(crate) use crate::vectors::null::replicate_null;
pub(crate) use crate::vectors::primitive::replicate_primitive;

@@ -46,7 +45,7 @@ mod tests {

use super::*;
use crate::vectors::constant::ConstantVector;
use crate::vectors::{Decimal128Vector, Int32Vector, NullVector, StringVector, VectorOp};
use crate::vectors::{Int32Vector, NullVector, StringVector, VectorOp};

#[test]
fn test_replicate_primitive() {
@@ -168,23 +167,4 @@ mod tests {
impl_replicate_timestamp_test!(Microsecond);
impl_replicate_timestamp_test!(Nanosecond);
}

#[test]
fn test_replicate_decimal() {
let data = vec![100];
// create a decimal vector
let v = Decimal128Vector::from_values(data.clone())
.with_precision_and_scale(10, 2)
.unwrap();
let offsets = [5];
let v = v.replicate(&offsets);
assert_eq!(5, v.len());

let expect: VectorRef = Arc::new(
Decimal128Vector::from_values(vec![100; 5])
.with_precision_and_scale(10, 2)
.unwrap(),
);
assert_eq!(expect, v);
}
}

@@ -33,7 +33,7 @@ use store_api::region_request::{
AffectedRows, RegionCloseRequest, RegionCreateRequest, RegionDropRequest, RegionOpenRequest,
RegionRequest,
};
use store_api::storage::{RegionId, ScanRequest, SequenceNumber};
use store_api::storage::{RegionId, ScanRequest};
use tokio::sync::Mutex;

use crate::config::EngineConfig;
@@ -114,10 +114,6 @@ impl RegionEngine for FileRegionEngine {
None
}

async fn get_last_seq_num(&self, _: RegionId) -> Result<Option<SequenceNumber>, BoxedError> {
Ok(None)
}

fn set_region_role(&self, region_id: RegionId, role: RegionRole) -> Result<(), BoxedError> {
self.inner
.set_region_role(region_id, role)

@@ -121,7 +121,7 @@ impl Default for FlownodeOptions {
cluster_id: None,
node_id: None,
flow: FlowConfig::default(),
grpc: GrpcOptions::default().with_bind_addr("127.0.0.1:3004"),
grpc: GrpcOptions::default().with_addr("127.0.0.1:3004"),
http: HttpOptions::default(),
meta_client: None,
logging: LoggingOptions::default(),

@@ -22,7 +22,7 @@ use common_meta::key::table_info::TableInfoValue;
use datatypes::prelude::ConcreteDataType;
use datatypes::schema::ColumnSchema;
use itertools::Itertools;
use operator::expr_helper;
use operator::expr_factory::CreateExprFactory;
use session::context::QueryContextBuilder;
use snafu::{OptionExt, ResultExt};
use table::table_reference::TableReference;
@@ -32,6 +32,7 @@ use crate::adapter::{TableName, WorkerHandle, AUTO_CREATED_PLACEHOLDER_TS_COL};
use crate::error::{Error, ExternalSnafu, UnexpectedSnafu};
use crate::repr::{ColumnType, RelationDesc, RelationType};
use crate::FlowWorkerManager;

impl FlowWorkerManager {
/// Get a worker handle for creating a flow, using round-robin to select a worker.
pub(crate) async fn get_worker_handle_for_create_flow(&self) -> &WorkerHandle {
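The doc comment above only states the selection policy; the worker list and counter are not part of this hunk. As a hedged, stand-alone illustration, round-robin selection can be as simple as an atomic counter taken modulo the number of workers (RoundRobin below is a made-up stand-in, not the real WorkerHandle machinery):

use std::sync::atomic::{AtomicUsize, Ordering};

// Stand-in for round-robin worker selection over an arbitrary worker slice.
struct RoundRobin {
    next: AtomicUsize,
}

impl RoundRobin {
    fn pick<'a, T>(&self, workers: &'a [T]) -> &'a T {
        let i = self.next.fetch_add(1, Ordering::Relaxed) % workers.len();
        &workers[i]
    }
}

fn main() {
    let rr = RoundRobin { next: AtomicUsize::new(0) };
    let workers = ["w0", "w1", "w2"];
    let picked: Vec<&str> = (0..4).map(|_| *rr.pick(&workers)).collect();
    // Selection wraps around after the last worker.
    assert_eq!(picked, ["w0", "w1", "w2", "w0"]);
}
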
@@ -65,18 +66,19 @@ impl FlowWorkerManager {
let proto_schema = column_schemas_to_proto(tys.clone(), &pks)?;

// create sink table
let create_expr = expr_helper::create_table_expr_by_column_schemas(
&TableReference {
catalog: &table_name[0],
schema: &table_name[1],
table: &table_name[2],
},
&proto_schema,
"mito",
Some(&format!("Sink table for flow {}", flow_name)),
)
.map_err(BoxedError::new)
.context(ExternalSnafu)?;
let create_expr = CreateExprFactory {}
.create_table_expr_by_column_schemas(
&TableReference {
catalog: &table_name[0],
schema: &table_name[1],
table: &table_name[2],
},
&proto_schema,
"mito",
Some(&format!("Sink table for flow {}", flow_name)),
)
.map_err(BoxedError::new)
.context(ExternalSnafu)?;

self.submit_create_sink_table_ddl(create_expr).await?;
Ok(true)

@@ -83,7 +83,7 @@ impl HeartbeatTask {
) -> Self {
Self {
node_id: opts.node_id.unwrap_or(0),
peer_addr: addrs::resolve_addr(&opts.grpc.bind_addr, Some(&opts.grpc.server_addr)),
peer_addr: addrs::resolve_addr(&opts.grpc.addr, Some(&opts.grpc.hostname)),
meta_client,
report_interval: heartbeat_opts.interval,
retry_interval: heartbeat_opts.retry_interval,

@@ -334,7 +334,7 @@ impl FlownodeBuilder {

let heartbeat_task = self.heartbeat_task;

let addr = self.opts.grpc.bind_addr;
let addr = self.opts.grpc.addr;
let instance = FlownodeInstance {
server,
addr: addr.parse().context(ParseAddrSnafu { addr })?,

@@ -35,7 +35,6 @@ common-runtime.workspace = true
common-telemetry.workspace = true
common-time.workspace = true
common-version.workspace = true
datafusion.workspace = true
datafusion-expr.workspace = true
datanode.workspace = true
datatypes.workspace = true
@@ -52,8 +51,8 @@ prometheus.workspace = true
promql-parser.workspace = true
prost.workspace = true
query.workspace = true
raft-engine.workspace = true
serde.workspace = true
serde_json.workspace = true
servers.workspace = true
session.workspace = true
snafu.workspace = true

@@ -24,8 +24,7 @@ use servers::heartbeat_options::HeartbeatOptions;
use servers::http::HttpOptions;

use crate::service_config::{
InfluxdbOptions, JaegerOptions, MysqlOptions, OpentsdbOptions, OtlpOptions, PostgresOptions,
PromStoreOptions,
InfluxdbOptions, MysqlOptions, OpentsdbOptions, OtlpOptions, PostgresOptions, PromStoreOptions,
};

#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
@@ -41,7 +40,6 @@ pub struct FrontendOptions {
pub opentsdb: OpentsdbOptions,
pub influxdb: InfluxdbOptions,
pub prom_store: PromStoreOptions,
pub jaeger: JaegerOptions,
pub otlp: OtlpOptions,
pub meta_client: Option<MetaClientOptions>,
pub logging: LoggingOptions,
@@ -64,7 +62,6 @@ impl Default for FrontendOptions {
postgres: PostgresOptions::default(),
opentsdb: OpentsdbOptions::default(),
influxdb: InfluxdbOptions::default(),
jaeger: JaegerOptions::default(),
prom_store: PromStoreOptions::default(),
otlp: OtlpOptions::default(),
meta_client: None,

@@ -56,7 +56,7 @@ impl HeartbeatTask {
resp_handler_executor: HeartbeatResponseHandlerExecutorRef,
) -> Self {
HeartbeatTask {
peer_addr: addrs::resolve_addr(&opts.grpc.bind_addr, Some(&opts.grpc.server_addr)),
peer_addr: addrs::resolve_addr(&opts.grpc.addr, Some(&opts.grpc.hostname)),
meta_client,
report_interval: heartbeat_opts.interval.as_millis() as u64,
retry_interval: heartbeat_opts.retry_interval.as_millis() as u64,

@@ -15,7 +15,6 @@
pub mod builder;
mod grpc;
mod influxdb;
mod jaeger;
mod log_handler;
mod logs;
mod opentsdb;
@@ -41,7 +40,7 @@ use common_procedure::local::{LocalManager, ManagerConfig};
use common_procedure::options::ProcedureConfig;
use common_procedure::ProcedureManagerRef;
use common_query::Output;
use common_telemetry::{debug, error, info, tracing};
use common_telemetry::{debug, error, tracing};
use datafusion_expr::LogicalPlan;
use log_store::raft_engine::RaftEngineBackend;
use operator::delete::DeleterRef;
@@ -56,6 +55,7 @@ use query::query_engine::options::{validate_catalog_and_schema, QueryOptions};
use query::query_engine::DescribeResult;
use query::stats::StatementStatistics;
use query::QueryEngineRef;
use raft_engine::{Config, ReadableSize, RecoveryMode};
use servers::error as server_error;
use servers::error::{AuthSnafu, ExecuteQuerySnafu, ParsePromQLSnafu};
use servers::export_metrics::ExportMetricsTask;
@@ -66,7 +66,7 @@ use servers::prometheus_handler::PrometheusHandler;
use servers::query_handler::grpc::GrpcQueryHandler;
use servers::query_handler::sql::SqlQueryHandler;
use servers::query_handler::{
InfluxdbLineProtocolHandler, JaegerQueryHandler, LogQueryHandler, OpenTelemetryProtocolHandler,
InfluxdbLineProtocolHandler, LogQueryHandler, OpenTelemetryProtocolHandler,
OpentsdbProtocolHandler, PipelineHandler, PromStoreProtocolHandler,
};
use servers::server::ServerHandlers;
@@ -101,7 +101,6 @@ pub trait FrontendInstance:
+ PrometheusHandler
+ PipelineHandler
+ LogQueryHandler
+ JaegerQueryHandler
+ Send
+ Sync
+ 'static
@@ -135,15 +134,19 @@ impl Instance {
kv_backend_config: KvBackendConfig,
procedure_config: ProcedureConfig,
) -> Result<(KvBackendRef, ProcedureManagerRef)> {
info!(
"Creating metadata kvbackend with config: {:?}",
kv_backend_config
);
let kv_backend = RaftEngineBackend::try_open_with_cfg(dir, &kv_backend_config)
let kv_backend = Arc::new(
RaftEngineBackend::try_open_with_cfg(Config {
dir,
purge_threshold: ReadableSize(kv_backend_config.purge_threshold.0),
recovery_mode: RecoveryMode::TolerateTailCorruption,
batch_compression_threshold: ReadableSize::kb(8),
target_file_size: ReadableSize(kv_backend_config.file_size.0),
..Default::default()
})
.map_err(BoxedError::new)
.context(error::OpenRaftEngineBackendSnafu)?;
.context(error::OpenRaftEngineBackendSnafu)?,
);

let kv_backend = Arc::new(kv_backend);
let state_store = Arc::new(KvStateStore::new(kv_backend.clone()));

let manager_config = ManagerConfig {
@@ -169,10 +172,6 @@ impl Instance {
&self.catalog_manager
}

pub fn query_engine(&self) -> &QueryEngineRef {
&self.query_engine
}

pub fn plugins(&self) -> Plugins {
self.plugins.clone()
}

@@ -1,337 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::collections::HashMap;
use std::sync::Arc;

use async_trait::async_trait;
use catalog::CatalogManagerRef;
use common_function::function::{Function, FunctionRef};
use common_function::scalars::json::json_get::{
JsonGetBool, JsonGetFloat, JsonGetInt, JsonGetString,
};
use common_function::scalars::udf::create_udf;
use common_function::state::FunctionState;
use common_query::Output;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use datafusion::dataframe::DataFrame;
use datafusion::execution::context::SessionContext;
use datafusion::execution::SessionStateBuilder;
use datafusion_expr::{col, lit, lit_timestamp_nano, Expr};
use query::QueryEngineRef;
use serde_json::Value as JsonValue;
use servers::error::{
CatalogSnafu, CollectRecordbatchSnafu, DataFusionSnafu, Result as ServerResult,
TableNotFoundSnafu,
};
use servers::http::jaeger::QueryTraceParams;
use servers::otlp::trace::{
DURATION_NANO_COLUMN, SERVICE_NAME_COLUMN, SPAN_ATTRIBUTES_COLUMN, SPAN_ID_COLUMN,
SPAN_KIND_COLUMN, SPAN_KIND_PREFIX, SPAN_NAME_COLUMN, TIMESTAMP_COLUMN, TRACE_ID_COLUMN,
TRACE_TABLE_NAME,
};
use servers::query_handler::JaegerQueryHandler;
use session::context::QueryContextRef;
use snafu::{OptionExt, ResultExt};
use table::table::adapter::DfTableProviderAdapter;

use super::Instance;

const DEFAULT_LIMIT: usize = 100;

#[async_trait]
impl JaegerQueryHandler for Instance {
async fn get_services(&self, ctx: QueryContextRef) -> ServerResult<Output> {
// It's equivalent to `SELECT DISTINCT(service_name) FROM {db}.{trace_table}`.
Ok(query_trace_table(
ctx,
self.catalog_manager(),
self.query_engine(),
vec![col(SERVICE_NAME_COLUMN)],
vec![],
Some(DEFAULT_LIMIT),
None,
true,
)
.await?)
}

async fn get_operations(
&self,
ctx: QueryContextRef,
service_name: &str,
span_kind: Option<&str>,
) -> ServerResult<Output> {
let mut filters = vec![col(SERVICE_NAME_COLUMN).eq(lit(service_name))];

if let Some(span_kind) = span_kind {
filters.push(col(SPAN_KIND_COLUMN).eq(lit(format!(
"{}{}",
SPAN_KIND_PREFIX,
span_kind.to_uppercase()
))));
}

// It's equivalent to `SELECT span_name, span_kind FROM {db}.{trace_table} WHERE service_name = '{service_name}'`.
Ok(query_trace_table(
ctx,
self.catalog_manager(),
self.query_engine(),
vec![
col(SPAN_NAME_COLUMN),
col(SPAN_KIND_COLUMN),
col(SERVICE_NAME_COLUMN),
],
filters,
Some(DEFAULT_LIMIT),
None,
false,
)
.await?)
}

async fn get_trace(&self, ctx: QueryContextRef, trace_id: &str) -> ServerResult<Output> {
// It's equivalent to `SELECT trace_id, timestamp, duration_nano, service_name, span_name, span_id, span_attributes FROM {db}.{trace_table} WHERE trace_id = '{trace_id}'`.
let selects = vec![
col(TRACE_ID_COLUMN),
col(TIMESTAMP_COLUMN),
col(DURATION_NANO_COLUMN),
col(SERVICE_NAME_COLUMN),
col(SPAN_NAME_COLUMN),
col(SPAN_ID_COLUMN),
col(SPAN_ATTRIBUTES_COLUMN),
];

let filters = vec![col(TRACE_ID_COLUMN).eq(lit(trace_id))];

Ok(query_trace_table(
ctx,
self.catalog_manager(),
self.query_engine(),
selects,
filters,
Some(DEFAULT_LIMIT),
None,
false,
)
.await?)
}

async fn find_traces(
&self,
ctx: QueryContextRef,
query_params: QueryTraceParams,
) -> ServerResult<Output> {
let selects = vec![
col(TRACE_ID_COLUMN),
col(TIMESTAMP_COLUMN),
col(DURATION_NANO_COLUMN),
col(SERVICE_NAME_COLUMN),
col(SPAN_NAME_COLUMN),
col(SPAN_ID_COLUMN),
col(SPAN_ATTRIBUTES_COLUMN),
];

let mut filters = vec![];

if let Some(operation_name) = query_params.operation_name {
filters.push(col(SPAN_NAME_COLUMN).eq(lit(operation_name)));
}

if let Some(start_time) = query_params.start_time {
filters.push(col(TIMESTAMP_COLUMN).gt_eq(lit_timestamp_nano(start_time)));
}

if let Some(end_time) = query_params.end_time {
filters.push(col(TIMESTAMP_COLUMN).lt_eq(lit_timestamp_nano(end_time)));
}

if let Some(min_duration) = query_params.min_duration {
filters.push(col(DURATION_NANO_COLUMN).gt_eq(lit(min_duration)));
}

if let Some(max_duration) = query_params.max_duration {
filters.push(col(DURATION_NANO_COLUMN).lt_eq(lit(max_duration)));
}

Ok(query_trace_table(
ctx,
self.catalog_manager(),
self.query_engine(),
selects,
filters,
Some(DEFAULT_LIMIT),
query_params.tags,
false,
)
.await?)
}
}

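find_traces above maps each optional QueryTraceParams field to at most one filter expression before handing them to query_trace_table. A dependency-free sketch of that shape, with made-up types and string predicates standing in for the real DataFusion expressions:

// Each optional query parameter contributes at most one predicate; absent
// parameters contribute none. Types and names here are illustrative only.
struct TraceQuery {
    operation_name: Option<String>,
    min_duration: Option<u64>,
}

fn to_predicates(q: &TraceQuery) -> Vec<String> {
    let mut preds = Vec::new();
    if let Some(op) = &q.operation_name {
        preds.push(format!("span_name = '{op}'"));
    }
    if let Some(min) = q.min_duration {
        preds.push(format!("duration_nano >= {min}"));
    }
    preds
}

fn main() {
    let q = TraceQuery {
        operation_name: Some("GET /api".to_string()),
        min_duration: Some(1_000),
    };
    assert_eq!(
        to_predicates(&q),
        vec![
            "span_name = 'GET /api'".to_string(),
            "duration_nano >= 1000".to_string()
        ]
    );
}
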
#[allow(clippy::too_many_arguments)]
async fn query_trace_table(
ctx: QueryContextRef,
catalog_manager: &CatalogManagerRef,
query_engine: &QueryEngineRef,
selects: Vec<Expr>,
filters: Vec<Expr>,
limit: Option<usize>,
tags: Option<HashMap<String, JsonValue>>,
distinct: bool,
) -> ServerResult<Output> {
let db = ctx.get_db_string();
let table = catalog_manager
.table(ctx.current_catalog(), &db, TRACE_TABLE_NAME, Some(&ctx))
.await
.context(CatalogSnafu)?
.with_context(|| TableNotFoundSnafu {
table: TRACE_TABLE_NAME,
catalog: ctx.current_catalog(),
schema: db,
})?;

let df_context = create_df_context(query_engine, ctx.clone())?;

let dataframe = df_context
.read_table(Arc::new(DfTableProviderAdapter::new(table)))
.context(DataFusionSnafu)?;

let dataframe = dataframe.select(selects).context(DataFusionSnafu)?;

// Apply all filters.
let dataframe = filters
.into_iter()
.chain(tags.map_or(Ok(vec![]), |t| tags_filters(&dataframe, t))?)
.try_fold(dataframe, |df, expr| {
df.filter(expr).context(DataFusionSnafu)
})?;

// Apply the distinct if needed.
let dataframe = if distinct {
dataframe.distinct().context(DataFusionSnafu)?
} else {
dataframe
};

// Apply the limit if needed.
let dataframe = if let Some(limit) = limit {
dataframe.limit(0, Some(limit)).context(DataFusionSnafu)?
} else {
dataframe
};

// Execute the query and collect the result.
let stream = dataframe.execute_stream().await.context(DataFusionSnafu)?;

let output = Output::new_with_stream(Box::pin(
RecordBatchStreamAdapter::try_new(stream).context(CollectRecordbatchSnafu)?,
));

Ok(output)
}

// The current implementation registers UDFs during the planning stage, which makes it difficult
// to utilize them through DataFrame APIs. To address this limitation, we create a new session
// context and register the required UDFs, allowing them to be decoupled from the global context.
// TODO(zyy17): Is it possible or necessary to reuse the existing session context?
fn create_df_context(
query_engine: &QueryEngineRef,
ctx: QueryContextRef,
) -> ServerResult<SessionContext> {
let df_context = SessionContext::new_with_state(
SessionStateBuilder::new_from_existing(query_engine.engine_state().session_state()).build(),
);

// The following JSON UDFs will be used for tags filters.
let udfs: Vec<FunctionRef> = vec![
Arc::new(JsonGetInt),
Arc::new(JsonGetFloat),
Arc::new(JsonGetBool),
Arc::new(JsonGetString),
];

for udf in udfs {
df_context
.register_udf(create_udf(udf, ctx.clone(), Arc::new(FunctionState::default())).into());
}

Ok(df_context)
}

fn tags_filters(
dataframe: &DataFrame,
tags: HashMap<String, JsonValue>,
) -> ServerResult<Vec<Expr>> {
let mut filters = vec![];

// NOTE: The key of the tags may contain `.`, for example: `http.status_code`, so we need to use `["http.status_code"]` in json path to access the value.
for (key, value) in tags.iter() {
if let JsonValue::String(value) = value {
filters.push(
dataframe
.registry()
.udf(JsonGetString {}.name())
.context(DataFusionSnafu)?
.call(vec![
col(SPAN_ATTRIBUTES_COLUMN),
lit(format!("[\"{}\"]", key)),
])
.eq(lit(value)),
);
}
if let JsonValue::Number(value) = value {
if value.is_i64() {
filters.push(
dataframe
.registry()
.udf(JsonGetInt {}.name())
.context(DataFusionSnafu)?
.call(vec![
col(SPAN_ATTRIBUTES_COLUMN),
lit(format!("[\"{}\"]", key)),
])
.eq(lit(value.as_i64().unwrap())),
);
}
if value.is_f64() {
filters.push(
dataframe
.registry()
.udf(JsonGetFloat {}.name())
.context(DataFusionSnafu)?
.call(vec![
col(SPAN_ATTRIBUTES_COLUMN),
lit(format!("[\"{}\"]", key)),
])
.eq(lit(value.as_f64().unwrap())),
);
}
}
if let JsonValue::Bool(value) = value {
filters.push(
dataframe
.registry()
.udf(JsonGetBool {}.name())
.context(DataFusionSnafu)?
.call(vec![
col(SPAN_ATTRIBUTES_COLUMN),
lit(format!("[\"{}\"]", key)),
])
.eq(lit(*value)),
);
}
}

Ok(filters)
}
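The NOTE inside tags_filters explains why tag keys are wrapped in a bracketed JSON path rather than addressed with dotted access. A tiny standalone sketch of just that quoting convention (tag_json_path is a made-up helper, not part of the diff):

// Tag keys may contain '.', e.g. "http.status_code", so they are addressed as
// ["key"] instead of .key when building the JSON path passed to the JSON UDFs above.
fn tag_json_path(key: &str) -> String {
    format!("[\"{}\"]", key)
}

fn main() {
    assert_eq!(tag_json_path("http.status_code"), r#"["http.status_code"]"#);
    assert_eq!(tag_json_path("simple"), r#"["simple"]"#);
}
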
@@ -20,11 +20,11 @@ use common_telemetry::tracing;
use opentelemetry_proto::tonic::collector::logs::v1::ExportLogsServiceRequest;
use opentelemetry_proto::tonic::collector::metrics::v1::ExportMetricsServiceRequest;
use opentelemetry_proto::tonic::collector::trace::v1::ExportTraceServiceRequest;
use pipeline::{GreptimePipelineParams, PipelineWay};
use pipeline::PipelineWay;
use servers::error::{self, AuthSnafu, InFlightWriteBytesExceededSnafu, Result as ServerResult};
use servers::interceptor::{OpenTelemetryProtocolInterceptor, OpenTelemetryProtocolInterceptorRef};
use servers::otlp;
use servers::query_handler::{OpenTelemetryProtocolHandler, PipelineHandlerRef};
use servers::query_handler::OpenTelemetryProtocolHandler;
use session::context::QueryContextRef;
use snafu::ResultExt;

@@ -112,10 +112,8 @@ impl OpenTelemetryProtocolHandler for Instance {
#[tracing::instrument(skip_all)]
async fn logs(
&self,
pipeline_handler: PipelineHandlerRef,
request: ExportLogsServiceRequest,
pipeline: PipelineWay,
pipeline_params: GreptimePipelineParams,
table_name: String,
ctx: QueryContextRef,
) -> ServerResult<Output> {
@@ -130,15 +128,7 @@ impl OpenTelemetryProtocolHandler for Instance {
.get::<OpenTelemetryProtocolInterceptorRef<servers::error::Error>>();
interceptor_ref.pre_execute(ctx.clone())?;

let (requests, rows) = otlp::logs::to_grpc_insert_requests(
request,
pipeline,
pipeline_params,
table_name,
&ctx,
pipeline_handler,
)
.await?;
let (requests, rows) = otlp::logs::to_grpc_insert_requests(request, pipeline, table_name)?;

let _guard = if let Some(limiter) = &self.limiter {
let result = limiter.limit_row_inserts(&requests);

@@ -112,11 +112,6 @@ where
if opts.otlp.enable {
builder = builder.with_otlp_handler(self.instance.clone());
}

if opts.jaeger.enable {
builder = builder.with_jaeger_handler(self.instance.clone());
}

builder
}

@@ -198,7 +193,7 @@ where

{
// Always init GRPC server
let grpc_addr = parse_addr(&opts.grpc.bind_addr)?;
let grpc_addr = parse_addr(&opts.grpc.addr)?;
let grpc_server = self.build_grpc_server(&opts)?;
handlers.insert((Box::new(grpc_server), grpc_addr)).await;
}
@@ -232,7 +227,6 @@ where
Arc::new(MysqlSpawnConfig::new(
opts.tls.should_force_tls(),
tls_server_config,
opts.keep_alive.as_secs(),
opts.reject_no_database.unwrap_or(false),
)),
);
@@ -254,7 +248,6 @@ where
ServerSqlQueryHandlerAdapter::arc(instance.clone()),
opts.tls.should_force_tls(),
tls_server_config,
opts.keep_alive.as_secs(),
common_runtime::global_runtime(),
user_provider.clone(),
)) as Box<dyn Server>;

@@ -13,7 +13,6 @@
// limitations under the License.

pub mod influxdb;
pub mod jaeger;
pub mod mysql;
pub mod opentsdb;
pub mod otlp;
@@ -21,7 +20,6 @@ pub mod postgres;
pub mod prom_store;

pub use influxdb::InfluxdbOptions;
pub use jaeger::JaegerOptions;
pub use mysql::MysqlOptions;
pub use opentsdb::OpentsdbOptions;
pub use otlp::OtlpOptions;

@@ -23,12 +23,6 @@ pub struct MysqlOptions {
#[serde(default = "Default::default")]
pub tls: TlsOption,
pub reject_no_database: Option<bool>,
/// Server-side keep-alive time.
///
/// Set to 0 (default) to disable.
#[serde(default = "Default::default")]
#[serde(with = "humantime_serde")]
pub keep_alive: std::time::Duration,
}

impl Default for MysqlOptions {
@@ -39,7 +33,6 @@ impl Default for MysqlOptions {
runtime_size: 2,
tls: TlsOption::default(),
reject_no_database: None,
keep_alive: std::time::Duration::from_secs(0),
}
}
}

@@ -22,12 +22,6 @@ pub struct PostgresOptions {
pub runtime_size: usize,
#[serde(default = "Default::default")]
pub tls: TlsOption,
/// Server-side keep-alive time.
///
/// Set to 0 (default) to disable.
#[serde(default = "Default::default")]
#[serde(with = "humantime_serde")]
pub keep_alive: std::time::Duration,
}

impl Default for PostgresOptions {
@@ -37,7 +31,6 @@ impl Default for PostgresOptions {
addr: "127.0.0.1:4003".to_string(),
runtime_size: 2,
tls: Default::default(),
keep_alive: std::time::Duration::from_secs(0),
}
}
}

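Both options above wire keep_alive through the humantime_serde attribute, so config values are written as human-readable durations and a zero duration disables the feature. A small, hedged sketch of how that attribute behaves, assuming the serde (with derive), serde_json, and humantime-serde crates; the KeepAliveOptions struct is made up for illustration:

use std::time::Duration;

use serde::Deserialize;

// Illustrative config fragment: the humantime_serde adapter parses strings like
// "30s" into a std::time::Duration, mirroring the keep_alive fields in the hunks above.
#[derive(Debug, Deserialize)]
struct KeepAliveOptions {
    #[serde(with = "humantime_serde")]
    keep_alive: Duration,
}

fn main() {
    let on: KeepAliveOptions = serde_json::from_str(r#"{ "keep_alive": "30s" }"#).unwrap();
    assert_eq!(on.keep_alive, Duration::from_secs(30));

    // Per the doc comments above, a zero duration disables server-side keep-alive.
    let off: KeepAliveOptions = serde_json::from_str(r#"{ "keep_alive": "0s" }"#).unwrap();
    assert_eq!(off.keep_alive, Duration::from_secs(0));
}
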
@@ -30,7 +30,7 @@ async fn new_bounded_stager(prefix: &str) -> (TempDir, Arc<BoundedStager>) {
let path = staging_dir.path().to_path_buf();
(
staging_dir,
Arc::new(BoundedStager::new(path, 102400, None).await.unwrap()),
Arc::new(BoundedStager::new(path, 102400).await.unwrap()),
)
}


@@ -18,7 +18,6 @@ async-trait.workspace = true
bytes.workspace = true
chrono.workspace = true
common-base.workspace = true
common-config.workspace = true
common-error.workspace = true
common-macro.workspace = true
common-meta.workspace = true

@@ -17,9 +17,8 @@
use std::any::Any;
use std::ops::Bound::{Excluded, Included, Unbounded};
use std::path::Path;
use std::sync::{Arc, RwLock};
use std::sync::RwLock;

use common_config::KvBackendConfig;
use common_error::ext::BoxedError;
use common_meta::error as meta_error;
use common_meta::kv_backend::txn::{Txn, TxnOp, TxnOpResponse, TxnRequest, TxnResponse};
@@ -31,19 +30,16 @@ use common_meta::rpc::store::{
};
use common_meta::rpc::KeyValue;
use common_meta::util::get_next_prefix_key;
use common_runtime::RepeatedTask;
use raft_engine::{Config, Engine, LogBatch, ReadableSize, RecoveryMode};
use raft_engine::{Config, Engine, LogBatch};
use snafu::{IntoError, ResultExt};

use crate::error::{self, Error, IoSnafu, RaftEngineSnafu, StartGcTaskSnafu};
use crate::raft_engine::log_store::PurgeExpiredFilesFunction;
use crate::error::{self, IoSnafu, RaftEngineSnafu};

pub(crate) const SYSTEM_NAMESPACE: u64 = 0;

/// RaftEngine based [KvBackend] implementation.
pub struct RaftEngineBackend {
engine: RwLock<Arc<Engine>>,
_gc_task: RepeatedTask<Error>,
engine: RwLock<Engine>,
}

fn ensure_dir(dir: &str) -> error::Result<()> {
@@ -69,34 +65,15 @@ fn ensure_dir(dir: &str) -> error::Result<()> {
}

impl RaftEngineBackend {
pub fn try_open_with_cfg(dir: String, config: &KvBackendConfig) -> error::Result<Self> {
let cfg = Config {
dir: dir.to_string(),
purge_threshold: ReadableSize(config.purge_threshold.0),
recovery_mode: RecoveryMode::TolerateTailCorruption,
batch_compression_threshold: ReadableSize::kb(8),
target_file_size: ReadableSize(config.file_size.0),
..Default::default()
};

ensure_dir(&dir)?;
if let Some(spill_dir) = &cfg.spill_dir {
pub fn try_open_with_cfg(config: Config) -> error::Result<Self> {
ensure_dir(&config.dir)?;
if let Some(spill_dir) = &config.spill_dir {
ensure_dir(spill_dir)?;
}

let engine = Arc::new(Engine::open(cfg).context(RaftEngineSnafu)?);
let gc_task = RepeatedTask::new(
config.purge_interval,
Box::new(PurgeExpiredFilesFunction {
engine: engine.clone(),
}),
);
gc_task
.start(common_runtime::global_runtime())
.context(StartGcTaskSnafu)?;
let engine = Engine::open(config).context(RaftEngineSnafu)?;
Ok(Self {
engine: RwLock::new(engine),
_gc_task: gc_task,
})
}
}
@@ -421,11 +398,21 @@ mod tests {
};
use common_meta::rpc::store::{CompareAndPutRequest, CompareAndPutResponse};
use common_test_util::temp_dir::create_temp_dir;
use raft_engine::{Config, ReadableSize, RecoveryMode};

use super::*;

fn build_kv_backend(dir: String) -> RaftEngineBackend {
RaftEngineBackend::try_open_with_cfg(dir, &KvBackendConfig::default()).unwrap()
let config = Config {
dir,
spill_dir: None,
recovery_mode: RecoveryMode::AbsoluteConsistency,
target_file_size: ReadableSize::mb(4),
purge_threshold: ReadableSize::mb(16),
..Default::default()
};
let engine = RwLock::new(Engine::open(config).unwrap());
RaftEngineBackend { engine }
}

#[tokio::test]

@@ -50,7 +50,7 @@ pub struct RaftEngineLogStore {
}

pub struct PurgeExpiredFilesFunction {
pub engine: Arc<Engine>,
engine: Arc<Engine>,
}

#[async_trait::async_trait]

@@ -24,7 +24,7 @@ use common_meta::kv_backend::chroot::ChrootKvBackend;
use common_meta::kv_backend::etcd::EtcdStore;
use common_meta::kv_backend::memory::MemoryKvBackend;
#[cfg(feature = "pg_kvbackend")]
use common_meta::kv_backend::rds::PgStore;
use common_meta::kv_backend::postgres::PgStore;
use common_meta::kv_backend::{KvBackendRef, ResettableKvBackendRef};
#[cfg(feature = "pg_kvbackend")]
use common_telemetry::error;

Some files were not shown because too many files have changed in this diff.